text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import print_function
import MDAnalysis as mda
from MDAnalysis.analysis import contacts
from MDAnalysis.analysis.distances import distance_array
from numpy.testing import (dec, assert_almost_equal, assert_equal, raises,
assert_array_equal, assert_array_almost_equal)
import numpy as np
import os
from MDAnalysisTests.datafiles import (PSF,
DCD,
contacts_villin_folded,
contacts_villin_unfolded,
contacts_file, )
from MDAnalysisTests import parser_not_found, tempdir
def test_soft_cut_q():
    """Probe the extremal points of the soft-cut contact function."""
    # at the reference distance the switching function is exactly one half
    assert_equal(contacts.soft_cut_q([0], [0]), 0.5)
    # far above / far below the reference distance saturates to 0 / 1
    for distance, expected in (([100], 0), ([-100], 1)):
        assert_almost_equal(contacts.soft_cut_q(distance, [0]), expected)
def test_soft_cut_q_folded():
    """For the folded villin structure the native contacts must be intact."""
    u = mda.Universe(contacts_villin_folded)
    contact_table = np.genfromtxt(contacts_file)
    # pair indices are stored 1-based in the reference file
    pairs = contact_table[:, :2].astype(int) - 1
    reference_distances = contact_table[:, 2]
    separations = (u.atoms.positions[pairs[:, 0]] -
                   u.atoms.positions[pairs[:, 1]])
    distances = np.linalg.norm(separations, axis=1)
    # soft-cut switching function with the canonical parameters
    beta = 5.0
    lambda_constant = 1.8
    q = 1 / (1 + np.exp(beta * (distances - lambda_constant * reference_distances)))
    assert_almost_equal(q.mean(), 1.0, decimal=3)
def test_soft_cut_q_unfolded():
    """For the unfolded villin structure the native contacts are lost."""
    u = mda.Universe(contacts_villin_unfolded)
    contact_table = np.genfromtxt(contacts_file)
    # pair indices are stored 1-based in the reference file
    pairs = contact_table[:, :2].astype(int) - 1
    reference_distances = contact_table[:, 2]
    separations = (u.atoms.positions[pairs[:, 0]] -
                   u.atoms.positions[pairs[:, 1]])
    distances = np.linalg.norm(separations, axis=1)
    # soft-cut switching function with the canonical parameters
    beta = 5.0
    lambda_constant = 1.8
    q = 1 / (1 + np.exp(beta * (distances - lambda_constant * reference_distances)))
    assert_almost_equal(q.mean(), 0.0, decimal=1)
def test_hard_cut_q():
    """Probe extremal points of the hard-cut contact function."""
    # scalar cutoff: fraction of distances within the cutoff,
    # then per-contact cutoffs as a list
    cases = (
        (([1], 2), 1),
        (([2], 1), 0),
        (([2, 0.5], 1), 0.5),
        (([2, 3], [3, 4]), 1),
        (([4, 5], [3, 4]), 0),
    )
    for args, expected in cases:
        assert_equal(contacts.hard_cut_q(*args), expected)
def test_radius_cut_q():
    """Probe extremal points of the radius-cut contact function."""
    # the second (r0) argument is unused by this method, hence None
    for distances, radius, expected in (([1], 2, 1),
                                        ([2], 1, 0),
                                        ([2, 0.5], 1, 0.5)):
        assert_equal(contacts.radius_cut_q(distances, None, radius), expected)
def test_contact_matrix():
    """contact_matrix flags distances that lie below the per-element radius."""
    distances = np.arange(5)
    radii = np.ones(5) * 2.5
    expected = [True, True, True, False, False]
    assert_array_equal(contacts.contact_matrix(distances, radii), expected)
    # the result can also be written into a caller-supplied buffer in place
    buf = np.empty(5)
    contacts.contact_matrix(distances, radii, out=buf)
    assert_array_equal(buf, expected)
@dec.skipif(parser_not_found('DCD'),
            'DCD parser not available. Are you using python 3?')
def test_new_selection():
    """_new_selections evaluates the selection at the requested frame."""
    universe = mda.Universe(PSF, DCD)
    # ask for the selection evaluated at the final frame
    selection = contacts._new_selections(universe, ('all', ), -1)[0]
    # move the trajectory to that frame and compare coordinates
    universe.trajectory[-1]
    assert_array_equal(selection.positions, universe.atoms.positions)
def soft_cut(ref, u, selA, selB, radius=4.5, beta=5.0, lambda_constant=1.8):
    """
    Reference implementation for testing
    """
    # reference atom groups and their full pairwise distance matrix (r0)
    refA = ref.select_atoms(selA)
    refB = ref.select_atoms(selB)
    dref = distance_array(refA.positions, refB.positions)
    # boolean mask of "native" contacts: reference pairs within the cutoff
    mask = dref < radius
    r0 = dref[mask]
    # the same two selections followed along the trajectory
    grA = u.select_atoms(selA)
    grB = u.select_atoms(selB)
    results = []
    for ts in u.trajectory:
        # current distances of the native pairs only
        r = distance_array(grA.positions, grB.positions)[mask]
        # soft switching function, averaged over the native contacts
        x = 1 / (1 + np.exp(beta * (r - lambda_constant * r0)))
        results.append((ts.time, x.sum() / mask.sum()))
    return np.asarray(results)
class TestContacts(object):
    """Tests for :class:`MDAnalysis.analysis.contacts.Contacts`.

    Nose-style test class: ``__init__`` acts as the per-class fixture that
    loads the PSF/DCD test trajectory and defines the salt-bridge
    selections used by the individual tests.
    """
    @dec.skipif(
        parser_not_found('DCD'),
        'DCD parser not available. Are you using python 3?')
    def __init__(self):
        # basic = Arg/Lys side-chain nitrogens, acidic = Asp/Glu carboxyl
        # oxygens; together they define the salt-bridge contact pairs
        self.universe = mda.Universe(PSF, DCD)
        self.trajectory = self.universe.trajectory
        self.sel_basic = "(resname ARG LYS) and (name NH* NZ)"
        self.sel_acidic = "(resname ASP GLU) and (name OE* OD*)"
    def tearDown(self):
        # rewind the trajectory to frame 0 before releasing the universe
        self.universe.trajectory[0]
        del self.universe
    def _run_Contacts(self, **kwargs):
        # helper: run a salt-bridge Contacts analysis (radius 6 A); extra
        # kwargs such as start/stop/step/method are passed straight through
        acidic = self.universe.select_atoms(self.sel_acidic)
        basic = self.universe.select_atoms(self.sel_basic)
        return contacts.Contacts(
            self.universe,
            selection=(self.sel_acidic, self.sel_basic),
            refgroup=(acidic, basic),
            radius=6.0,
            **kwargs).run()
    def test_startframe(self):
        """test_startframe: TestContactAnalysis1: start frame set to 0 (resolution of
        Issue #624)
        """
        CA1 = self._run_Contacts()
        assert_equal(len(CA1.timeseries), self.universe.trajectory.n_frames)
    def test_end_zero(self):
        """test_end_zero: TestContactAnalysis1: stop frame 0 is not ignored"""
        CA1 = self._run_Contacts(stop=0)
        assert_equal(len(CA1.timeseries), 0)
    def test_slicing(self):
        # slicing kwargs must be honoured: expect one entry per kept frame
        start, stop, step = 10, 30, 5
        CA1 = self._run_Contacts(start=start, stop=stop, step=step)
        frames = np.arange(self.universe.trajectory.n_frames)[start:stop:step]
        assert_equal(len(CA1.timeseries), len(frames))
    @staticmethod
    def test_villin_folded():
        # one folded, one unfolded: compare the soft_cut method against the
        # pure-python reference implementation above
        f = mda.Universe(contacts_villin_folded)
        u = mda.Universe(contacts_villin_unfolded)
        sel = "protein and not name H*"
        grF = f.select_atoms(sel)
        q = contacts.Contacts(u,
                              selection=(sel, sel),
                              refgroup=(grF, grF),
                              method="soft_cut")
        q.run()
        results = soft_cut(f, u, sel, sel)
        assert_almost_equal(q.timeseries[:, 1], results[:, 1])
    @staticmethod
    def test_villin_unfolded():
        # both folded: reference and trajectory are the same structure
        f = mda.Universe(contacts_villin_folded)
        u = mda.Universe(contacts_villin_folded)
        sel = "protein and not name H*"
        grF = f.select_atoms(sel)
        q = contacts.Contacts(u,
                              selection=(sel, sel),
                              refgroup=(grF, grF),
                              method="soft_cut")
        q.run()
        results = soft_cut(f, u, sel, sel)
        assert_almost_equal(q.timeseries[:, 1], results[:, 1])
    def test_hard_cut_method(self):
        # regression fixture: per-frame fraction of retained contacts with
        # the default hard-cut method
        ca = self._run_Contacts()
        expected = [1., 0.58252427, 0.52427184, 0.55339806, 0.54368932,
                    0.54368932, 0.51456311, 0.46601942, 0.48543689, 0.52427184,
                    0.46601942, 0.58252427, 0.51456311, 0.48543689, 0.48543689,
                    0.48543689, 0.46601942, 0.51456311, 0.49514563, 0.49514563,
                    0.45631068, 0.47572816, 0.49514563, 0.50485437, 0.53398058,
                    0.50485437, 0.51456311, 0.51456311, 0.49514563, 0.49514563,
                    0.54368932, 0.50485437, 0.48543689, 0.55339806, 0.45631068,
                    0.46601942, 0.53398058, 0.53398058, 0.46601942, 0.52427184,
                    0.45631068, 0.46601942, 0.47572816, 0.46601942, 0.45631068,
                    0.47572816, 0.45631068, 0.48543689, 0.4368932, 0.4368932,
                    0.45631068, 0.50485437, 0.41747573, 0.4368932, 0.51456311,
                    0.47572816, 0.46601942, 0.46601942, 0.47572816, 0.47572816,
                    0.46601942, 0.45631068, 0.44660194, 0.47572816, 0.48543689,
                    0.47572816, 0.42718447, 0.40776699, 0.37864078, 0.42718447,
                    0.45631068, 0.4368932, 0.4368932, 0.45631068, 0.4368932,
                    0.46601942, 0.45631068, 0.48543689, 0.44660194, 0.44660194,
                    0.44660194, 0.42718447, 0.45631068, 0.44660194, 0.48543689,
                    0.48543689, 0.44660194, 0.4368932, 0.40776699, 0.41747573,
                    0.48543689, 0.45631068, 0.46601942, 0.47572816, 0.51456311,
                    0.45631068, 0.37864078, 0.42718447]
        assert_equal(len(ca.timeseries), len(expected))
        assert_array_almost_equal(ca.timeseries[:, 1], expected)
    @staticmethod
    def _is_any_closer(r, r0, dist=2.5):
        # custom "method" callable: True if any pair is closer than dist
        return np.any(r < dist)
    def test_own_method(self):
        # a user-supplied callable must be accepted as the contact method
        ca = self._run_Contacts(method=self._is_any_closer)
        bound_expected = [1., 1., 0., 1., 1., 0., 0., 1., 0., 1., 1., 0., 0.,
                          1., 0., 0., 0., 0., 1., 1., 0., 0., 0., 1., 0., 1.,
                          0., 1., 1., 0., 1., 1., 1., 0., 0., 0., 0., 1., 0.,
                          0., 1., 0., 1., 1., 1., 0., 1., 0., 0., 1., 1., 1.,
                          0., 1., 0., 1., 1., 0., 0., 0., 1., 1., 1., 0., 0.,
                          1., 0., 1., 1., 1., 1., 1., 1., 0., 1., 1., 0., 1.,
                          0., 0., 1., 1., 0., 0., 1., 1., 1., 0., 1., 0., 0.,
                          1., 0., 1., 1., 1., 1., 1.]
        assert_array_equal(ca.timeseries[:, 1], bound_expected)
    @staticmethod
    def _weird_own_method(r, r0):
        # deliberately returns something that cannot be cast to an array row
        return 'aaa'
    @raises(ValueError)
    def test_own_method_no_array_cast(self):
        self._run_Contacts(method=self._weird_own_method, stop=2)
    @raises(ValueError)
    def test_non_callable_method(self):
        self._run_Contacts(method=2, stop=2)
    def test_save(self):
        # NOTE(review): despite the '.npy' suffix, Contacts.save appears to
        # write a text file (it is read back with genfromtxt) -- confirm
        with tempdir.in_tempdir():
            ca = self._run_Contacts()
            ca.save('testfile.npy')
            saved = np.genfromtxt('testfile.npy')
            assert_array_almost_equal(ca.timeseries, saved)
@dec.skipif(parser_not_found('DCD'),
            'DCD parser not available. Are you using python 3?')
def test_q1q2():
    """Regression test for contacts.q1q2 against stored per-frame values.

    q1 is the fraction of native contacts relative to the first frame,
    q2 relative to the last frame; hence q1 starts at 1 and q2 ends at 1.
    """
    u = mda.Universe(PSF, DCD)
    q1q2 = contacts.q1q2(u, 'name CA', radius=8)
    q1q2.run()
    # column 1 of the timeseries: q1 (vs. first frame)
    q1_expected = [1., 0.98092643, 0.97366031, 0.97275204, 0.97002725,
                   0.97275204, 0.96276113, 0.96730245, 0.9582198, 0.96185286,
                   0.95367847, 0.96276113, 0.9582198, 0.95186194, 0.95367847,
                   0.95095368, 0.94187103, 0.95186194, 0.94277929, 0.94187103,
                   0.9373297, 0.93642144, 0.93097184, 0.93914623, 0.93278837,
                   0.93188011, 0.9373297, 0.93097184, 0.93188011, 0.92643052,
                   0.92824705, 0.92915531, 0.92643052, 0.92461399, 0.92279746,
                   0.92643052, 0.93278837, 0.93188011, 0.93369664, 0.9346049,
                   0.9373297, 0.94096276, 0.9400545, 0.93642144, 0.9373297,
                   0.9373297, 0.9400545, 0.93006358, 0.9400545, 0.93823797,
                   0.93914623, 0.93278837, 0.93097184, 0.93097184, 0.92733878,
                   0.92824705, 0.92279746, 0.92824705, 0.91825613, 0.92733878,
                   0.92643052, 0.92733878, 0.93278837, 0.92733878, 0.92824705,
                   0.93097184, 0.93278837, 0.93914623, 0.93097184, 0.9373297,
                   0.92915531, 0.93188011, 0.93551317, 0.94096276, 0.93642144,
                   0.93642144, 0.9346049, 0.93369664, 0.93369664, 0.93278837,
                   0.93006358, 0.93278837, 0.93006358, 0.9346049, 0.92824705,
                   0.93097184, 0.93006358, 0.93188011, 0.93278837, 0.93006358,
                   0.92915531, 0.92824705, 0.92733878, 0.92643052, 0.93188011,
                   0.93006358, 0.9346049, 0.93188011]
    assert_array_almost_equal(q1q2.timeseries[:, 1], q1_expected)
    # column 2 of the timeseries: q2 (vs. last frame)
    q2_expected = [0.94649446, 0.94926199, 0.95295203, 0.95110701, 0.94833948,
                   0.95479705, 0.94926199, 0.9501845, 0.94926199, 0.95387454,
                   0.95202952, 0.95110701, 0.94649446, 0.94095941, 0.94649446,
                   0.9400369, 0.94464945, 0.95202952, 0.94741697, 0.94649446,
                   0.94188192, 0.94188192, 0.93911439, 0.94464945, 0.9400369,
                   0.94095941, 0.94372694, 0.93726937, 0.93819188, 0.93357934,
                   0.93726937, 0.93911439, 0.93911439, 0.93450185, 0.93357934,
                   0.93265683, 0.93911439, 0.94372694, 0.93911439, 0.94649446,
                   0.94833948, 0.95110701, 0.95110701, 0.95295203, 0.94926199,
                   0.95110701, 0.94926199, 0.94741697, 0.95202952, 0.95202952,
                   0.95202952, 0.94741697, 0.94741697, 0.94926199, 0.94280443,
                   0.94741697, 0.94833948, 0.94833948, 0.9400369, 0.94649446,
                   0.94741697, 0.94926199, 0.95295203, 0.94926199, 0.9501845,
                   0.95664207, 0.95756458, 0.96309963, 0.95756458, 0.96217712,
                   0.95756458, 0.96217712, 0.96586716, 0.96863469, 0.96494465,
                   0.97232472, 0.97140221, 0.9695572, 0.97416974, 0.9695572,
                   0.96217712, 0.96771218, 0.9704797, 0.96771218, 0.9695572,
                   0.97140221, 0.97601476, 0.97693727, 0.98154982, 0.98431734,
                   0.97601476, 0.9797048, 0.98154982, 0.98062731, 0.98431734,
                   0.98616236, 0.9898524, 1.]
    assert_array_almost_equal(q1q2.timeseries[:, 2], q2_expected)
| alejob/mdanalysis | testsuite/MDAnalysisTests/analysis/test_contacts.py | Python | gpl-2.0 | 14,483 | [
"MDAnalysis"
] | f60a7d1b576ff2e9fc510f76aa253be7a8bc2c8c7c9f184e16807872a71cde0b |
# 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis- Hastings
# Added energy minimization
# Common imports
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r, alpha, beta):
    """Pade-Jastrow trial wave function for two electrons in a 2D
    harmonic-oscillator quantum dot:
        psi = exp(-alpha/2 (r1^2 + r2^2)) * exp(r12 / (1 + beta*r12))
    r is a (2, 2) array of particle coordinates."""
    dx = r[0, 0] - r[1, 0]
    dy = r[0, 1] - r[1, 1]
    r12 = sqrt(dx * dx + dy * dy)
    rsq1 = r[0, 0]**2 + r[0, 1]**2
    rsq2 = r[1, 0]**2 + r[1, 1]**2
    # correlation (Jastrow) exponent
    jastrow = r12 / (1 + beta * r12)
    return exp(-0.5 * alpha * (rsq1 + rsq2) + jastrow)
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r, alpha, beta):
    """Analytical local energy E_L = (H psi)/psi for the two-electron
    quantum dot with the Pade-Jastrow trial wave function."""
    rsq1 = r[0, 0]**2 + r[0, 1]**2
    rsq2 = r[1, 0]**2 + r[1, 1]**2
    dx = r[0, 0] - r[1, 0]
    dy = r[0, 1] - r[1, 1]
    r12 = sqrt(dx * dx + dy * dy)
    deno = 1.0 / (1 + beta * r12)
    deno2 = deno * deno
    # harmonic oscillator + kinetic single-particle contribution
    harmonic = 0.5 * (1 - alpha * alpha) * (rsq1 + rsq2) + 2.0 * alpha
    # electron-electron Coulomb repulsion
    coulomb = 1.0 / r12
    # correlation (Jastrow) correction terms
    jastrow = deno2 * (alpha * r12 - deno2 + 2 * beta * deno - 1.0 / r12)
    return harmonic + coulomb + jastrow
# Derivate of wave function ansatz as function of variational parameters
def DerivativeWFansatz(r, alpha, beta):
    """Gradient of ln(psi_T) with respect to the variational parameters
    (alpha, beta); returned as a length-2 array."""
    dx = r[0, 0] - r[1, 0]
    dy = r[0, 1] - r[1, 1]
    r12 = sqrt(dx * dx + dy * dy)
    deno = 1.0 / (1 + beta * r12)
    gradient = np.zeros((2), np.double)
    rsq1 = r[0, 0]**2 + r[0, 1]**2
    rsq2 = r[1, 0]**2 + r[1, 1]**2
    # d ln(psi)/d alpha = -(r1^2 + r2^2)/2
    gradient[0] = -0.5 * (rsq1 + rsq2)
    # d ln(psi)/d beta = -r12^2 / (1 + beta*r12)^2
    gradient[1] = -r12 * r12 * deno * deno
    return gradient
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r,alpha,beta):
    """Quantum force F = 2 (grad psi)/psi for the two-electron trial
    wave function.

    For psi = exp(-alpha/2 (r1^2+r2^2) + r12/(1+beta*r12)) the gradient
    with respect to particle 1 is
        F_1 = -2*alpha*r_1 + 2*(r_1 - r_2) / (r12 * (1 + beta*r12)**2)
    and symmetrically for particle 2.  The previous implementation
    MULTIPLIED the harmonic and Jastrow contributions instead of adding
    them, which is analytically (and dimensionally) wrong.
    """
    # allocate from r.shape so the function no longer depends on the
    # module-level NumberParticles/Dimension globals
    qforce = np.zeros(r.shape, np.double)
    r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
    deno = 1.0/(1+beta*r12)
    # harmonic (Gaussian) drift plus repulsive Jastrow drift along r1 - r2
    qforce[0,:] = -2*alpha*r[0,:] + 2*(r[0,:]-r[1,:])*deno*deno/r12
    qforce[1,:] = -2*alpha*r[1,:] + 2*(r[1,:]-r[0,:])*deno*deno/r12
    return qforce
# Computing the derivative of the energy and the energy
def EnergyMinimization(alpha, beta):
    """Run one importance-sampled VMC estimate for the given variational
    parameters (alpha, beta).

    Returns (energy, EnergyDer) where EnergyDer is the gradient of the
    energy with respect to (alpha, beta), computed from the estimator
    2*(<E_L * dlnPsi> - <dlnPsi><E_L>).

    Relies on the module-level globals NumberParticles and Dimension and
    on WaveFunction/LocalEnergy/DerivativeWFansatz/QuantumForce above.
    """
    NumberMCcycles= 10000
    # Parameters in the Fokker-Planck simulation of the quantum force
    D = 0.5
    TimeStep = 0.05
    # positions
    PositionOld = np.zeros((NumberParticles,Dimension), np.double)
    PositionNew = np.zeros((NumberParticles,Dimension), np.double)
    # Quantum force
    QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
    QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
    # seed for rng generator
    seed()
    # accumulators for <E_L>, <dlnPsi> and <E_L dlnPsi>
    energy = 0.0
    DeltaE = 0.0
    EnergyDer = np.zeros((2), np.double)
    DeltaPsi = np.zeros((2), np.double)
    DerivativePsiE = np.zeros((2), np.double)
    #Initial position
    for i in range(NumberParticles):
        for j in range(Dimension):
            PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
    wfold = WaveFunction(PositionOld,alpha,beta)
    QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
    #Loop over MC MCcycles
    for MCcycle in range(NumberMCcycles):
        #Trial position moving one particle at the time
        for i in range(NumberParticles):
            # Langevin-type proposal: Gaussian step plus drift term
            for j in range(Dimension):
                PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
                                   QuantumForceOld[i,j]*TimeStep*D
            wfnew = WaveFunction(PositionNew,alpha,beta)
            QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
            # log of the ratio of forward/backward Green's functions for
            # the Metropolis-Hastings acceptance probability
            GreensFunction = 0.0
            for j in range(Dimension):
                GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
                                  (D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
                                  PositionNew[i,j]+PositionOld[i,j])
            GreensFunction = exp(GreensFunction)
            ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
            #Metropolis-Hastings test to see whether we accept the move
            if random() <= ProbabilityRatio:
                for j in range(Dimension):
                    PositionOld[i,j] = PositionNew[i,j]
                    QuantumForceOld[i,j] = QuantumForceNew[i,j]
                wfold = wfnew
        # sample the local energy and parameter derivatives once per cycle
        DeltaE = LocalEnergy(PositionOld,alpha,beta)
        DerPsi = DerivativeWFansatz(PositionOld,alpha,beta)
        DeltaPsi += DerPsi
        energy += DeltaE
        DerivativePsiE += DerPsi*DeltaE
    # We calculate mean values
    energy /= NumberMCcycles
    DerivativePsiE /= NumberMCcycles
    DeltaPsi /= NumberMCcycles
    EnergyDer  = 2*(DerivativePsiE-DeltaPsi*energy)
    return energy, EnergyDer
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# guess for variational parameters
alpha = 0.95
beta = 0.3
# Set up iteration using stochastic gradient method
Energy = 0
EDerivative = np.zeros((2), np.double)
# Learning rate eta, max iterations, need to change to adaptive learning rate
eta = 0.01
MaxIterations = 50
# renamed from `iter` so the builtin iter() is not shadowed
iteration = 0
Energies = np.zeros(MaxIterations)
EnergyDerivatives1 = np.zeros(MaxIterations)
EnergyDerivatives2 = np.zeros(MaxIterations)
AlphaValues = np.zeros(MaxIterations)
BetaValues = np.zeros(MaxIterations)
while iteration < MaxIterations:
    # one Monte Carlo estimate of the energy and its parameter gradient
    Energy, EDerivative = EnergyMinimization(alpha, beta)
    # plain steepest-descent update of the variational parameters
    alphagradient = EDerivative[0]
    betagradient = EDerivative[1]
    alpha -= eta*alphagradient
    beta -= eta*betagradient
    # record the history for the summary table below
    Energies[iteration] = Energy
    EnergyDerivatives1[iteration] = EDerivative[0]
    EnergyDerivatives2[iteration] = EDerivative[1]
    AlphaValues[iteration] = alpha
    BetaValues[iteration] = beta
    iteration += 1
#nice printout with Pandas
import pandas as pd
from pandas import DataFrame
# full option key 'display.max_columns' works on both old and new pandas;
# the short form 'max_columns' is deprecated/removed in recent versions
pd.set_option('display.max_columns', 6)
data = {'Alpha': AlphaValues, 'Beta': BetaValues, 'Energy': Energies,
        'Alpha Derivative': EnergyDerivatives1,
        'Beta Derivative': EnergyDerivatives2}
frame = pd.DataFrame(data)
print(frame)
| CompPhysics/ComputationalPhysics2 | doc/src/MCsummary/src/qdoteminim.py | Python | cc0-1.0 | 5,916 | [
"Gaussian"
] | bccdb1ddbcabcd778f43adf4fdd17126fe42d6f4f1963d3735f1543139d81c76 |
#===============================================================================
# LICENSE XOT-Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
from datetime import datetime
import xbmcgui
import controls
import contextmenu
import channelgui
import settings
import addonsettings
import guicontroller
import envcontroller
import update
import updater
from config import Config
from environments import Environments
from helpers.stopwatch import StopWatch
from helpers.channelimporter import ChannelImporter
from helpers.languagehelper import LanguageHelper
from helpers.statistics import Statistics
from logger import Logger
#noinspection PyMethodOverriding
class GUI(xbmcgui.WindowXML):
"""This class is the GUI representation of the window that shows the channels
and the related programs for that channel.
"""
    #noinspection PyUnusedLocal,PyMissingConstructor
    def __init__(self, strXMLname, strFallbackPath, strDefaultName, bforeFallback=0):
        """Initialisation of the class. All class variables should be instantiated here

        WindowXMLDialog(self, xmlFilename, scriptPath[, defaultSkin, defaultRes]) -- Create a new WindowXMLDialog script.

        xmlFilename : string - the name of the xml file to look for.
        scriptPath : string - path to script. used to fallback to if the xml doesn't exist in the current skin. (eg os.getcwd())
        defaultSkin : [opt] string - name of the folder in the skins path to look in for the xml. (default='Default')
        defaultRes : [opt] string - default skins resolution. (default='720p')

        *Note, skin folder structure is eg(resources/skins/Default/720p)

        Any exception during construction is only logged; the window is
        still created (possibly partially initialised).
        """
        try:
            # window state: current list content, init flag, context menu toggle
            self.mainlistItems = []
            self.initialised = False
            self.contextMenu = True
            self.channelGUIs = []
            self.channelButtonRegister = []
            self.activeChannelGUI = None
            self.selectedChannelIndex = 0
            self.listMode = ProgListModes.Normal  # ProgListModes.Normal=normal, ProgListModes.Favorites=favorites
            self.combinedScreen = False
            # create the main episode window
            self.episodeWindow = channelgui.ChannelGui(Config.appChannelSkin, Config.rootDir, Config.skinFolder)
            # get the channels
            self.channelGUIs = ChannelImporter.GetRegister().GetChannels()
            # now that they are ordered: get the buttoncodes. So the order in the buttoncode
            # list is the same!
            for channel in self.channelGUIs:
                if channel.buttonID > 0:
                    self.channelButtonRegister.append(channel.buttonID)
            # panel view is only possible when no skin buttons were registered
            self.panelViewEnabled = self.channelButtonRegister == []
            # gui controller
            self.guiController = guicontroller.GuiController(self)
            # set the background
            self.guiController.SetBackground(addonsettings.AddonSettings().BackgroundImageChannels())
            Logger.Info("Starting %s ProgWindow with Fallback=%s and DefaultName=%s", Config.appName, strFallbackPath, strDefaultName)
        except:
            Logger.Debug("Error in __init__ of ProgWindow", exc_info=True)
    def onInit(self):
        """Initialisation of class after the GUI has been loaded.

        Runs every time the window becomes active; the heavy one-time setup
        is guarded by the ``self.initialised`` flag.
        """
        try:
            Logger.Debug("ProgWindow :: OnInit")
            # check, if there are buttons registerd, and if there are, if
            # the buttoncount is the same as the channelcount
            if self.channelButtonRegister:
                if len(self.channelButtonRegister) != len(ChannelImporter.GetRegister().GetChannels()):
                    Logger.Critical("The number of buttons that were registered is not the same as the number of channels")
                    self.close()
            if not self.initialised:
                Logger.Debug("Doing first initialisation of ProgWindow")
                # hide programlist
                self.ChannelListVisible(True)
                self.combinedScreen = self.DetectDualMode()
                # set initialvalues
                # self.DisplayGUIs()
                # Check if the selected index is still valid?
                self.selectedChannelIndex = self.getCurrentListPosition()
                # Logger.Debug("Current ChannelIndex: %s", self.selectedChannelIndex)
                if self.selectedChannelIndex >= len(self.channelGUIs):
                    Logger.Warning("Current ChannelIndex is too large (index %s >= len %s). Resetting to 0", self.selectedChannelIndex, len(self.channelGUIs))
                    self.selectedChannelIndex = 0
                self.activeChannelGUI = self.channelGUIs[self.selectedChannelIndex]
                if self.selectedChannelIndex > 0:
                    self.guiController.SetChannelProperties(self.activeChannelGUI)
                # else:
                #     colorDiff = settings.AddonSettings().GetDimPercentage()
                #     Logger.Debug("Setting DimBackground to %s", colorDiff)
                #     self.setProperty("XOT_DimBackground", colorDiff)
                self.initialised = True
            # this is a work around for a recent bug that was introduced
            if self.getControl(controls.CH_LIST).size() < 1:
                Logger.Debug("Somehow the list was cleared...filling it again")
                self.DisplayGUIs()
                self.setCurrentListPosition(self.selectedChannelIndex)
        except:
            Logger.Error("Error Initializing Progwindow", exc_info=True)
    def onAction(self, action):
        """Handles the user <action> for the channelGUI.

        Arguments:
        action : Action - The action that was done.

        Action Method for handling all <action>s except the clicking. This one should only
        be inherited, not overwritten.

        Any unexpected exception closes the window to avoid a stuck GUI.
        """
        try:
            # get the FocusID
            try:
                controlID = self.getFocusId()
            except:
                Logger.Error("Unknown focusID for action ID: %s and ButtonCode: %s", action.getId(), action.getButtonCode())
                return
            #===============================================================================
            # Handle Back actions
            #===============================================================================
            if action in controls.ACTION_BACK_CONTROLS or action in controls.ACTION_EXIT_CONTROLS:
                Logger.Debug("Going back a level")
                # close completely when there is nothing to go "back" to:
                # no program list shown, no panel view, or combined screen
                if self.mainlistItems == [] or not self.panelViewEnabled or self.combinedScreen:
                    Logger.Debug("Closing ProgWindow")
                    self.close()
                else:
                    # hide programlist and show channelpannel
                    Logger.Debug("Switching ProgWindow Mode")
                    self.activeChannelGUI = None
                    self.mainlistItems = []
                    self.ChannelListVisible(True)
                    self.listMode = ProgListModes.Normal
            elif action in controls.ACTION_CONTEXT_MENU_CONTROLS:
                Logger.Debug("Showing contextmenu")
                self.onActionFromContextMenu(controlID)
            #===============================================================================
            # Handle UP/Down on mainlist
            #===============================================================================
            # elif (action in controls.ACTION_UPDOWN or action in controls.ACTION_LEFTRIGHT or action in controls.ACTION_MOUSE_MOVEMENT) and controlID == controls.CH_LIST and (self.mainlistItems == [] or self.combinedScreen):
            elif (action in controls.ACTION_UPDOWN or action in controls.ACTION_LEFTRIGHT) and controlID == controls.CH_LIST and (self.mainlistItems == [] or self.combinedScreen):
                # Determine the active channel only when EP_LIST is in focus
                # self.selectedChannelIndex = self.getCurrentListPosition()
                # self.activeChannelGUI = self.channelGUIs[self.selectedChannelIndex]
                if not self.combinedScreen:
                    self.guiController.SetChannelProperties(self.activeChannelGUI)
                # self.ShowChannelInfo()
            #===============================================================================
            # Handle onClicks
            #===============================================================================
            # elif action == controls.ACTION_SELECT_ITEM:
            #     Logger.Debug("Progwindow :: Performing a SelectItem")
            #     # handle the onClicks. Because we use a WrapList the onClick also triggers
            #     # an onAction, causing some problems. That is why we handle onclicks here now.
            #     # normally the onClick occurs and then the onAction
            #     #self.onSelect(controlID)
            else:
                # ignore plain mouse movement; log anything truly unknown
                if not action.getId() in controls.ACTION_MOUSE_MOVEMENT:
                    Logger.Critical("OnAction::unknow action (id=%s). Do not know what to do", action.getId())
        except:
            Logger.Critical("OnAction Error", exc_info=True)
            self.close()
    def onSelect(self, controlID):
        """Handles the onSelect from the GUI

        Arguments:
        controlID : integer - the ID of the control that got the focus.

        Dispatches a select/OK on one of the channel or program lists, or
        on one of the skin-registered channel buttons.
        """
        Logger.Debug("onSelect on ControlID=%s", controlID)
        #===============================================================================
        # Handle main lists
        #===============================================================================
        if controls.EP_LIST <= controlID <= controls.EP_LIST + 9:  # and self.mainlistItems==[]:
            startTime = datetime.now()
            # set the active channel in case no up/down was done!
            self.mainlistItems = []
            self.listMode = ProgListModes.Normal
            self.selectedChannelIndex = self.getCurrentListPosition()
            self.activeChannelGUI = self.channelGUIs[self.selectedChannelIndex]
            self.guiController.SetChannelProperties(self.activeChannelGUI)
            # Get MainlistItems
            # remember whether this channel still had to be fetched, so the
            # statistics only count a real "open"
            didWeStartEmpty = len(self.activeChannelGUI.mainListItems) == 0
            try:
                self.mainlistItems = self.activeChannelGUI.ParseMainList()
            except:
                Logger.Error("Error fetching mainlist", exc_info=True)
                self.mainlistItems = []
            self.ShowListItems(self.mainlistItems)
            # hide Main ChannelList and show ProgramList
            self.ChannelListVisible(False)
            # call for statistics
            if didWeStartEmpty:
                Statistics.RegisterChannelOpen(self.activeChannelGUI, startTime)
        # if mainlist is not empty, then the episodewindow should be dispalyed
        elif controlID == controls.PR_LIST and self.mainlistItems != []:
            selectedPosition = self.getControl(controls.PR_LIST).getSelectedPosition()
            if self.listMode == ProgListModes.Favorites:
                # guard against a stale favorites list
                if selectedPosition > len(self.favoriteItems):
                    Logger.Error("Favorites list does not have %s items, so item %s cannot be selected", selectedPosition, selectedPosition)
                    return
                selectedItem = self.favoriteItems[selectedPosition]
            else:
                selectedItem = self.mainlistItems[selectedPosition]
            Logger.Info('opening episode list GUI with item = %s', selectedItem)
            self.episodeWindow.ShowChannelWithUrl(self.activeChannelGUI, selectedItem)
        #===============================================================================
        # check if a button that was registered was pressed!
        #===============================================================================
        elif controlID in self.channelButtonRegister:
            # set the active channel in case no up/down was done!
            # buttons were registered in channel order, so the index matches
            self.selectedChannelIndex = self.channelButtonRegister.index(controlID)
            self.activeChannelGUI = self.channelGUIs[self.selectedChannelIndex]
            self.getControl(controls.PR_LIST).reset()
            self.ChannelListVisible(False)
            # Get MainlistItems
            self.mainlistItems = self.activeChannelGUI.ParseMainList()
            for m in self.mainlistItems:
                self.getControl(controls.PR_LIST).addItem(xbmcgui.ListItem(m.name, "", m.icon, m.icon))
def onClick(self, controlID):
"""Handles the clicking of an item in control with <controlID>.
Arguments:
controlID : integer - the ID of the control that got the click.
This method is used to catch the clicking (Select/OK) in the lists. It then
calls the correct methods.
"""
try:
Logger.Debug("Progwindow :: onClick ControlID=%s", controlID)
self.onSelect(controlID)
except:
Logger.Critical("Error handling onClick on controlID=%s", controlID, exc_info=True)
def onFocus(self, controlID):
"""Handles focus changes to a control with <controlID>.
Arguments:
controlID : integer - the ID of the control that got the focus.
"""
try:
# Logger.Debug("onFocus :: Control %s has focus now", controlID)
pass
except:
Logger.Critical("Error handling onFocus on ControlID=%s", controlID, exc_info=True)
def close(self):
"""close(self) -- Closes this window.
Closes this window by activating the old window.
The window is not deleted with this method.
Also the logfile is closed here.
"""
Logger.Instance().CloseLog()
xbmcgui.WindowXML.close(self)
def getCurrentListPosition(self):
"""overload method to get stuff working in some rare x64 cases
There are some issues with the return value -1 from the xbmcgui method
xbmcgui.WindowXML.getCurrentListPosition(). In some x64 cases it returns
the value 4294967295 (0xFFFFFFFF) instead of -1. This method catches
this issue and returns "value - 0x100000000" is >= -1.
Otherwise it just returns xbmcgui.WindowXML.getCurrentListPosition()
"""
position = xbmcgui.WindowXML.getCurrentListPosition(self)
possiblePosition = position - 0x100000000
if possiblePosition >= -1:
Logger.Warning("CurrentListPosition is too large (%s). New value determined: %s", position, possiblePosition)
return possiblePosition
return position
#===============================================================================
# Contextmenu stuff
#===============================================================================
#noinspection PyUnboundLocalVariable
def onActionFromContextMenu(self, controlID):
"""Handles the actions that were chosen from the contectmenu."""
if self.contextMenu is False:
return None
contextMenuItems = []
# determine who called the menu
if controlID != controls.CH_LIST:
selectedIndex = self.getControl(controls.PR_LIST).getSelectedPosition()
parentControl = self.getFocus()
# determine if favorites are enabled
favs = LanguageHelper.GetLocalizedString(LanguageHelper.FavouritesId)
if self.listMode == ProgListModes.Normal:
show = LanguageHelper.GetLocalizedString(LanguageHelper.ShowId)
add = LanguageHelper.GetLocalizedString(LanguageHelper.AddToId)
contextMenuItems.append(contextmenu.ContextMenuItem("%s %s" % (show, favs), "CtMnShowFavorites"))
contextMenuItems.append(contextmenu.ContextMenuItem("%s %s" % (add, favs), "CtMnAddToFavorites"))
# add the refresh button
contextMenuItems.append(contextmenu.ContextMenuItem(LanguageHelper.GetLocalizedString(LanguageHelper.RefreshListId), "CtMnRefresh"))
else:
hide = LanguageHelper.GetLocalizedString(LanguageHelper.HideId)
remove = LanguageHelper.GetLocalizedString(LanguageHelper.RemoveId)
fav = LanguageHelper.GetLocalizedString(LanguageHelper.FavouriteId)
contextMenuItems.append(contextmenu.ContextMenuItem("%s %s" % (hide, favs), "CtMnHideFavorites"))
contextMenuItems.append(contextmenu.ContextMenuItem("%s %s" % (remove, fav), "CtMnRemoveFromFavorites"))
# add the refresh button for favorites
contextMenuItems.append(contextmenu.ContextMenuItem(LanguageHelper.GetLocalizedString(LanguageHelper.RefreshListId), "CtMnShowFavorites"))
if controlID == controls.CH_LIST or self.combinedScreen:
if controlID == controls.CH_LIST:
# channel list, so pass the channelindex
selectedIndex = self.getCurrentListPosition()
else:
# combined screen, pass the index of the program
selectedIndex = self.getControl(controls.PR_LIST).getSelectedPosition()
parentControl = self.getFocus() # self.getControl(controls.CH_LIST_WRAPPER)
if envcontroller.EnvController.IsPlatform(Environments.Xbox):
langChannel = LanguageHelper.GetLocalizedString(LanguageHelper.ChannelsId)
contextMenuItems.append(contextmenu.ContextMenuItem("Update %s" % (langChannel,), "CtMnUpdateChannels"))
contextMenuItems.append(contextmenu.ContextMenuItem(LanguageHelper.GetLocalizedString(LanguageHelper.CheckUpdatesId), "CtMnUpdateXOT"))
contextMenuItems.append(contextmenu.ContextMenuItem(LanguageHelper.GetLocalizedString(LanguageHelper.AddOnSettingsId), "CtMnSettingsXOT"))
# build menuitems
contextMenu = contextmenu.GUI(Config.contextMenuSkin, Config.rootDir, Config.skinFolder, parent=parentControl, menuItems=contextMenuItems)
selectedItem = contextMenu.selectedItem
del contextMenu
# handle function from items
if selectedItem is not None and selectedItem > -1:
selectedMenuItem = contextMenuItems[selectedItem]
functionString = "self.%s(%s)" % (selectedMenuItem.functionName, selectedIndex)
Logger.Debug("Calling %s", functionString)
try:
exec functionString
except:
Logger.Error("onActionFromContextMenu :: Cannot execute '%s'.", functionString, exc_info=True)
return None
#noinspection PyUnusedLocal
def CtMnShowFavorites(self, selectedIndex):
    """Shows the favorites for the selected channel

    Arguments:
    selectedIndex : integer - the index of the currently selected item
                              for the channel. Not used here.

    """
    # switch the list into favorites mode so other context-menu handlers
    # (e.g. CtMnRemoveFromFavorites) operate on self.favoriteItems
    self.listMode = ProgListModes.Favorites

    # Get Favorites
    self.favoriteItems = settings.LoadFavorites(self.activeChannelGUI)
    self.ShowListItems(self.favoriteItems)
#noinspection PyUnusedLocal
def CtMnHideFavorites(self, selectedIndex):
    """Hides the favorites for the selected channel

    Arguments:
    selectedIndex : integer - the index of the currently selected item
                              for the channel. Not used here.

    """
    # back to normal mode and re-show the channel's main list
    self.listMode = ProgListModes.Normal
    self.ShowListItems(self.mainlistItems)
def CtMnAddToFavorites(self, selectedIndex):
    """Add the selected item to the favorites for the selected channel

    Arguments:
    selectedIndex : integer - the index of the currently selected item

    """
    # persist the currently selected main-list item as a favorite
    settings.AddToFavorites(self.mainlistItems[selectedIndex], self.activeChannelGUI)
def CtMnRemoveFromFavorites(self, selectedIndex):
    """Remove the selected item from the favorites for the selected channel

    Arguments:
    selectedIndex : integer - the index of the currently selected item

    """
    settings.RemoveFromFavorites(self.favoriteItems[selectedIndex], self.activeChannelGUI)

    # reload the items so the visible list reflects the removal
    self.favoriteItems = settings.LoadFavorites(self.activeChannelGUI)
    self.ShowListItems(self.favoriteItems)
#noinspection PyUnusedLocal
def CtMnUpdateXOT(self, selectedIndex):
    """Checks for new XOT framework updates.

    Shows a popup if a new version is available.

    Arguments:
    selectedIndex : integer - the index of the currently selected item this
                              one is not used here.

    """
    # presumably verbose=True makes CheckVersion show the result popup
    # mentioned in the docstring -- TODO confirm in the update module
    update.CheckVersion(Config.version, Config.updateUrl, verbose=True)
#noinspection PyUnusedLocal
def CtMnRefresh(self, selectedIndex):
    """Refreshes the currently shown list

    Arguments:
    selectedIndex : integer - the index of the currently selected item this
                              one is not used here.

    """
    Logger.Debug("Refreshing current list")
    # PEP 8: "x is not None" instead of "not x is None"
    if self.activeChannelGUI is not None:
        # drop the cached items so ParseMainList re-fetches the list
        self.activeChannelGUI.mainListItems = []
        try:
            self.mainlistItems = self.activeChannelGUI.ParseMainList()
        except:
            # best effort (matches the file's error style): log the
            # failure and fall back to an empty list
            Logger.Error("Error fetching mainlist", exc_info=True)
            self.mainlistItems = []
        self.ShowListItems(self.mainlistItems)
    else:
        Logger.Debug("Cannot refresh a list without a channel.")
#noinspection PyUnusedLocal
def CtMnSettingsXOT(self, selectedIndex):
    """Shows the Add-On Settings dialog.

    Arguments:
    selectedIndex : integer - the index of the currently selected item this
                              one is not used here.

    """
    addonsettings.AddonSettings().ShowSettings()

    # set the background, in case it changed
    # NOTE(review): a fresh AddonSettings instance is created here,
    # presumably so values changed in the dialog are re-read -- confirm
    # before collapsing the two constructions into one instance
    self.guiController.SetBackground(addonsettings.AddonSettings().BackgroundImageChannels())
    return
#noinspection PyUnusedLocal
def CtMnUpdateChannels(self, selectedIndex):
    """Shows the XOT Channel update dialog (only for XBMC4Xbox).

    Arguments:
    selectedIndex : integer - the index of the currently selected item this
                              one is not used here.

    """
    # run the updater dialog modally, then release it
    updateDialog = updater.Updater(Config.updaterSkin, Config.rootDir,
                                   Config.skinFolder)
    updateDialog.doModal()
    del updateDialog
#===============================================================================
# Fill the channels
#===============================================================================
def DisplayGUIs(self):
    """Shows the channels that are available in XOT."""
    timer = StopWatch("Progwindow :: showing channels", Logger.Instance())
    self.clearList()

    # lock the GUI while the channel list is rebuilt
    # NOTE(review): xbmcgui.lock()/unlock() is an old XBMC API -- confirm
    # it still exists on the targeted platform
    xbmcgui.lock()
    try:
        for channelGUI in self.channelGUIs:
            tmp = xbmcgui.ListItem(channelGUI.channelName, "", channelGUI.icon, channelGUI.iconLarge)
            tmp.setProperty("XOT_ChannelDescription", channelGUI.channelDescription)
            Logger.Trace("Adding %s", channelGUI.channelName)
            self.addItem(tmp)
    finally:
        # always unlock, even if adding an item failed
        xbmcgui.unlock()
    timer.Stop()
def ShowListItems(self, items):
    """Displays a list of items in the Program list.

    Arguments:
    items : list[MediaItem] - the MediaItems to show in the list.

    """
    # delegate the actual rendering to the GuiController
    guiController = guicontroller.GuiController(self)
    guiController.DisplayProgramList(items)
#===============================================================================
def DetectDualMode(self):
    """Detects if there are 2 lists to show/hide or just a combined screen

    XOT 3.2.4 introduced a combined Channel/Program window. This method
    detects the old situation (separated lists) or the new situation
    (combined screen).

    Returns:
    True if a combination screen is available.

    """
    try:
        # probing the wrapper controls raises TypeError/RuntimeError
        # when the skin does not define them
        self.getControl(controls.CH_LIST_WRAPPER).getId()
        self.getControl(controls.PR_LIST_WRAPPER).getId()
        Logger.Debug("Progwindow :: seperate lists are available")
        return False
    except (TypeError, RuntimeError):
        Logger.Debug("Progwindow :: combined screen is available")
        return True
def ChannelListVisible(self, visibility):
    """Shows or hides the channels

    Arguments:
    visibility : boolean - Whether the channels should be visible or not.

    If the <visibility> is set to True, then the channels are shown and the
    program list is hidden.

    """
    try:
        if visibility:
            Logger.Debug("Showing Channels")
            self.getControl(controls.CH_LIST_WRAPPER).setVisible(True)
            # self.getControl(controls.PR_LIST_WRAPPER).setVisible(False)
            self.setFocusId(controls.CH_LIST_WRAPPER)
        else:
            Logger.Debug("Hiding Channels")
            self.getControl(controls.CH_LIST_WRAPPER).setVisible(False)
            # self.getControl(controls.PR_LIST_WRAPPER).setVisible(True)
    except (TypeError, RuntimeError):
        # if no wrappers are available, don't do anything
        pass

    # focus follows visibility regardless of wrapper availability
    if visibility:
        self.setFocusId(controls.CH_LIST)
    else:
        self.setFocusId(controls.PR_LIST)
    return
#===============================================================================
# Progwindow Enumeration
#===============================================================================
class ProgListModes:
    """C#-style enumeration of the program-list display modes.

    The class is never instantiated; its class attributes are the
    enumeration values.
    """

    # enumeration values
    Normal = 1
    Favorites = 2

    def __init__(self):
        raise NotImplementedError("Enums only")
| SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/net.rieter.xot.smallplayer/resources/libs/progwindow.py | Python | gpl-2.0 | 27,145 | [
"VisIt"
] | 56cd71ae87976575c579dd768751366c34df9d38ce8f90cae0de135bc427e86d |
"""
menu.py
Class instance used to create menu for ARTview app.
"""
import numpy as np
import pyart
import os
import sys
from ..core import Variable, Component, common, QtGui, QtCore, componentsList
class Menu(Component):
    '''Class to display the MainMenu.

    Top-level ARTview window: hosts the menubar, the central tab/layout
    area holding component widgets, and the file open/advance machinery.
    '''

    # shared ARTview variables holding the currently open radar / grid
    Vradar = None  #: see :ref:`shared_variable`
    Vgrid = None  #: see :ref:`shared_variable`
# NOTE(review): mutable default 'mode' is only iterated, never mutated,
# so the shared-default pitfall does not bite here
def __init__(self, pathDir=None, filename=None, Vradar=None, Vgrid=None,
             mode=["Radar"], name="Menu", parent=None):
    '''
    Initialize the class to create the interface.

    Parameters
    ----------
    [Optional]
    pathDir : string
        Input directory path to open. If None user current directory
    filename : string, False or None
        File to open as first. None will open file dialog. False will
        open no file.
    Vradar : :py:class:`~artview.core.core.Variable` instance
        Radar signal variable.
        A value of None initializes an empty Variable.
    Vgrid : :py:class:`~artview.core.core.Variable` instance
        Grid signal variable.
        A value of None initializes an empty Variable.
    mode : list
        List with strings "Radar" or "Grid". Determine which type of files
        will be open
    name : string
        Menu name.
    parent : PyQt instance
        Parent instance to associate to menu.
        If None, then Qt owns, otherwise associated with parent PyQt
        instance.

    Notes
    -----
    This class creates the main application interface and creates
    a menubar for the program.
    '''
    super(Menu, self).__init__(name=name, parent=parent)

    # Set some parameters
    if pathDir is None:
        pathDir = os.getcwd()
    self.dirIn = pathDir
    self.fileindex = 0
    self.filelist = []
    # normalize mode strings to lower case for later membership tests
    self.mode = []
    for m in mode:
        self.mode.append(m.lower())
    self.Vradar = Vradar
    self.Vgrid = Vgrid
    # shared variables registered with no change callbacks
    self.sharedVariables = {"Vradar": None,
                            "Vgrid": None}

    # Show an "Open" dialog box and return the path to the selected file
    # Just do that if Vradar was not given
    if self.Vradar is None:
        self.Vradar = Variable(None)
    if self.Vgrid is None:
        self.Vgrid = Variable(None)
    if Vradar is None and Vgrid is None and self.mode:
        if filename is None:
            self.showFileDialog()
        elif filename is False:
            # caller explicitly requested no initial file
            pass
        else:
            self.filename = filename
            self._openfile()

    # Launch the GUI interface
    self.LaunchApp()
    self.resize(300, 180)
    self.show()
def keyPressEvent(self, event):
    '''Change data file with left and right arrow keys.'''
    if event.key() == QtCore.Qt.Key_Right:
        # Menu control the file and open the radar
        self.AdvanceFileSelect(self.fileindex + 1)
    elif event.key() == QtCore.Qt.Key_Left:
        # Menu control the file and open the radar
        self.AdvanceFileSelect(self.fileindex - 1)
    else:
        # pass all other keys to the default Qt handler
        QtGui.QWidget.keyPressEvent(self, event)
####################
# GUI methods #
####################
def LaunchApp(self):
    '''Launches a GUI interface.'''
    self.setAttribute(QtCore.Qt.WA_DeleteOnClose)

    # Create the menus
    self.CreateMenu()

    # Create layout
    if sys.version_info < (2, 7, 0):
        # old Python: a plain vertical layout of framed widgets,
        # managed through the Layout menu
        self.central_widget = QtGui.QWidget()
        self.setCentralWidget(self.central_widget)
        self.centralLayout = QtGui.QVBoxLayout(self.central_widget)
        self.centralLayout.setSpacing(8)
        self.frames = {}
        self.addLayoutMenu()
    else:
        # modern Python: closable, re-orderable tabs
        self.tabWidget = QtGui.QTabWidget()
        self.setCentralWidget(self.tabWidget)
        # self.tabWidget.setAcceptDrops(True)
        self.tabWidget.setTabsClosable(True)
        self.tabWidget.tabCloseRequested.connect(self.removeTab)
        self.tabWidget.tabBar().setMovable(True)
def removeTab(self, idx):
    '''Detach the tab at position *idx* and close its widget.'''
    closing = self.tabWidget.widget(idx)
    self.tabWidget.removeTab(idx)
    closing.close()
def showFileDialog(self):
    '''Open a dialog box to choose file.'''
    chosen = str(QtGui.QFileDialog.getOpenFileName(
        self, 'Open file', self.dirIn))
    # user pressed cancel -> empty string -> nothing to do
    if chosen == '':
        return
    self.filename = chosen
    self._openfile()
def saveRadar(self):
    '''
    Open a dialog box to save radar file.

    Writes the current :py:attr:`Vradar` value to Cf/Radial NetCDF;
    does nothing when no file name is chosen or no radar is loaded.
    '''
    filename = QtGui.QFileDialog.getSaveFileName(
        self, 'Save Radar File', self.dirIn)
    filename = str(filename)
    if filename == '' or self.Vradar.value is None:
        return
    else:
        pyart.io.write_cfradial(filename, self.Vradar.value)
        print("Saved %s" % (filename))
def saveGrid(self):
    '''Open a dialog box to save grid file.'''
    chosen = str(QtGui.QFileDialog.getSaveFileName(
        self, 'Save grid File', self.dirIn))
    # only write when a name was chosen and a grid is loaded
    if chosen != '' and self.Vgrid.value is not None:
        pyart.io.write_grid(chosen, self.Vgrid.value)
def addLayoutWidget(self, widget):
    '''
    Add a widget to central layout.
    This function is to be called both internal and external.
    '''
    if sys.version_info < (2, 7, 0):
        # wrap the widget in a framed box
        frame = QtGui.QFrame()
        frame.setFrameShape(QtGui.QFrame.Box)
        layout = QtGui.QVBoxLayout(frame)
        layout.addWidget(widget)
        # BUGFIX: register and add the *frame* (the original stored and
        # added the bare widget, orphaning the frame so the box border
        # never appeared and the frame leaked on removal);
        # removeLayoutWidget() looks the container up in self.frames
        self.frames[widget.__repr__()] = frame
        self.centralLayout.addWidget(frame)
        self.addLayoutMenuItem(widget)
        widget.show()
    else:
        # tabbed layout: one tab per component widget
        self.tabWidget.addTab(widget, widget.name)
def removeLayoutWidget(self, widget):
    '''Remove widget from central layout.'''
    # NOTE(review): only used by the pre-2.7 frame layout path
    # look up whatever container addLayoutWidget registered for widget
    frame = self.frames[widget.__repr__()]
    self.centralLayout.removeWidget(frame)
    self.removeLayoutMenuItem(widget)
    frame.close()
    widget.close()
    widget.deleteLater()
def addComponent(self, Comp, label=None):
    '''Add Component Constructor. If label is None, use class name.'''
    # first test the existence of a guiStart
    if not hasattr(Comp, 'guiStart'):
        raise ValueError("Component has no guiStart Method")
        # (unreachable "return" that followed the raise was removed)
    self.addPluginMenuItem(Comp)
######################
# Menu build methods #
######################
def menus(self):
    '''Return tuple (menubar, nested dict of its submenus) -- see subMenus().'''
    return (self.menubar, self.subMenus(self.menubar))
def subMenus(self, menu):
    ''' get submenu list of menu as dictionary. '''
    if menu is None:
        return None
    # map action title -> (submenu, recursively collected sub-submenus)
    return dict(
        (str(action.text()), (action.menu(), self.subMenus(action.menu())))
        for action in menu.actions())
def addMenuAction(self, position, *args):
    '''Add an action to the menu at *position* (sequence of menu
    titles), creating any missing intermediate submenus.'''
    menu, menus = self.menus()
    # walk down the nested submenu dict, extending where needed
    for key in position:
        if key in menus:
            menu, menus = menus[key]
        else:
            menu = menu.addMenu(key)
            menus = {}
    return menu.addAction(*args)
def CreateMenu(self):
    '''Create the main menubar.'''
    self.menubar = self.menuBar()
    # File, About and Change-file menus in display order
    self.addFileMenu()
    self.addAboutMenu()
    self.addFileAdvanceMenu()
def addFileMenu(self):
    '''Add the File Menu to menubar.'''
    self.filemenu = self.menubar.addMenu('File')

    # Open only makes sense when at least one file mode is configured
    if self.mode:
        openFile = QtGui.QAction('Open', self)
        openFile.setShortcut('Ctrl+O')
        openFile.setStatusTip('Open new File')
        openFile.triggered.connect(self.showFileDialog)
        self.filemenu.addAction(openFile)
    if "radar" in self.mode:
        saveRadar = QtGui.QAction('Save Radar', self)
        saveRadar.setStatusTip('Save Radar to Cf/Radial NetCDF')
        saveRadar.triggered.connect(self.saveRadar)
        self.filemenu.addAction(saveRadar)
    if "grid" in self.mode:
        saveGrid = QtGui.QAction('Save Grid', self)
        saveGrid.setStatusTip('Save Grid NetCDF')
        saveGrid.triggered.connect(self.saveGrid)
        self.filemenu.addAction(saveGrid)

    exitApp = QtGui.QAction('Close', self)
    exitApp.setShortcut('Ctrl+Q')
    exitApp.setStatusTip('Exit ARTview')
    exitApp.triggered.connect(self.close)
    self.filemenu.addAction(exitApp)
def addAboutMenu(self):
    '''Add Help menu to menubar.'''
    self.aboutmenu = self.menubar.addMenu('About')

    self._aboutArtview = QtGui.QAction('ARTview', self)
    self._aboutArtview.setStatusTip('About ARTview')
    self._aboutArtview.triggered.connect(self._about)

    # radar info actions operate on the currently loaded Vradar
    self.RadarShort = QtGui.QAction('Show Short Radar Info', self)
    self.RadarShort.setStatusTip('Print Short Radar Structure Info')
    self.RadarShort.triggered.connect(self._get_RadarShortInfo)

    self.RadarLong = QtGui.QAction('Print Long Radar Info', self)
    self.RadarLong.setStatusTip('Print Long Radar Structure Info')
    self.RadarLong.triggered.connect(self._get_RadarLongInfo)

    self.PluginHelp = QtGui.QAction('Plugin Help', self)
    self.PluginHelp.triggered.connect(self._get_pluginhelp)

    self.aboutmenu.addAction(self._aboutArtview)
    self.aboutmenu.addAction(self.RadarShort)
    self.aboutmenu.addAction(self.RadarLong)
    self.aboutmenu.addAction(self.PluginHelp)
def addLayoutMenu(self):
    '''Add Layout Menu to menubar.'''
    self.layoutmenu = self.menubar.addMenu('&Layout')
    # maps widget.__repr__() -> that widget's submenu entry
    self.layoutmenuItems = {}
def addLayoutMenuItem(self, widget):
    '''Add widget item to Layout Menu.'''
    if hasattr(widget, 'name'):
        item = self.layoutmenu.addMenu(widget.name)
    else:
        item = self.layoutmenu.addMenu(widget.__str__())
    self.layoutmenuItems[widget.__repr__()] = item

    remove = item.addAction("remove")
    # bind widget as a default argument so each menu entry removes its
    # own widget (avoids the late-binding closure pitfall)
    remove.triggered[()].connect(
        lambda widget=widget: self.removeLayoutWidget(widget))
def removeLayoutMenuItem(self, widget):
    '''Remove widget item from Layout Menu.'''
    rep = widget.__repr__()
    if rep in self.layoutmenuItems:
        # clear the submenu, detach it from the Layout menu, close it
        self.layoutmenuItems[rep].clear()
        self.layoutmenu.removeAction(
            self.layoutmenuItems[rep].menuAction())
        self.layoutmenuItems[rep].close()
        del self.layoutmenuItems[rep]
def addPluginMenuItem(self, Comp, label=None):
    '''Add Component item to Component Menu.
    If label is None use class name.'''
    # XXX this function is broken and need to be removed
    # (no self.pluginmenu is ever created in this class)
    if label is None:
        label = Comp.__name__
    action = self.pluginmenu.addAction(label)
    # bind Comp as a default argument so each entry starts its own class
    action.triggered[()].connect(
        lambda Comp=Comp: self.startComponent(Comp))
def startComponent(self, Comp):
    '''Execute the GUI start of Component and
    add to layout if not independent.'''
    # guiStart returns (instance, independent flag); independent
    # components manage their own window
    comp, independent = Comp.guiStart(self)
    if not independent:
        self.addLayoutWidget(comp)
def change_mode(self, new_mode):
    ''' Open and connect new components to satisfy mode.

    Parameters
    ----------
    new_mode: see file artview/modes.py for documentation on modes
    '''
    # new_mode is (component classes, links between their variables)
    components = new_mode[0][:]
    links = new_mode[1]
    static_comp_list = componentsList[:]

    # find already running components
    for i, component in enumerate(components):
        flag = False
        for j, comp in enumerate(static_comp_list):
            if isinstance(comp, component):
                # reuse the running instance; pop it so the same
                # instance is not matched for two required components
                components[i] = static_comp_list.pop(j)
                flag = True
                break
        if not flag:
            # if there is no component open
            print("starting component: %s" % component.__name__)
            from ..core.core import suggestName
            name = suggestName(components[i])
            components[i] = components[i](name=name, parent=self)
            self.addLayoutWidget(components[i])

    # each link is ((dest_comp_idx, var), (orig_comp_idx, var))
    for link in links:
        dest = getattr(components[link[0][0]], link[0][1])
        orin = getattr(components[link[1][0]], link[1][1])
        if dest is orin:
            # already linked
            pass
        else:
            # not linked, link
            print("linking %s.%s to %s.%s" %
                  (components[link[1][0]].name, link[1][1],
                   components[link[0][0]].name, link[0][1]))
            # Disconect old Variable
            components[link[1][0]].disconnectSharedVariable(link[1][1])
            # comp1.var = comp0.var
            setattr(components[link[1][0]], link[1][1], dest)
            # Connect new Variable
            components[link[1][0]].connectSharedVariable(link[1][1])
            # Emit change signal
            dest.update()
def addFileAdvanceMenu(self):
    '''
    Add menu to advance to next or previous file.
    Or to go to the first or last file in the selected directory.
    '''
    self.advancemenu = self.menubar.addMenu("Change file")

    # BUGFIX: the lambdas must read self.fileindex / self.filelist at
    # activation time.  Binding them as default arguments (as before)
    # froze the values from menu-creation time -- when fileindex is 0
    # and filelist is empty -- so Next always jumped to file 1 and
    # Previous/Last always hit index -1.
    nextAction = self.advancemenu.addAction("Next")
    nextAction.triggered[()].connect(
        lambda: self.AdvanceFileSelect(self.fileindex + 1))

    prevAction = self.advancemenu.addAction("Previous")
    prevAction.triggered[()].connect(
        lambda: self.AdvanceFileSelect(self.fileindex - 1))

    firstAction = self.advancemenu.addAction("First")
    firstAction.triggered[()].connect(
        lambda: self.AdvanceFileSelect(0))

    lastAction = self.advancemenu.addAction("Last")
    lastAction.triggered[()].connect(
        lambda: self.AdvanceFileSelect(len(self.filelist) - 1))
######################
# Help methods #
######################
def _about(self):
    '''Show the "About ARTview" dialog.'''
    # Add a more extensive about eventually
    text = (
        "<b>About ARTView</b><br><br>"
        "ARTview is a visualization package that leverages the <br>"
        "DoE Py-ART python software to view individual weather <br>"
        "radar data files or to browse a directory of data.<br><br>"
        "<i>Note</i>:<br>"
        "Tooltip information is available if you hover over buttons <br> "
        "and menus with the mouse.<br><br>"
        "<i>Documentation</i>:<br>"
        "<br><br>"
        "For a demonstration, a "
        "<a href='https://rawgit.com/nguy/artview/master/docs/build/html/index.html'>Software Package Documentation</a><br>"
        )
    common.ShowLongTextHyperlinked(text)
def _get_RadarLongInfo(self):
    '''Print out the radar info to text box.'''
    # Get the radar info from the radar object and print it
    # NOTE(review): the detailed output goes to the terminal; the
    # message box below only points the user there
    txOut = self.Vradar.value.info()
    print(txOut)

    QtGui.QMessageBox.information(self, "Long Radar Info",
                                  "See terminal window")
def _get_RadarShortInfo(self):
    '''Print out some basic info about the radar.'''
    # For any missing data
    infoNA = "Info not available"

    def _value(getter):
        '''str(getter()), or infoNA when any lookup step fails.'''
        try:
            return str(getter())
        except:
            return infoNA

    def _value_units(data_getter, units_getter):
        '''(str(data), units) pair, or (infoNA, " ") when either fails
        -- the pair falls back together, as in the original code.'''
        try:
            return str(data_getter()), units_getter()
        except:
            return infoNA, " "

    def _param(key, sub):
        '''Accessor into the radar instrument_parameters dict.'''
        return self.Vradar.value.instrument_parameters[key][sub][0]

    rname = _value(lambda: self.Vradar.value.metadata['instrument_name'])
    rlon = _value(lambda: self.Vradar.value.longitude['data'][0])
    rlat = _value(lambda: self.Vradar.value.latitude['data'][0])
    # NOTE(review): ['units'][0] indexes the first element of the units
    # entry -- preserved from the original; confirm it is intended
    ralt, raltu = _value_units(
        lambda: self.Vradar.value.altitude['data'][0],
        lambda: self.Vradar.value.altitude['units'][0])
    maxr, maxru = _value_units(lambda: _param('unambiguous_range', 'data'),
                               lambda: _param('unambiguous_range', 'units'))
    nyq, nyqu = _value_units(lambda: _param('nyquist_velocity', 'data'),
                             lambda: _param('nyquist_velocity', 'units'))
    bwh, bwhu = _value_units(lambda: _param('radar_beam_width_h', 'data'),
                             lambda: _param('radar_beam_width_h', 'units'))
    bwv, bwvu = _value_units(lambda: _param('radar_beam_width_v', 'data'),
                             lambda: _param('radar_beam_width_v', 'units'))
    pw, pwu = _value_units(lambda: _param('pulse_width', 'data'),
                           lambda: _param('pulse_width', 'units'))
    ngates = _value(lambda: self.Vradar.value.ngates)
    nsweeps = _value(lambda: self.Vradar.value.nsweeps)

    txOut = (('Radar Name: %s\n' % rname) +
             ('Radar longitude: %s\n' % rlon) +
             ('Radar latitude: %s\n' % rlat) +
             ('Radar altitude: %s %s\n' % (ralt, raltu)) +
             (' \n') +
             ('Unambiguous range: %s %s\n' % (maxr, maxru)) +
             ('Nyquist velocity: %s %s\n' % (nyq, nyqu)) +
             (' \n') +
             ('Radar Beamwidth, horiz: %s %s\n' % (bwh, bwhu)) +
             ('Radar Beamwidth, vert: %s %s\n' % (bwv, bwvu)) +
             ('Pulsewidth: %s %s \n' % (pw, pwu)) +
             (' \n') +
             ('Number of gates: %s\n' % ngates) +
             ('Number of sweeps: %s\n' % nsweeps))

    QtGui.QMessageBox.information(self, "Short Radar Info", txOut)
def _get_pluginhelp(self):
    '''Print out a short help text box regarding plugins.'''
    # static HTML-ish help text rendered by common.ShowLongText
    text = (
        "<b>Existing Plugins</b><br><br>"
        "Current plugins can be found under the <i>Advanced Tools</i> "
        "menu.<br>"
        "Most plugins have a help button for useage information.<br>"
        "<br><br>"
        "<b>Creating a Custom Plugin</b><br><br>"
        "ARTview allows the creation of custom user plugins.<br><br>"
        "Instructions and examples can be found at:<br>"
        "https://rawgit.com/nguy/artview/master/docs/build/html/"
        "plugin_tutorial.html<br><br>"
        "Please consider submitting your plugin for inclusion in "
        "ARTview<br>"
        "  Submit a pull request if you forked the repo on Github or"
        " Post the code in an Issue:<br>"
        "https://github.com/nguy/artview/issues<br><br>")
    common.ShowLongText(text)
########################
# Selection methods    #
########################
def AdvanceFileSelect(self, findex):
    '''Captures a selection and open file.

    Warns and does nothing when *findex* falls outside the current
    directory listing.
    '''
    # past the last file: warn and stay put
    # (a leftover debug print and dead assignments before the returns
    # were removed)
    if findex > (len(self.filelist) - 1):
        common.ShowWarning("End of directory, cannot advance!")
        return
    # before the first file: warn and stay put
    if findex < 0:
        common.ShowWarning("Beginning of directory, must move forward!")
        return
    self.fileindex = findex
    self.filename = os.path.join(self.dirIn, self.filelist[findex])
    self._openfile()
########################
# Menu display methods #
########################
def _openfile(self):
    '''Open a file via a file selection window.'''
    print("Opening file " + self.filename)

    # Update to current directory when file is chosen
    self.dirIn = os.path.dirname(self.filename)

    # Get a list of files in the working directory
    self.filelist = os.listdir(self.dirIn)
    self.filelist.sort()

    # remember this file's position for Next/Previous navigation
    if os.path.basename(self.filename) in self.filelist:
        self.fileindex = self.filelist.index(
            os.path.basename(self.filename))
    else:
        self.fileindex = 0

    # Read the data from file
    radar_warning = False
    grid_warning = False
    if "radar" in self.mode:
        # fast path first (delayed field loading), then a full read
        try:
            radar = pyart.io.read(self.filename, delay_field_loading=True)
            # Add the filename for Display
            radar.filename = self.filename
            self.Vradar.change(radar)
            return
        except:
            try:
                radar = pyart.io.read(self.filename)
                # Add the filename for Display
                radar.filename = self.filename
                self.Vradar.change(radar)
                return
            except:
                import traceback
                print(traceback.format_exc())
                radar_warning = True
    if "grid" in self.mode:
        # same two-step strategy for grid files
        try:
            grid = pyart.io.read_grid(
                self.filename, delay_field_loading=True)
            self.Vgrid.change(grid)
            return
        except:
            try:
                grid = pyart.io.read_grid(self.filename)
                self.Vgrid.change(grid)
                return
            except:
                import traceback
                print(traceback.format_exc())
                grid_warning = True

    # nothing could read the file: tell the user why
    if grid_warning or radar_warning:
        msg = "Py-ART didn't recognize this file!"
        common.ShowWarning(msg)
    else:
        msg = "Could not open file, invalid mode!"
        common.ShowWarning(msg)
    return
| jjhelmus/artview | artview/components/menu.py | Python | bsd-3-clause | 23,563 | [
"NetCDF"
] | b807ad3dbcdabfec7175543e8c338174f0d84de08a21bbe13c131740b5e52bc0 |
# For this assignment you are asked to fit classification models to data from the Bank Marketing Study
# Focus is on Logistic Regression and Naive Bayes
# Set seed value for random number generators to obtain reproducible results
RANDOM_SEED = 85
# import packages
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn import metrics, cross_validation
from sklearn.naive_bayes import BernoulliNB, GaussianNB
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold, train_test_split
import itertools
import matplotlib.pyplot as plt
# Import data set
path = 'C:/Users/sgran/Desktop/northwestern/predict_422/assignment_2/jump-start-bank-v001/'
data = pd.read_csv(path + 'bank.csv', sep = ';')

# Examine the shape of original input data
print('--Observations, Variables--\n', data.shape)
# look at the list of column names
# NOTE(review): data.info() prints its report and returns None,
# so this line also prints "None"
print(data.info())
# look at the beginning of the DataFrame and quick stats
print('\n--Head of data set--\n', data.head())
print('\n--Descriptive Statistics--\n', data.describe())

# mapping function to convert text no/yes to integer 0/1
convert_to_binary = {'no' : 0, 'yes' : 1}

# define binary variable for having credit in default
credit_default = data['default'].map(convert_to_binary)
print('\nTotal defaults:', credit_default.sum())
print(round(100 * credit_default.sum() / data.shape[0]),
      "% have credit in default")

# define binary variable for having a mortgage or housing loan
mortgage = data['housing'].map(convert_to_binary)
print('\nTotal with mortgage:', mortgage.sum())
print(round(100 * mortgage.sum() / data.shape[0]),
      "% have a mortgage or housing loan")

# define binary variable for having a personal loan
loan = data['loan'].map(convert_to_binary)
print('\nTotal with personal loan:', loan.sum())
print(round(100 * loan.sum() / data.shape[0]),
      "% have a personal loan")

# define response variable to use in the model
response = data['response'].map(convert_to_binary)
print('\nResponse counts:\n', data.response.value_counts())
print(round(100 * response.sum() / data.shape[0]),
      "% subscribed to a term deposit")

# gather three explanatory variables and response
# columns: credit_default, mortgage, loan, response
model_data = np.array([
    np.array(credit_default),
    np.array(mortgage),
    np.array(loan),
    np.array(response)
    ]).transpose()
# examine the shape of model_data
print('--Observations, Variables--\n', model_data.shape)

### LOGISTIC REGRESSION ###
print('\n\n### LOGISTIC REGRESSION ###\n')
# first three columns are the features, last column is the response
X = model_data[:, 0:3]
y = model_data[:, 3]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
y_prob = log_reg.predict_proba(X_test)
print('\nProbabilities:\n', y_prob)
yhat = log_reg.predict(X_test)
print('\nNumber of yes predictions:', yhat.sum())
print('\nAccuracy of predictions:', metrics.accuracy_score(y_test, yhat))
print('\nConfusion Matrix:\n', metrics.confusion_matrix(y_test, yhat))
print('\nClassification Report:\n', metrics.classification_report(y_test, yhat))
print('\nAUROC:', roc_auc_score(y_test, y_prob[:, 1]))
# Plot confusion matrix
# Based on method from sklearn
def plot_confusion_matrix(cm, classes, model,
                          title='Confusion matrix: ',
                          cmap=plt.cm.Blues):
    """Render a (non-normalized) confusion matrix as a labeled heatmap.

    Parameters
    ----------
    cm : array of shape (n_classes, n_classes)
        Confusion matrix as returned by sklearn.metrics.confusion_matrix.
    classes : list of str
        Tick labels for both axes.
    model : str
        Model name, appended to the plot title.
    title : str
        Title prefix.
    cmap : matplotlib colormap
        Colormap for the heatmap.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title + model)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # annotate each cell with its count; white text on dark cells
    fmt = 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Plot non-normalized confusion matrix
np.set_printoptions(precision=2)
plt.figure()
plot_confusion_matrix(
    cm=metrics.confusion_matrix(y_test, yhat),
    classes=['no','yes'],
    model='Logistic Regression')
plt.show()

print('\n----Softmax----')
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Try various methods of logistic regression
softmax_reg = LogisticRegression(
    multi_class="multinomial",
    solver="lbfgs",
    C=10)
softmax_reg.fit(X_train, y_train)
y_prob2 = softmax_reg.predict_proba(X_test)
print('\nProbabilities:\n', y_prob2)
yhat2 = softmax_reg.predict(X_test)
print('\nNumber of yes predictions:', yhat2.sum())
print('\nAccuracy of predictions:', metrics.accuracy_score(y_test, yhat2))

print('\n----Cross Validation----')
# NOTE(review): sklearn.cross_validation was removed in sklearn 0.20;
# this call needs an old sklearn or a port to
# sklearn.model_selection.cross_val_predict
yhat_cross = cross_validation.cross_val_predict(
    LogisticRegression(),
    X, y, cv=10
    )
print('\nNumber of yes predictions:', yhat_cross.sum())
print('\nAccuracy of predictions:', metrics.accuracy_score(y, yhat_cross))

### NAIVE BAYES ###
print('\n\n### NAIVE BAYES ###\n')
print('\n----Bernoulli----')
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# empirical prior: share of "yes" responses in the full data set
prior = response.sum() / data.shape[0]
clf = BernoulliNB(
    alpha=1.0,
    binarize=0.5,
    class_prior=[1 - prior, prior],
    fit_prior=False
    )
clf.fit(X_train, y_train)

df = pd.DataFrame(X_test)
df.columns = ['credit_default', 'mortgage', 'loan']
df['response'] = y_test
# add predicted probabilities to the training sample
df['prob_NO'] = clf.predict_proba(X_test)[:,0]
df['prob_YES'] = clf.predict_proba(X_test)[:,1]
df['prediction'] = clf.predict(X_test)
print(df.head(10))
print('\nNumber of yes predictions:', df.prediction.sum())
print('\nOverall training set accuracy:', clf.score(X_test, y_test))

print('\n----Gaussian----')
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf2 = GaussianNB()
clf2.fit(X_train, y_train)
# overwrite the Bernoulli columns with the Gaussian model's output
df['prob_NO'] = clf2.predict_proba(X_test)[:,0]
df['prob_YES'] = clf2.predict_proba(X_test)[:,1]
df['prediction'] = clf2.predict(X_test)
print(df.head(10))
print('\nNumber of yes predictions:', df.prediction.sum())
print('\nOverall training set accuracy:', clf2.score(X_test, y_test))
### CROSS VALIDATION ###
print('\n\n### CROSS VALIDATION ###\n')
# Adapted from Scikit Learn documentation
names = ["Naive_Bayes", "Logistic_Regression"]
models = [
    BernoulliNB(
        alpha=1.0,
        binarize=0.5,
        class_prior = [0.5, 0.5],
        fit_prior=False
        ),
    LogisticRegression()
    ]

# Shuffle the rows
np.random.seed(RANDOM_SEED)
np.random.shuffle(model_data)

num_folds = 10
# set up numpy array for storing results
results = np.zeros((num_folds, len(names)))
# NOTE(review): newer sklearn rejects random_state with shuffle=False;
# the seed has no effect here anyway since the data are pre-shuffled
kf = KFold(
    n_splits=num_folds,
    shuffle=False,
    random_state=RANDOM_SEED
    )

i = 0
for train_index, test_index in kf.split(model_data):
    print('\nFold index:', i, '-----')
    # last column is the response, the rest are the features
    X_train = model_data[train_index, 0:X.shape[1]]
    y_train = model_data[train_index, X.shape[1]]
    X_test = model_data[test_index, 0:X.shape[1]]
    y_test = model_data[test_index, X.shape[1]]
    print('\nShape of input data for this fold:\n')
    print('X_train:', X_train.shape)
    print('y_train:', y_train.shape)
    print('\nX_test:', X_test.shape)
    print('y_test:', y_test.shape)

    j = 0
    for name, model in zip(names, models):
        print('\nClassifier evaluation for:', name)
        print('\nScikit Learn method:', model)
        model.fit(X_train, y_train)
        # evaluate on the test set for this fold
        y_prob = model.predict_proba(X_test)
        print('\nNumber of yes predictions:', model.predict(X_test).sum())
        auroc = roc_auc_score(y_test, y_prob[:, 1])
        print('Area under ROC curve:', auroc)
        results[i, j] = auroc
        plt.figure()
        plot_confusion_matrix(
            cm=metrics.confusion_matrix(y_test, model.predict(X_test)),
            classes=['no','yes'],
            model=name + str(i)
            )
        plt.show()
        j += 1
    i += 1

# average AUROC per method across the folds
df = pd.DataFrame(results)
df.columns = names
print('\n----------------------------------------------')
print('Average results from ',
      num_folds,
      '-fold cross-validation\n',
      '\nMethod Area under ROC Curve', sep = '')
print(df.mean())
| sgranitz/nw | predict422/naive-bayes_logistic-regression.py | Python | mit | 8,238 | [
"Gaussian"
] | 33af756104b59f7f8520be54c4130183fbbbb81c6a95668b2211b1497393712b |
import os.path as op
import pytest
import mne
from mne.datasets import testing
from mne.io.pick import pick_channels_cov
from mne.utils import (check_random_state, _check_fname, check_fname,
_check_subject, requires_mayavi, traits_test,
_check_mayavi_version, _check_info_inv, _check_option)
data_path = testing.data_path(download=False)
base_dir = op.join(data_path, 'MEG', 'sample')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_event = op.join(base_dir, 'sample_audvis_trunc_raw-eve.fif')
fname_fwd = op.join(base_dir, 'sample_audvis_trunc-meg-vol-7-fwd.fif')
reject = dict(grad=4000e-13, mag=4e-12)
def test_check():
    """Test checking functions."""
    # Each helper must reject bad input with the documented exception type.
    with pytest.raises(ValueError):
        check_random_state('foo')
    with pytest.raises(TypeError):
        _check_fname(1)
    with pytest.raises(IOError):
        check_fname('foo', 'tets-dip.x', (), ('.fif',))
    with pytest.raises(ValueError):
        _check_subject(None, None)
    with pytest.raises(TypeError):
        _check_subject(None, 1)
    with pytest.raises(TypeError):
        _check_subject(1, None)
@requires_mayavi
@traits_test
def test_check_mayavi():
    """Test mayavi version check."""
    # An absurdly high required version must trigger the version error.
    with pytest.raises(RuntimeError):
        _check_mayavi_version('100.0.0')
def _get_data():
    """Read in data used in tests."""
    # Forward model.
    fwd = mne.read_forward_solution(fname_fwd)
    # Raw data and its events.
    raw = mne.io.read_raw_fif(fname_raw, preload=True)
    events = mne.read_events(fname_event)
    event_id, tmin, tmax = 1, -0.1, 0.15
    # Keep every other left-temporal channel to speed up the tests.
    sel = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info, selection=sel)[::2]
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    del picks
    raw.info.normalize_proj()  # avoid projection warnings
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        baseline=(None, 0.), preload=True, reject=reject)
    # Noise covariance from the baseline, data covariance post-stimulus.
    noise_cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
    data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.15)
    return epochs, data_cov, noise_cov, fwd
@testing.requires_testing_data
def test_check_info_inv():
    """Test checks for common channels across fwd model and cov matrices."""
    epochs, data_cov, noise_cov, forward = _get_data()
    # make sure same channel lists exist in data to make testing life easier
    assert epochs.info['ch_names'] == data_cov.ch_names
    assert epochs.info['ch_names'] == noise_cov.ch_names
    # check whether bad channels get excluded from the channel selection
    # info
    info_bads = epochs.info.copy()
    info_bads['bads'] = info_bads['ch_names'][1:3]  # include two bad channels
    picks = _check_info_inv(info_bads, forward, noise_cov=noise_cov)
    assert [1, 2] not in picks
    # covariance matrix
    data_cov_bads = data_cov.copy()
    data_cov_bads['bads'] = data_cov_bads.ch_names[0]
    picks = _check_info_inv(epochs.info, forward, data_cov=data_cov_bads)
    assert 0 not in picks
    # noise covariance matrix
    noise_cov_bads = noise_cov.copy()
    noise_cov_bads['bads'] = noise_cov_bads.ch_names[1]
    picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov_bads)
    assert 1 not in picks
    # test whether reference channels get deleted
    info_ref = epochs.info.copy()
    info_ref['chs'][0]['kind'] = 301  # pretend to have a ref channel
    picks = _check_info_inv(info_ref, forward, noise_cov=noise_cov)
    assert 0 not in picks
    # pick channels in all inputs and make sure common set is returned
    # epochs keep channels 0-9, data_cov 5-19, noise_cov 7-11; the
    # intersection of all three is channels 7-9.
    epochs.pick_channels([epochs.ch_names[ii] for ii in range(10)])
    data_cov = pick_channels_cov(data_cov, include=[data_cov.ch_names[ii]
                                                    for ii in range(5, 20)])
    noise_cov = pick_channels_cov(noise_cov, include=[noise_cov.ch_names[ii]
                                                      for ii in range(7, 12)])
    picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov,
                            data_cov=data_cov)
    assert list(range(7, 10)) == picks
def test_check_option():
    """Test checking the value of a parameter against a list of options."""
    allowed_values = ['valid', 'good', 'ok']
    # Every allowed value passes the check (returns truthy).
    for value in allowed_values:
        assert _check_option('option', value, allowed_values)
    assert _check_option('option', 'valid', ['valid'])
    # An invalid value raises with a message listing all options.
    msg = ("Invalid value for the 'option' parameter. Allowed values are "
           "'valid', 'good' and 'ok', but got 'bad' instead.")
    with pytest.raises(ValueError, match=msg):
        _check_option('option', 'bad', allowed_values)
    # With exactly one allowed value the message is phrased differently.
    msg = ("Invalid value for the 'option' parameter. The only allowed value "
           "is 'valid', but got 'bad' instead.")
    with pytest.raises(ValueError, match=msg):
        _check_option('option', 'bad', ['valid'])
| adykstra/mne-python | mne/utils/tests/test_check.py | Python | bsd-3-clause | 5,145 | [
"Mayavi"
] | 5e46ffa0963d6c1f611ec94b432069f28074fc7727a84067b805a75c93d8084f |
#!/usr/bin/env python
import math, numpy as np
#from enthought.mayavi import mlab
import matplotlib.pyplot as pp
import matplotlib.cm as cm
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import tf
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import hrl_lib.matplotlib_util as mpu
import pickle
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact
from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray
from m3skin_ros.msg import TaxelArray as TaxelArray_Meka
from hrl_msgs.msg import FloatArrayBare
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
def callback(data, callback_args):
    """Taxel-array subscriber callback.

    Transforms the skin-taxel contact centers into the torso frame and, when
    a contact is detected, appends [time, x, y, z] of the (single) contact
    centroid to the global ``time_varying_data`` array.
    """
    rospy.loginfo('Getting data!')
    # callback_args is the tf.TransformListener passed at subscription time.
    tf_lstnr = callback_args
    sc = SkinContact()
    sc.header.frame_id = '/torso_lift_link' # has to be this and no other coord frame.
    sc.header.stamp = data.header.stamp
    # Transform (translation t1, rotation q1) from the taxel-array frame
    # into the torso frame, using the latest available transform.
    t1, q1 = tf_lstnr.lookupTransform(sc.header.frame_id,
                                      data.header.frame_id,
                                      rospy.Time(0))
    t1 = np.matrix(t1).reshape(3,1)
    r1 = tr.quaternion_to_matrix(q1)
    print np.shape(t1)
    print np.shape(r1)
    # Stack per-taxel force and contact-center components into 3 x N arrays.
    force_vectors = np.row_stack([data.forces_x, data.forces_y, data.forces_z])
    contact_vectors = np.row_stack([data.centers_x, data.centers_y, data.centers_z])
    fmags = ut.norm(force_vectors)
    print np.shape(contact_vectors)
    # Threshold force magnitudes to find taxels in contact, then label
    # connected regions of in-contact taxels.
    contact_regions = fmags > 0.01
    lb,ls = ni.label(contact_regions)
    total_contact = ni.sum(lb) # After thresholding, assuming one connected component (experiment designed that way)
    if total_contact > 1:
        # Calculating time:
        # NOTE(review): time is advanced by a fixed 0.01 s per message;
        # assumes the taxel topic publishes at 100 Hz -- confirm.
        global time
        time = time + 0.01
        global contact_point_local, contact_point_world
        # Centroid of the labeled contact region, in the taxel-array frame.
        local_x = ni.mean(contact_vectors[0,:],lb) # After thresholding, assuming one connected component (experiment designed that way)
        local_y = ni.mean(contact_vectors[1,:],lb) # After thresholding, assuming one connected component (experiment designed that way)
        local_z = ni.mean(contact_vectors[2,:],lb) # After thresholding, assuming one connected component (experiment designed that way)
        contact_point_local = np.column_stack([local_x,local_y,local_z])
        print np.shape(contact_point_local)
        # Rotate + translate the centroid into the torso frame.
        contact_point_world = r1*(contact_point_local.T) + t1
        time_instant_data = [time,contact_point_world[0],contact_point_world[1],contact_point_world[2]]
        global time_varying_data
        time_varying_data = np.row_stack([time_varying_data, time_instant_data])
def tracking_point():
    """Compute the distance of each recorded contact point from the first one.

    Walks the global ``time_varying_data`` array (rows of [t, x, y, z]; row 0
    is the [0, 0, 0, 0] initializer, so row 1 is the first real contact) and
    appends [t, distance-from-first-contact] rows to the global
    ``time_varying_tracker`` array.
    """
    rospy.loginfo('Tracking Distance!')
    ta = time_varying_data
    k = 0
    for i in ta[:,0]:
        # Skip entries whose timestamp equals the last row's timestamp;
        # k+1 indexing below would otherwise run past the end of the array.
        if i != ta[-1,0]:
            # Euclidean distance between contact k+1 and the first contact
            # (row 1), measured in the torso frame.
            instant_dist = math.sqrt((ta[k+1,1]-ta[1,1])**2 + (ta[k+1,2]-ta[1,2])**2 + (ta[k+1,3]-ta[1,3])**2)
            time_instant_tracker = [ta[k+1,0], instant_dist]
            global time_varying_tracker
            time_varying_tracker = np.row_stack([time_varying_tracker, time_instant_tracker])
            k=k+1
def savedata():
    """Pickle the global ``time_varying_tracker`` array to disk.

    NOTE(review): the output path is hard-coded to a user-specific trial
    file; change it per experiment.
    """
    rospy.loginfo('Saving data!')
    global time_varying_tracker
    ut.save_pickle(time_varying_tracker, '/home/tapo/svn/robot1_data/usr/tapo/data/Variable_Stiffness_Variable_Velocity/low_vel_low_stiff/soft_movable/sponge_movable/tracking_trial_5.pkl')
def plotdata():
    """Load the pickled tracker data and plot distance vs. time.

    Reads back the same hard-coded file written by ``savedata`` (column 0 is
    time in seconds, column 1 is distance from the first contact point).
    """
    rospy.loginfo('Plotting data!')
    mpu.figure(1)
    ta = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/Variable_Stiffness_Variable_Velocity/low_vel_low_stiff/soft_movable/sponge_movable/tracking_trial_5.pkl')
    pp.title('Point Tracker')
    pp.xlabel('Time (s)')
    pp.ylabel('Contact Point Distance')
    pp.plot(ta[:,0], ta[:,1])
    pp.grid('on')
def getdata():
    """Initialize the ROS node and block collecting taxel data.

    Subscribes ``callback`` to the forearm taxel-force topic and spins until
    the node is shut down (Ctrl-C), at which point control returns to main.
    """
    rospy.init_node('point_tracker', anonymous=True)
    tf_lstnr = tf.TransformListener()
    rospy.Subscriber("/skin_patch_forearm_right/taxels/forces", TaxelArray_Meka, callback, callback_args = (tf_lstnr))
    rospy.spin()
if __name__ == '__main__':
    # Globals shared with callback()/tracking_point(); the first row of the
    # accumulator arrays is a zero placeholder that stays in place.
    time = 0
    contact_point_local = [0,0,0]
    contact_point_world = [0,0,0]
    time_varying_data = [0,0,0,0]
    time_varying_tracker = [0,0]
    # Collect data until the node is interrupted, then post-process:
    # compute distances, save the pickle, and show the plot.
    getdata()
    tracking_point()
    savedata()
    plotdata()
    pp.show()
"Mayavi"
] | 8a49055e3c6fdd25408ce070054ae4d1a97252b652aa2741d4e0929d8d1ab1af |
# Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
# and Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.
**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. If nobody volunteers to write documentation and
make it more stable, this module will be removed in version 0.11.
"""
import string
import warnings
import numpy as np
from .utils import check_random_state
from .utils.extmath import logsumexp
from .base import BaseEstimator
from .mixture import (
GMM, log_multivariate_normal_density, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
def normalize(A, axis=None):
    """Normalize ``A`` in place so that it sums to 1 along ``axis``.

    Parameters
    ----------
    A: array, shape (n_samples, n_features)
        Non-normalized input data
    axis: int
        dimension along which normalization is performed

    Returns
    -------
    normalized_A: array, shape (n_samples, n_features)
        A with values normalized (summing to 1) along the prescribed axis

    WARNING: Modifies inplace the array
    """
    # Nudge every entry by machine epsilon so exact zeros never produce a
    # zero denominator below.
    A += EPS
    Asum = A.sum(axis)
    if axis and A.ndim > 1:
        # Guard against dividing by an all-zero slice, then give the sums a
        # broadcast-compatible shape (size 1 along the normalized axis).
        Asum[Asum == 0] = 1
        Asum.shape = [1 if d == axis else s
                      for d, s in enumerate(A.shape)]
    return A / Asum
class _BaseHMM(BaseEstimator):
    """Hidden Markov Model base class.
    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.
    See the instance documentation for details specific to a
    particular object.
    Attributes
    ----------
    n_components : int
        Number of states in the model.
    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.
    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.
    transmat_prior : array, shape (`n_components`, `n_components`)
        Matrix of prior transition probabilities between states.
    startprob_prior : array, shape ('n_components`,)
        Initial state occupation prior distribution.
    algorithm : string, one of the decoder_algorithms
        decoder algorithm
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    params : string, optional
        Controls which parameters are updated in the training
        process.  Can contain any combination of 's' for startprob,
        't' for transmat, 'm' for means, and 'c' for covars, etc.
        Defaults to all parameters.
    init_params : string, optional
        Controls which parameters are initialized prior to
        training.  Can contain any combination of 's' for
        startprob, 't' for transmat, 'm' for means, and 'c' for
        covars, etc.  Defaults to all parameters.
    See Also
    --------
    GMM : Gaussian mixture model
    """
    # This class implements the public interface to all HMMs that
    # derive from it, including all of the machinery for the
    # forward-backward and Viterbi algorithms.  Subclasses need only
    # implement _generate_sample_from_state(), _compute_log_likelihood(),
    # _init(), _initialize_sufficient_statistics(),
    # _accumulate_sufficient_statistics(), and _do_mstep(), all of
    # which depend on the specific emission distribution.
    #
    # Subclasses will probably also want to implement properties for
    # the emission distribution parameters to expose them publically.
    def __init__(self, n_components=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, thresh=1e-2, params=string.ascii_letters,
                 init_params=string.ascii_letters):
        self.n_components = n_components
        self.n_iter = n_iter
        self.thresh = thresh
        self.params = params
        self.init_params = init_params
        # The trailing-underscore setters below validate and store the
        # log-space versions of startprob/transmat (see the properties).
        self.startprob_ = startprob
        self.startprob_prior = startprob_prior
        self.transmat_ = transmat
        self.transmat_prior = transmat_prior
        self._algorithm = algorithm
        self.random_state = random_state
    def eval(self, obs):
        """Compute the log probability under the model and compute posteriors
        Implements rank and beam pruning in the forward-backward
        algorithm to speed up inference in large models.
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points.  Each row
            corresponds to a single point in the sequence.
        Returns
        -------
        logprob : float
            Log likelihood of the sequence `obs`
        posteriors: array_like, shape (n, n_components)
            Posterior probabilities of each state for each
            observation
        See Also
        --------
        score : Compute the log probability under the model
        decode : Find most likely state sequence corresponding to a `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        logprob, fwdlattice = self._do_forward_pass(framelogprob)
        bwdlattice = self._do_backward_pass(framelogprob)
        # gamma holds unnormalized log-posteriors (alpha * beta in log space).
        gamma = fwdlattice + bwdlattice
        # gamma is guaranteed to be correctly normalized by logprob at
        # all frames, unless we do approximate inference using pruning.
        # So, we will normalize each frame explicitly in case we
        # pruned too aggressively.
        posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
        posteriors += np.finfo(np.float32).eps
        posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
        return logprob, posteriors
    def score(self, obs):
        """Compute the log probability under the model.
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points.  Each row
            corresponds to a single data point.
        Returns
        -------
        logprob : float
            Log likelihood of the `obs`
        See Also
        --------
        eval : Compute the log probability under the model and posteriors
        decode : Find most likely state sequence corresponding to a `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        # Only the forward pass is needed for the total log likelihood.
        logprob, _ = self._do_forward_pass(framelogprob)
        return logprob
    def _decode_viterbi(self, obs):
        """Find most likely state sequence corresponding to `obs`.
        Uses the Viterbi algorithm.
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.
        Returns
        -------
        viterbi_logprob : float
            Log probability of the maximum likelihood path through the HMM
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation
        See Also
        --------
        eval : Compute the log probability under the model and posteriors
        score : Compute the log probability under the model
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
        return viterbi_logprob, state_sequence
    def _decode_map(self, obs):
        """Find most likely state sequence corresponding to `obs`.
        Uses the maximum a posteriori estimation.
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.
        Returns
        -------
        map_logprob : float
            Log probability of the maximum likelihood path through the HMM
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation
        See Also
        --------
        eval : Compute the log probability under the model and posteriors
        score : Compute the log probability under the model
        """
        # Per-frame MAP: pick the most probable state independently at each
        # time step (may yield a path with zero transition probability).
        _, posteriors = self.eval(obs)
        state_sequence = np.argmax(posteriors, axis=1)
        map_logprob = np.max(posteriors, axis=1).sum()
        return map_logprob, state_sequence
    def decode(self, obs, algorithm="viterbi"):
        """Find most likely state sequence corresponding to `obs`.
        Uses the selected algorithm for decoding.
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.
        algorithm : string, one of the `decoder_algorithms`
            decoder algorithm to be used
        Returns
        -------
        logprob : float
            Log probability of the maximum likelihood path through the HMM
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation
        See Also
        --------
        eval : Compute the log probability under the model and posteriors
        score : Compute the log probability under the model
        """
        # NOTE: the instance-level algorithm takes precedence over the
        # `algorithm` argument when both are valid.
        if self._algorithm in decoder_algorithms:
            algorithm = self._algorithm
        elif algorithm in decoder_algorithms:
            algorithm = algorithm
        decoder = {"viterbi": self._decode_viterbi,
                   "map": self._decode_map}
        logprob, state_sequence = decoder[algorithm](obs)
        return logprob, state_sequence
    def predict(self, obs, algorithm="viterbi"):
        """Find most likely state sequence corresponding to `obs`.
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.
        Returns
        -------
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation
        """
        _, state_sequence = self.decode(obs, algorithm)
        return state_sequence
    def predict_proba(self, obs):
        """Compute the posterior probability for each state in the model
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.
        Returns
        -------
        T : array-like, shape (n, n_components)
            Returns the probability of the sample for each state in the model.
        """
        _, posteriors = self.eval(obs)
        return posteriors
    def sample(self, n=1, random_state=None):
        """Generate random samples from the model.
        Parameters
        ----------
        n : int
            Number of samples to generate.
        random_state: RandomState or an int seed (0 by default)
            A random number generator instance. If None is given, the
            object's random_state is used
        Returns
        -------
        (obs, hidden_states)
        obs : array_like, length `n` List of samples
        hidden_states : array_like, length `n` List of hidden states
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        # Inverse-CDF sampling: draw a uniform and find the first state
        # whose cumulative probability exceeds it.
        startprob_pdf = self.startprob_
        startprob_cdf = np.cumsum(startprob_pdf)
        transmat_pdf = self.transmat_
        transmat_cdf = np.cumsum(transmat_pdf, 1)
        # Initial state.
        rand = random_state.rand()
        currstate = (startprob_cdf > rand).argmax()
        hidden_states = [currstate]
        obs = [self._generate_sample_from_state(
            currstate, random_state=random_state)]
        for _ in xrange(n - 1):
            rand = random_state.rand()
            currstate = (transmat_cdf[currstate] > rand).argmax()
            hidden_states.append(currstate)
            obs.append(self._generate_sample_from_state(
                currstate, random_state=random_state))
        return np.array(obs), np.array(hidden_states, dtype=int)
    def fit(self, obs, **kwargs):
        """Estimate model parameters.
        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, set the keyword
        argument init_params to the empty string ''. Likewise, if you
        would like just to do an initialization, call this method with
        n_iter=0.
        Parameters
        ----------
        obs : list
            List of array-like observation sequences (shape (n_i, n_features)).
        Notes
        -----
        In general, `logprob` should be non-decreasing unless
        aggressive pruning is used.  Decreasing `logprob` is generally
        a sign of overfitting (e.g. a covariance parameter getting too
        small).  You can fix this by getting more training data, or
        decreasing `covars_prior`.
        **Please note that setting parameters in the `fit` method is
        deprecated and will be removed in the next release.
        Set it on initialization instead.**
        """
        if kwargs:
            warnings.warn("Setting parameters in the 'fit' method is"
                          "deprecated and will be removed in 0.14. Set it on "
                          "initialization instead.", DeprecationWarning,
                          stacklevel=2)
            # initialisations for in case the user still adds parameters to fit
            # so things don't break
            for name in ('n_iter', 'thresh', 'params', 'init_params'):
                if name in kwargs:
                    setattr(self, name, kwargs[name])
        if self.algorithm not in decoder_algorithms:
            self._algorithm = "viterbi"
        self._init(obs, self.init_params)
        logprob = []
        for i in xrange(self.n_iter):
            # Expectation step
            stats = self._initialize_sufficient_statistics()
            curr_logprob = 0
            for seq in obs:
                framelogprob = self._compute_log_likelihood(seq)
                lpr, fwdlattice = self._do_forward_pass(framelogprob)
                bwdlattice = self._do_backward_pass(framelogprob)
                gamma = fwdlattice + bwdlattice
                posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
                curr_logprob += lpr
                self._accumulate_sufficient_statistics(
                    stats, seq, framelogprob, posteriors, fwdlattice,
                    bwdlattice, self.params)
            logprob.append(curr_logprob)
            # Check for convergence.
            if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
                break
            # Maximization step
            self._do_mstep(stats, self.params)
        return self
    def _get_algorithm(self):
        "decoder algorithm"
        return self._algorithm
    def _set_algorithm(self, algorithm):
        if algorithm not in decoder_algorithms:
            raise ValueError("algorithm must be one of the decoder_algorithms")
        self._algorithm = algorithm
    algorithm = property(_get_algorithm, _set_algorithm)
    def _get_startprob(self):
        """Mixing startprob for each state."""
        # Stored in log space; exponentiate on read.
        return np.exp(self._log_startprob)
    def _set_startprob(self, startprob):
        if startprob is None:
            startprob = np.tile(1.0 / self.n_components, self.n_components)
        else:
            startprob = np.asarray(startprob, dtype=np.float)
        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(startprob):
            normalize(startprob)
        if len(startprob) != self.n_components:
            raise ValueError('startprob must have length n_components')
        if not np.allclose(np.sum(startprob), 1.0):
            raise ValueError('startprob must sum to 1.0')
        self._log_startprob = np.log(np.asarray(startprob).copy())
    startprob_ = property(_get_startprob, _set_startprob)
    def _get_transmat(self):
        """Matrix of transition probabilities."""
        # Stored in log space; exponentiate on read.
        return np.exp(self._log_transmat)
    def _set_transmat(self, transmat):
        if transmat is None:
            transmat = np.tile(1.0 / self.n_components,
                               (self.n_components, self.n_components))
        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(transmat):
            normalize(transmat, axis=1)
        if (np.asarray(transmat).shape
                != (self.n_components, self.n_components)):
            raise ValueError('transmat must have shape '
                             '(n_components, n_components)')
        if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
            raise ValueError('Rows of transmat must sum to 1.0')
        self._log_transmat = np.log(np.asarray(transmat).copy())
        # log(0) yields nan; clamp those entries to the -inf sentinel.
        underflow_idx = np.isnan(self._log_transmat)
        self._log_transmat[underflow_idx] = NEGINF
    transmat_ = property(_get_transmat, _set_transmat)
    def _do_viterbi_pass(self, framelogprob):
        # Delegates to the Cython implementation of the Viterbi recursion.
        n_observations, n_components = framelogprob.shape
        state_sequence, logprob = _hmmc._viterbi(
            n_observations, n_components, self._log_startprob,
            self._log_transmat, framelogprob)
        return logprob, state_sequence
    def _do_forward_pass(self, framelogprob):
        n_observations, n_components = framelogprob.shape
        fwdlattice = np.zeros((n_observations, n_components))
        _hmmc._forward(n_observations, n_components, self._log_startprob,
                       self._log_transmat, framelogprob, fwdlattice)
        # Floor extreme underflow to -inf so logsumexp stays well-behaved.
        fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
        return logsumexp(fwdlattice[-1]), fwdlattice
    def _do_backward_pass(self, framelogprob):
        n_observations, n_components = framelogprob.shape
        bwdlattice = np.zeros((n_observations, n_components))
        _hmmc._backward(n_observations, n_components, self._log_startprob,
                        self._log_transmat, framelogprob, bwdlattice)
        # Floor extreme underflow to -inf so logsumexp stays well-behaved.
        bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
        return bwdlattice
    def _compute_log_likelihood(self, obs):
        # Emission log-likelihoods; implemented by subclasses.
        pass
    def _generate_sample_from_state(self, state, random_state=None):
        # Draw one observation from a state's emission distribution;
        # implemented by subclasses.
        pass
    def _init(self, obs, params):
        # Uniform initialization of start and transition probabilities.
        if 's' in params:
            self.startprob_.fill(1.0 / self.n_components)
        if 't' in params:
            self.transmat_.fill(1.0 / self.n_components)
    # Methods used by self.fit()
    def _initialize_sufficient_statistics(self):
        stats = {'nobs': 0,
                 'start': np.zeros(self.n_components),
                 'trans': np.zeros((self.n_components, self.n_components))}
        return stats
    def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        stats['nobs'] += 1
        if 's' in params:
            stats['start'] += posteriors[0]
        if 't' in params:
            # lneta[t, i, j] is the log joint posterior of being in state i
            # at time t and state j at time t+1 (computed in Cython).
            n_observations, n_components = framelogprob.shape
            lneta = np.zeros((n_observations - 1, n_components, n_components))
            lnP = logsumexp(fwdlattice[-1])
            _hmmc._compute_lneta(n_observations, n_components, fwdlattice,
                                 self._log_transmat, bwdlattice, framelogprob,
                                 lnP, lneta)
            stats["trans"] += np.exp(logsumexp(lneta, 0))
    def _do_mstep(self, stats, params):
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        if self.startprob_prior is None:
            self.startprob_prior = 1.0
        if self.transmat_prior is None:
            self.transmat_prior = 1.0
        # MAP updates with Dirichlet priors; the 1e-20 floor keeps entries
        # strictly positive so the log-space representation stays finite.
        if 's' in params:
            self.startprob_ = normalize(
                np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
        if 't' in params:
            transmat_ = normalize(
                np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
                axis=1)
            self.transmat_ = transmat_
class GaussianHMM(_BaseHMM):
"""Hidden Markov Model with Gaussian emissions
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
Parameters
----------
n_components : int
Number of states.
``_covariance_type`` : string
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
Attributes
----------
``_covariance_type`` : string
String describing the type of covariance parameters used by
the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussian emissions.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
means : array, shape (`n_components`, `n_features`)
Mean parameters for each state.
covars : array
Covariance parameters for each state. The shape depends on
``_covariance_type``::
(`n_components`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars, etc.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars, etc. Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import GaussianHMM
>>> GaussianHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GaussianHMM(algorithm='viterbi',...
See Also
--------
GMM : Gaussian mixture model
"""
def __init__(self, n_components=1, covariance_type='diag', startprob=None,
transmat=None, startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_prior=None, means_weight=0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
thresh=thresh, params=params,
init_params=init_params)
self._covariance_type = covariance_type
if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('bad covariance_type')
self.means_prior = means_prior
self.means_weight = means_weight
self.covars_prior = covars_prior
self.covars_weight = covars_weight
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _get_means(self):
"""Mean parameters for each state."""
return self._means_
def _set_means(self, means):
means = np.asarray(means)
if (hasattr(self, 'n_features')
and means.shape != (self.n_components, self.n_features)):
raise ValueError('means must have shape '
'(n_components, n_features)')
self._means_ = means.copy()
self.n_features = self._means_.shape[1]
means_ = property(_get_means, _set_means)
def _get_covars(self):
"""Return covars as a full matrix."""
if self._covariance_type == 'full':
return self._covars_
elif self._covariance_type == 'diag':
return [np.diag(cov) for cov in self._covars_]
elif self._covariance_type == 'tied':
return [self._covars_] * self.n_components
elif self._covariance_type == 'spherical':
return [np.eye(self.n_features) * f for f in self._covars_]
def _set_covars(self, covars):
covars = np.asarray(covars)
_validate_covars(covars, self._covariance_type, self.n_components)
self._covars_ = covars.copy()
covars_ = property(_get_covars, _set_covars)
def _compute_log_likelihood(self, obs):
return log_multivariate_normal_density(
obs, self._means_, self._covars_, self._covariance_type)
def _generate_sample_from_state(self, state, random_state=None):
if self._covariance_type == 'tied':
cv = self._covars_
else:
cv = self._covars_[state]
return sample_gaussian(self._means_[state], cv, self._covariance_type,
random_state=random_state)
def _init(self, obs, params='stmc'):
super(GaussianHMM, self)._init(obs, params=params)
if (hasattr(self, 'n_features')
and self.n_features != obs[0].shape[1]):
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (obs[0].shape[1],
self.n_features))
self.n_features = obs[0].shape[1]
if 'm' in params:
self._means_ = cluster.KMeans(
n_clusters=self.n_components).fit(obs[0]).cluster_centers_
if 'c' in params:
cv = np.cov(obs[0].T)
if not cv.shape:
cv.shape = (1, 1)
self._covars_ = distribute_covar_matrix_to_match_covariance_type(
cv, self._covariance_type, self.n_components)
def _initialize_sufficient_statistics(self):
stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_components)
stats['obs'] = np.zeros((self.n_components, self.n_features))
stats['obs**2'] = np.zeros((self.n_components, self.n_features))
stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
self.n_features))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'm' in params or 'c' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in params:
if self._covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self._covariance_type in ('tied', 'full'):
for t, o in enumerate(obs):
obsobsT = np.outer(o, o)
for c in xrange(self.n_components):
stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
    def _do_mstep(self, stats, params):
        """M-step: re-estimate means and covariances from ``stats``.

        Uses MAP-style updates when ``means_prior``/``covars_prior`` are
        set; otherwise falls back to plain maximum likelihood.
        """
        super(GaussianHMM, self)._do_mstep(stats, params)
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        denom = stats['post'][:, np.newaxis]
        if 'm' in params:
            prior = self.means_prior
            weight = self.means_weight
            if prior is None:
                # No prior configured: zero weight makes this an ML update.
                weight = 0
                prior = 0
            self._means_ = (weight * prior + stats['obs']) / (weight + denom)
        if 'c' in params:
            covars_prior = self.covars_prior
            covars_weight = self.covars_weight
            if covars_prior is None:
                covars_weight = 0
                covars_prior = 0
            means_prior = self.means_prior
            means_weight = self.means_weight
            if means_prior is None:
                means_weight = 0
                means_prior = 0
            meandiff = self._means_ - means_prior
            if self._covariance_type in ('spherical', 'diag'):
                # Element-wise second central moment around the new means.
                cv_num = (means_weight * (meandiff) ** 2
                          + stats['obs**2']
                          - 2 * self._means_ * stats['obs']
                          + self._means_ ** 2 * denom)
                cv_den = max(covars_weight - 1, 0) + denom
                self._covars_ = (covars_prior + cv_num) / cv_den
                if self._covariance_type == 'spherical':
                    # Spherical: collapse per-dimension variances to their
                    # mean, replicated across dimensions.
                    self._covars_ = np.tile(
                        self._covars_.mean(1)[:, np.newaxis],
                        (1, self._covars_.shape[1]))
            elif self._covariance_type in ('tied', 'full'):
                cvnum = np.empty((self.n_components, self.n_features,
                                  self.n_features))
                for c in xrange(self.n_components):
                    obsmean = np.outer(stats['obs'][c], self._means_[c])
                    cvnum[c] = (means_weight * np.outer(meandiff[c],
                                                        meandiff[c])
                                + stats['obs*obs.T'][c]
                                - obsmean - obsmean.T
                                + np.outer(self._means_[c], self._means_[c])
                                * stats['post'][c])
                cvweight = max(covars_weight - self.n_features, 0)
                if self._covariance_type == 'tied':
                    # Tied: one covariance shared by all states.
                    self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
                                     (cvweight + stats['post'].sum()))
                elif self._covariance_type == 'full':
                    self._covars_ = ((covars_prior + cvnum) /
                                     (cvweight + stats['post'][:, None, None]))
class MultinomialHMM(_BaseHMM):
    """Hidden Markov Model with multinomial (discrete) emissions
    Attributes
    ----------
    n_components : int
        Number of states in the model.
    n_symbols : int
        Number of possible symbols emitted by the model (in the observations).
    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.
    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.
    emissionprob : array, shape ('n_components`, 'n_symbols`)
        Probability of emitting a given symbol when in each state.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 's' for startprob,
        't' for transmat, 'm' for means, and 'c' for covars, etc.
        Defaults to all parameters.
    init_params : string, optional
        Controls which parameters are initialized prior to
        training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'm' for means, and 'c' for
        covars, etc. Defaults to all parameters.
    Examples
    --------
    >>> from sklearn.hmm import MultinomialHMM
    >>> MultinomialHMM(n_components=2)
    ... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    MultinomialHMM(algorithm='viterbi',...
    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """
    def __init__(self, n_components=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, thresh=1e-2, params=string.ascii_letters,
                 init_params=string.ascii_letters):
        """Create a hidden Markov model with multinomial emissions.
        Parameters
        ----------
        n_components : int
            Number of states.
        """
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params)
    def _get_emissionprob(self):
        """Emission probability distribution for each state."""
        return np.exp(self._log_emissionprob)
    def _set_emissionprob(self, emissionprob):
        # Validate shape and store the emission matrix in log space.
        emissionprob = np.asarray(emissionprob)
        if hasattr(self, 'n_symbols') and \
                emissionprob.shape != (self.n_components, self.n_symbols):
            raise ValueError('emissionprob must have shape '
                             '(n_components, n_symbols)')
        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        # NOTE(review): the return value of normalize() is discarded here,
        # so this line only has an effect if normalize() mutates its
        # argument in place -- confirm against normalize()'s implementation.
        if not np.alltrue(emissionprob):
            normalize(emissionprob)
        self._log_emissionprob = np.log(emissionprob)
        # log(0) yields NaN/-inf; clamp NaNs to the NEGINF sentinel.
        underflow_idx = np.isnan(self._log_emissionprob)
        self._log_emissionprob[underflow_idx] = NEGINF
        self.n_symbols = self._log_emissionprob.shape[1]
    emissionprob_ = property(_get_emissionprob, _set_emissionprob)
    def _compute_log_likelihood(self, obs):
        # Per-frame log likelihood matrix: one row per frame, column per state.
        return self._log_emissionprob[:, obs].T
    def _generate_sample_from_state(self, state, random_state=None):
        # Inverse-CDF sampling of one symbol from the state's distribution.
        cdf = np.cumsum(self.emissionprob_[state, :])
        random_state = check_random_state(random_state)
        rand = random_state.rand()
        symbol = (cdf > rand).argmax()
        return symbol
    def _init(self, obs, params='ste'):
        super(MultinomialHMM, self)._init(obs, params=params)
        self.random_state = check_random_state(self.random_state)
        if 'e' in params:
            if not hasattr(self, 'n_symbols'):
                # Infer the alphabet size from the distinct observed symbols.
                symbols = set()
                for o in obs:
                    symbols = symbols.union(set(o))
                self.n_symbols = len(symbols)
            emissionprob = normalize(self.random_state.rand(self.n_components,
                                                            self.n_symbols), 1)
            self.emissionprob_ = emissionprob
    def _initialize_sufficient_statistics(self):
        stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
        # Posterior-weighted symbol counts per state.
        stats['obs'] = np.zeros((self.n_components, self.n_symbols))
        return stats
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(MultinomialHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        if 'e' in params:
            # Add each frame's posterior mass to its observed symbol column.
            for t, symbol in enumerate(obs):
                stats['obs'][:, symbol] += posteriors[t]
    def _do_mstep(self, stats, params):
        super(MultinomialHMM, self)._do_mstep(stats, params)
        if 'e' in params:
            # Row-normalize the accumulated counts into probabilities.
            self.emissionprob_ = (stats['obs']
                                  / stats['obs'].sum(1)[:, np.newaxis])
    def _check_input_symbols(self, obs):
        """check if input can be used for Multinomial.fit input must be both
        positive integer array and every element must be continuous.
        e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
        """
        symbols = np.asanyarray(obs).flatten()
        if symbols.dtype.kind != 'i':
            # input symbols must be integers
            return False
        if len(symbols) == 1:
            # input too short
            return False
        if np.any(symbols < 0):
            # input contains a negative integer
            return False
        symbols.sort()
        if np.any(np.diff(symbols) > 1):
            # input is discontinuous: some symbol value is never observed
            return False
        return True
    def fit(self, obs, **kwargs):
        # Reject inputs that are not dense non-negative integer alphabets
        # before delegating to the generic Baum-Welch implementation.
        err_msg = ("Input must be both positive integer array and "
                   "every element must be continuous, but %s was given.")
        if not self._check_input_symbols(obs):
            raise ValueError(err_msg % obs)
        return _BaseHMM.fit(self, obs, **kwargs)
class GMMHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian mixture emissions
    Attributes
    ----------
    init_params : string, optional
        Controls which parameters are initialized prior to training. Can \
        contain any combination of 's' for startprob, 't' for transmat, 'm' \
        for means, and 'c' for covars, etc. Defaults to all parameters.
    params : string, optional
        Controls which parameters are updated in the training process. Can
        contain any combination of 's' for startprob, 't' for transmat,'m' for
        means, and 'c' for covars, etc. Defaults to all parameters.
    n_components : int
        Number of states in the model.
    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.
    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.
    gmms : array of GMM objects, length `n_components`
        GMM emission distributions for each state.
    random_state : RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    Examples
    --------
    >>> from sklearn.hmm import GMMHMM
    >>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    GMMHMM(algorithm='viterbi', covariance_type='diag',...
    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """
    def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", gmms=None, covariance_type='diag',
                 covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters):
        """Create a hidden Markov model with GMM emissions.
        Parameters
        ----------
        n_components : int
            Number of states.
        """
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params)
        # XXX: Hotfit for n_mix that is incompatible with the scikit's
        # BaseEstimator API
        self.n_mix = n_mix
        self._covariance_type = covariance_type
        self.covars_prior = covars_prior
        # ``self.gmms`` keeps the constructor argument verbatim; the working
        # per-state mixture list lives in ``self.gmms_``.
        self.gmms = gmms
        if gmms is None:
            gmms = []
            for x in xrange(self.n_components):
                if covariance_type is None:
                    g = GMM(n_mix)
                else:
                    g = GMM(n_mix, covariance_type=covariance_type)
                gmms.append(g)
        self.gmms_ = gmms
    # Read-only properties.
    @property
    def covariance_type(self):
        """Covariance type of the model.
        Must be one of 'spherical', 'tied', 'diag', 'full'.
        """
        return self._covariance_type
    def _compute_log_likelihood(self, obs):
        # One log-likelihood column per state, scored by that state's GMM.
        return np.array([g.score(obs) for g in self.gmms_]).T
    def _generate_sample_from_state(self, state, random_state=None):
        return self.gmms_[state].sample(1, random_state=random_state).flatten()
    def _init(self, obs, params='stwmc'):
        super(GMMHMM, self)._init(obs, params=params)
        # Fit each state's GMM on the pooled observations (zero EM
        # iterations: only the requested parameters are initialized).
        allobs = np.concatenate(obs, 0)
        for g in self.gmms_:
            g.set_params(init_params=params, n_iter=0)
            g.fit(allobs)
    def _initialize_sufficient_statistics(self):
        stats = super(GMMHMM, self)._initialize_sufficient_statistics()
        stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
        stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
        stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
        return stats
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(GMMHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        for state, g in enumerate(self.gmms_):
            # Combine the HMM state posterior with the within-state mixture
            # component posteriors (eps guards log(0)).
            _, lgmm_posteriors = g.eval(obs)
            lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
                                      + np.finfo(np.float).eps)
            gmm_posteriors = np.exp(lgmm_posteriors)
            # A throwaway GMM performs the weighted M-step so the real
            # per-state GMMs are only updated in _do_mstep.
            tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
            n_features = g.means_.shape[1]
            tmp_gmm._set_covars(
                distribute_covar_matrix_to_match_covariance_type(
                    np.eye(n_features), g.covariance_type,
                    g.n_components))
            norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
            if np.any(np.isnan(tmp_gmm.covars_)):
                raise ValueError
            stats['norm'][state] += norm
            if 'm' in params:
                stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
            if 'c' in params:
                if tmp_gmm.covariance_type == 'tied':
                    stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
                else:
                    # Broadcast norm along the covariance trailing dims.
                    cvnorm = np.copy(norm)
                    shape = np.ones(tmp_gmm.covars_.ndim)
                    shape[0] = np.shape(tmp_gmm.covars_)[0]
                    cvnorm.shape = shape
                    stats['covars'][state] += tmp_gmm.covars_ * cvnorm
    def _do_mstep(self, stats, params):
        super(GMMHMM, self)._do_mstep(stats, params)
        # All that is left to do is to apply covars_prior to the
        # parameters updated in _accumulate_sufficient_statistics.
        for state, g in enumerate(self.gmms_):
            n_features = g.means_.shape[1]
            norm = stats['norm'][state]
            if 'w' in params:
                g.weights_ = normalize(norm)
            if 'm' in params:
                g.means_ = stats['means'][state] / norm[:, np.newaxis]
            if 'c' in params:
                if g.covariance_type == 'tied':
                    g.covars_ = ((stats['covars'][state]
                                  + self.covars_prior * np.eye(n_features))
                                 / norm.sum())
                else:
                    cvnorm = np.copy(norm)
                    shape = np.ones(g.covars_.ndim)
                    shape[0] = np.shape(g.covars_)[0]
                    cvnorm.shape = shape
                    if (g.covariance_type in ['spherical', 'diag']):
                        g.covars_ = (stats['covars'][state] +
                                     self.covars_prior) / cvnorm
                    elif g.covariance_type == 'full':
                        eye = np.eye(n_features)
                        g.covars_ = ((stats['covars'][state]
                                      + self.covars_prior * eye[np.newaxis])
                                     / cvnorm)
| mrshu/scikit-learn | sklearn/hmm.py | Python | bsd-3-clause | 46,104 | [
"Gaussian"
] | 70e957ae654d0c18a2d35d4ae39d83649a64a4d8700509a2cd9957576c2a31b0 |
import logging
import threading
import time
import re
from collections import defaultdict
from typing import Optional, Union
import pytz
from doctor_schedule.models import ScheduleResource
from laboratory.settings import SYSTEM_AS_VI, SOME_LINKS, DISABLED_FORMS, DISABLED_STATISTIC_CATEGORIES, DISABLED_STATISTIC_REPORTS, TIME_ZONE
from utils.response import status_response
from django.core.validators import validate_email
from django.db.utils import IntegrityError
from utils.data_verification import as_model, data_parse
import simplejson as json
import yaml
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group, User
from django.core.cache import cache
from django.db import connections, transaction
from django.db.models import Q, Prefetch
from django.http import JsonResponse
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
import api.models as models
import directions.models as directions
import users.models as users
from contracts.models import Company
from api import fias
from appconf.manager import SettingManager
from barcodes.views import tubes
from clients.models import CardBase, Individual, Card, Document, District
from context_processors.utils import menu
from directory.models import Fractions, ParaclinicInputField, ParaclinicUserInputTemplateField, ResearchSite, Culture, Antibiotic, ResearchGroup, Researches as DResearches, ScreeningPlan
from doctor_call.models import DoctorCall
from external_system.models import FsliRefbookTest
from hospitals.models import Hospitals, DisableIstochnikiFinansirovaniya
from laboratory.decorators import group_required
from laboratory.utils import strdatetime
from pharmacotherapy.models import Drugs
from podrazdeleniya.models import Podrazdeleniya
from slog import models as slog
from slog.models import Log
from statistics_tickets.models import VisitPurpose, ResultOfTreatment, StatisticsTicket, Outcomes, ExcludePurposes
from tfoms.integration import match_enp
from utils.common import non_selected_visible_type
from utils.dates import try_parse_range, try_strptime
from utils.nsi_directories import NSI
from .sql_func import users_by_group, users_all, get_diagnoses, get_resource_researches
from laboratory.settings import URL_RMIS_AUTH, URL_ELN_MADE, URL_SCHEDULE
import urllib.parse
# Module-level logger shared by all API views below.
logger = logging.getLogger("API")
def translit(locallangstring):
    """Transliterate Cyrillic characters in *locallangstring* to Latin.

    Characters without a mapping (Latin letters, digits, punctuation)
    are passed through unchanged.

    :param locallangstring: source string, may mix Cyrillic and other chars
    :return: transliterated string
    """
    conversion = {
        u'\u0410': 'A', u'\u0430': 'a',
        u'\u0411': 'B', u'\u0431': 'b',
        u'\u0412': 'V', u'\u0432': 'v',
        u'\u0413': 'G', u'\u0433': 'g',
        u'\u0414': 'D', u'\u0434': 'd',
        u'\u0415': 'E', u'\u0435': 'e',
        u'\u0401': 'Yo', u'\u0451': 'yo',
        u'\u0416': 'Zh', u'\u0436': 'zh',
        u'\u0417': 'Z', u'\u0437': 'z',
        u'\u0418': 'I', u'\u0438': 'i',
        u'\u0419': 'Y', u'\u0439': 'y',
        u'\u041a': 'K', u'\u043a': 'k',
        u'\u041b': 'L', u'\u043b': 'l',
        u'\u041c': 'M', u'\u043c': 'm',
        u'\u041d': 'N', u'\u043d': 'n',
        u'\u041e': 'O', u'\u043e': 'o',
        u'\u041f': 'P', u'\u043f': 'p',
        u'\u0420': 'R', u'\u0440': 'r',
        u'\u0421': 'S', u'\u0441': 's',
        u'\u0422': 'T', u'\u0442': 't',
        u'\u0423': 'U', u'\u0443': 'u',
        u'\u0424': 'F', u'\u0444': 'f',
        u'\u0425': 'H', u'\u0445': 'h',
        u'\u0426': 'Ts', u'\u0446': 'ts',
        u'\u0427': 'Ch', u'\u0447': 'ch',
        u'\u0428': 'Sh', u'\u0448': 'sh',
        u'\u0429': 'Sch', u'\u0449': 'sch',
        u'\u042a': '', u'\u044a': '',
        u'\u042b': 'Y', u'\u044b': 'y',
        u'\u042c': '', u'\u044c': '',
        u'\u042d': 'E', u'\u044d': 'e',
        u'\u042e': 'Yu', u'\u044e': 'yu',
        u'\u042f': 'Ya', u'\u044f': 'ya',
    }
    # dict.get replaces the original setdefault() lookup, which needlessly
    # mutated the mapping for every character it had never seen.
    return ''.join(conversion.get(c, c) for c in locallangstring)
@csrf_exempt
def send(request):
    """
    Save analyzer (e.g. Sysmex) results delivered as a YAML payload.

    Resolves the tube/direction from ``pk`` (optionally via a GS1-style
    barcode when ``bydirection`` is passed), maps incoming fields to
    fractions through RelationFractionASTM, writes Result rows and logs
    the operation.

    :param request: GET or POST with ``result`` (YAML) and ``key`` (app key)
    :return: JsonResponse {"ok": bool, ...}
    """
    result = {"ok": False}
    try:
        # NOTE(review): yaml.load on request data without an explicit safe
        # Loader is unsafe for untrusted input -- consider yaml.safe_load.
        if request.method == "POST":
            resdict = yaml.load(request.POST["result"])
            appkey = request.POST.get("key", "")
        else:
            resdict = yaml.load(request.GET["result"])
            appkey = request.GET.get("key", "")
        astm_user = users.DoctorProfile.objects.filter(user__username="astm").first()
        app = models.Application.objects.filter(key=appkey, active=True).first()
        resdict["pk"] = int(resdict.get("pk", -111))
        if "LYMPH%" in resdict["result"]:
            resdict["orders"] = {}
        dpk = -1
        if ("bydirection" in request.POST or "bydirection" in request.GET) and not app.tube_work:
            # "pk" is a direction number here; translate it to a tube pk.
            dpk = resdict["pk"]
            if dpk >= 4600000000000:
                # GS1-style barcode: strip the prefix and the check digit.
                dpk -= 4600000000000
                dpk //= 10
            tubes(request, direction_implict_id=dpk)
            if directions.TubesRegistration.objects.filter(issledovaniya__napravleniye__pk=dpk, issledovaniya__time_confirmation__isnull=True).exists():
                resdict["pk"] = directions.TubesRegistration.objects.filter(issledovaniya__napravleniye__pk=dpk, issledovaniya__time_confirmation__isnull=True).order_by("pk").first().pk
            else:
                resdict["pk"] = False
        result["A"] = appkey
        direction = None
        if resdict["pk"] and app:
            if app.tube_work:
                direction = directions.Napravleniya.objects.filter(issledovaniya__tubes__pk=resdict["pk"]).first()
            elif directions.TubesRegistration.objects.filter(pk=resdict["pk"]).exists():
                tubei = directions.TubesRegistration.objects.get(pk=resdict["pk"])
                direction = tubei.issledovaniya_set.first().napravleniye
            pks = []
            for key in resdict["result"].keys():
                # Map the analyzer field name to one or more fractions.
                if models.RelationFractionASTM.objects.filter(astm_field=key).exists():
                    fractionRels = models.RelationFractionASTM.objects.filter(astm_field=key)
                    for fractionRel in fractionRels:
                        fraction = fractionRel.fraction
                        if directions.Issledovaniya.objects.filter(napravleniye=direction, research=fraction.research, time_confirmation__isnull=True).exists():
                            issled = directions.Issledovaniya.objects.filter(napravleniye=direction, research=fraction.research, time_confirmation__isnull=True).order_by("pk")[0]
                            if directions.Result.objects.filter(issledovaniye=issled, fraction=fraction).exists():
                                fraction_result = directions.Result.objects.filter(issledovaniye=issled, fraction__pk=fraction.pk).order_by("-pk")[0]
                            else:
                                fraction_result = directions.Result(issledovaniye=issled, fraction=fraction)
                            fraction_result.value = str(resdict["result"][key]).strip()  # Set the raw value
                            if 'Non-React' in fraction_result.value:
                                fraction_result.value = 'Отрицательно'
                            if fraction_result.value.isdigit():
                                fraction_result.value = "%s.0" % fraction_result.value
                            # Apply the configured multiplier/rounding to the
                            # first numeric token in the value, if any.
                            find = re.findall(r"\d+.\d+", fraction_result.value)
                            if len(find) > 0:
                                val = float(find[0]) * fractionRel.get_multiplier_display()
                                val = app.auto_set_places(fractionRel, val)
                                fraction_result.value = fraction_result.value.replace(find[0], str(val))
                            fraction_result.iteration = 1  # Set the iteration number
                            ref = fractionRel.default_ref
                            if ref:
                                fraction_result.ref_title = ref.title
                                fraction_result.ref_about = ref.about
                                fraction_result.ref_m = ref.m
                                fraction_result.ref_f = ref.f
                            fraction_result.save()  # Persist the result
                            issled.api_app = app
                            issled.save()
                            fraction_result.get_ref(re_save=True)
                            fraction_result.issledovaniye.doc_save = astm_user  # Who saved the result
                            fraction_result.issledovaniye.time_save = timezone.now()  # When it was saved
                            fraction_result.issledovaniye.save()
                            if issled not in pks:
                                pks.append(issled)
            slog.Log(key=appkey, type=22, body=json.dumps(resdict), user=None).save()
            result["ok"] = True
        elif not directions.TubesRegistration.objects.filter(pk=resdict["pk"]).exists():
            if dpk > -1:
                resdict["pk"] = dpk
            slog.Log(key=resdict["pk"], type=23, body=json.dumps(resdict), user=None).save()
    except Exception as e:
        result = {"ok": False, "Exception": True, "MSG": str(e)}
    return JsonResponse(result)
@csrf_exempt
def endpoint(request):
    """Universal analyzer integration endpoint.

    Dispatches on ``message_type`` in the JSON payload:

    * ``"C"``/``"R"`` -- save laboratory results (fractions mapped via
      RelationFractionASTM);
    * ``"R_BAC"`` -- save microbiology culture and antibiotic results;
    * ``"Q"`` -- query which ASTM field names are expected for the
      given tube/direction pks.

    Every request is written to the Log (type 6000) with both the
    incoming data and the outgoing answer.
    """
    result = {"answer": False, "body": "", "patientData": {}}
    data = json.loads(request.POST.get("result", request.GET.get("result", "{}")))
    api_key = request.POST.get("key", request.GET.get("key", ""))
    message_type = data.get("message_type", "C")
    pk_s = str(data.get("pk", "")).strip()
    iss_s = str(data.get("iss_pk", "-1")).strip()
    # Non-numeric pks fall back to -1 ("not provided").
    pk = -1 if not pk_s.isdigit() else int(pk_s)
    iss_pk = -1 if not iss_s.isdigit() else int(iss_s)
    data["app_name"] = "API key is incorrect"
    # pid = data.get("processing_id", "P")
    if models.Application.objects.filter(key=api_key).exists():
        astm_user = users.DoctorProfile.objects.filter(user__username="astm").first()
        if astm_user is None:
            # Fall back to any staff profile when the dedicated user is absent.
            astm_user = users.DoctorProfile.objects.filter(user__is_staff=True).order_by("pk").first()
        app = models.Application.objects.get(key=api_key)
        if app.active:
            data["app_name"] = app.name
            if message_type == "R" or data.get("result") or message_type == "R_BAC":
                if pk != -1 or iss_pk != -1:
                    direction: Union[directions.Napravleniya, None] = None
                    dw = app.direction_work or message_type == "R_BAC"
                    if pk >= 4600000000000:
                        # GS1-style barcode: strip the prefix and check digit.
                        pk -= 4600000000000
                        pk //= 10
                        dw = True
                    # Resolve the direction from iss_pk, direction pk or tube pk.
                    if pk == -1:
                        iss = directions.Issledovaniya.objects.filter(pk=iss_pk)
                        if iss.exists():
                            direction = iss[0].napravleniye
                    elif dw:
                        direction = directions.Napravleniya.objects.filter(pk=pk).first()
                    else:
                        direction = directions.Napravleniya.objects.filter(issledovaniya__tubes__pk=pk).first()
                    pks = []
                    oks = []
                    if direction is not None:
                        if message_type == "R" or (data.get("result") and message_type == 'C'):
                            # Ordinary lab results.
                            result["patientData"] = {
                                "fio": direction.client.individual.fio(short=True),
                                "card": direction.client.number_with_type(),
                            }
                            result["patientData"]["fioTranslit"] = translit(result["patientData"]["fio"])
                            result["patientData"]["cardTranslit"] = translit(result["patientData"]["card"])
                            results = data.get("result", {})
                            for key in results:
                                # App-specific field mappings win over global ones.
                                ok = False
                                q = models.RelationFractionASTM.objects.filter(astm_field=key)
                                if q.filter(application_api=app).exists():
                                    q = q.filter(application_api=app)
                                    ok = True
                                elif q.filter(application_api__isnull=True).exists():
                                    q = q.filter(application_api__isnull=True)
                                    ok = True
                                if ok:
                                    for fraction_rel in q:
                                        save_state = []
                                        issleds = []
                                        for issled in directions.Issledovaniya.objects.filter(
                                            napravleniye=direction, research=fraction_rel.fraction.research, time_confirmation__isnull=True
                                        ):
                                            if directions.Result.objects.filter(issledovaniye=issled, fraction=fraction_rel.fraction).exists():
                                                fraction_result = directions.Result.objects.filter(issledovaniye=issled, fraction=fraction_rel.fraction).order_by("-pk")[0]
                                            else:
                                                fraction_result = directions.Result(issledovaniye=issled, fraction=fraction_rel.fraction)
                                            fraction_result.value = str(results[key]).strip()
                                            if 'Non-React' in fraction_result.value:
                                                fraction_result.value = 'Отрицательно'
                                            # Scale every numeric token by the configured multiplier.
                                            find = re.findall(r"\d+.\d+", fraction_result.value)
                                            if len(find) == 0 and fraction_result.value.isdigit():
                                                find = [fraction_result.value]
                                            if len(find) > 0:
                                                val_str = fraction_result.value
                                                for f in find:
                                                    try:
                                                        val = float(f) * fraction_rel.get_multiplier_display()
                                                        val = app.auto_set_places(fraction_rel, val)
                                                        val_str = val_str.replace(f, str(val))
                                                    except Exception as e:
                                                        logger.exception(e)
                                                fraction_result.value = val_str
                                            fraction_result.iteration = 1
                                            ref = fraction_rel.default_ref
                                            if ref:
                                                fraction_result.ref_title = ref.title
                                                fraction_result.ref_about = ref.about
                                                fraction_result.ref_m = ref.m
                                                fraction_result.ref_f = ref.f
                                            fraction_result.save()
                                            issled.api_app = app
                                            issled.save()
                                            fraction_result.get_ref(re_save=True)
                                            fraction_result.issledovaniye.doc_save = astm_user
                                            fraction_result.issledovaniye.time_save = timezone.now()
                                            fraction_result.issledovaniye.save()
                                            save_state.append({"fraction": fraction_result.fraction.title, "value": fraction_result.value})
                                            issleds.append({"pk": issled.pk, "title": issled.research.title})
                                            if issled not in pks:
                                                pks.append(issled)
                                oks.append(ok)
                        elif message_type == "R_BAC":
                            # Microbiology: culture plus antibiotic sensitivities.
                            mo = data.get('mo')
                            if mo:
                                code = mo.get('code')
                                name = mo.get('name')
                                anti = data.get('anti', {})
                                comments = data.get('comments', [])
                                if code:
                                    culture = Culture.objects.filter(lis=code).first()
                                    iss = directions.Issledovaniya.objects.filter(napravleniye=direction, time_confirmation__isnull=True, research__is_microbiology=True)
                                    if iss.filter(pk=iss_pk).exists():
                                        iss = iss.filter(pk=iss_pk)
                                    iss = iss.first()
                                    if not culture:
                                        print('NO CULTURE', code, name)  # noqa: T001
                                    elif not iss:
                                        print('IGNORED')  # noqa: T001
                                    else:
                                        directions.MicrobiologyResultCulture.objects.filter(issledovaniye=iss, culture=culture).delete()
                                        # S:/R:/I: lines encode sensitivities, not free-text comments.
                                        comments = '\n'.join(
                                            [c["text"] for c in comments if not c["text"].startswith('S:') and not c["text"].startswith('R:') and not c["text"].startswith('I:')]
                                        )
                                        culture_result = directions.MicrobiologyResultCulture(issledovaniye=iss, culture=culture, comments=comments)
                                        culture_result.save()
                                        for a in anti:
                                            anti_r = anti[a]
                                            anti_obj = Antibiotic.objects.filter(lis=a).first()
                                            if anti_obj and anti_r.get('RSI'):
                                                a_name = anti_r.get('name', '').replace('µg', 'мг')
                                                # Keep only the trailing "amount unit" pair of the name.
                                                a_name_parts = a_name.split()
                                                a_name = a_name_parts[-2] + ' ' + a_name_parts[-1]
                                                anti_result = directions.MicrobiologyResultCultureAntibiotic(
                                                    result_culture=culture_result,
                                                    antibiotic=anti_obj,
                                                    sensitivity=anti_r.get('RSI'),
                                                    dia=anti_r.get('dia', ''),
                                                    antibiotic_amount=a_name,
                                                )
                                                anti_result.save()
                    result["body"] = "{} {} {} {} {}".format(dw, pk, iss_pk, json.dumps(oks), direction is not None)
                else:
                    result["body"] = "pk '{}' is not exists".format(pk_s)
            elif message_type == "Q":
                # Query mode: report expected ASTM field names per tube pk.
                result["answer"] = True
                pks = [int(x) for x in data.get("query", []) if isinstance(x, int) or (isinstance(x, str) and x.isdigit())]
                researches = defaultdict(list)
                for row in app.get_issledovaniya(pks):
                    k = row["pk"]
                    i = row["iss"]
                    result["patientData"] = {
                        "fio": i.napravleniye.client.individual.fio(short=True),
                        "card": i.napravleniye.client.number_with_type(),
                    }
                    result["patientData"]["fioTranslit"] = translit(result["patientData"]["fio"])
                    result["patientData"]["cardTranslit"] = translit(result["patientData"]["card"])
                    for fraction in Fractions.objects.filter(research=i.research, hide=False):
                        rel = models.RelationFractionASTM.objects.filter(fraction=fraction, application_api=app)
                        if not rel.exists():
                            continue
                        # rel = models.RelationFractionASTM.objects.filter(fraction=fraction)
                        # if not rel.exists():
                        #     continue
                        rel = rel[0]
                        researches[k].append(rel.astm_field)
                result["body"] = researches
            else:
                pass
        else:
            data["app_name"] = "API app banned " + api_key
            result["body"] = "API app banned " + api_key
    else:
        result["body"] = "API key is incorrect"
    slog.Log(key=pk, type=6000, body=json.dumps({"data": data, "answer": result}), user=None).save()
    return JsonResponse(result)
@login_required
def departments(request):
    """List or edit departments (Podrazdeleniya) of a hospital.

    The JSON body selects the mode via ``method``: "GET" returns the
    department list plus available types; anything else performs an
    "update" or "insert" batch edit (requires edit permissions).

    :param request: request whose body is JSON with keys ``method``,
        ``withoutDefault``, ``hospital``, ``type``, ``data``
    :return: JsonResponse
    """
    req = json.loads(request.body)
    method = req.get('method', 'GET')
    without_default = req.get('withoutDefault', False)
    current_user_hospital_id = request.user.doctorprofile.get_hospital_id() or -1
    hospital_pk = req.get('hospital', current_user_hospital_id)
    su = request.user.is_superuser or request.user.doctorprofile.all_hospitals_users_control
    if hospital_pk == -1:
        hospital_pk = None
    # Only superusers/all-hospital admins may look at other hospitals.
    if hospital_pk != current_user_hospital_id and not su:
        return JsonResponse({"ok": False})
    can_edit = su or request.user.doctorprofile.has_group('Создание и редактирование пользователей')
    if method == "GET":
        if without_default:
            qs = Podrazdeleniya.objects.filter(hospital_id=hospital_pk).order_by("pk")
        else:
            # Include hospital-agnostic (default) departments as well.
            qs = Podrazdeleniya.objects.filter(Q(hospital_id=hospital_pk) | Q(hospital__isnull=True)).order_by("pk")
        deps = [{"pk": x.pk, "title": x.get_title(), "type": str(x.p_type), "oid": x.oid} for x in qs]
        en = SettingManager.en()
        more_types = []
        if SettingManager.is_morfology_enabled(en):
            more_types.append({"pk": str(Podrazdeleniya.MORFOLOGY), "title": "Морфология"})
        data = {
            "departments": deps,
            "can_edit": can_edit,
            "types": [*[{"pk": str(x[0]), "title": x[1]} for x in Podrazdeleniya.TYPES if x[0] not in [8, 12] and en.get(x[0], True)], *more_types],
        }
        if hasattr(request, 'plain_response') and request.plain_response:
            return data
        return JsonResponse(data)
    if can_edit:
        ok = False
        message = ""
        try:
            data_type = req.get("type", "update")
            rows = req.get("data", [])
            if data_type == "update":
                for row in rows:
                    title = row["title"].strip()
                    if len(title) > 0:
                        department = Podrazdeleniya.objects.get(pk=row["pk"])
                        department.title = title
                        department.p_type = int(row["type"])
                        department.hospital_id = hospital_pk
                        department.oid = row.get("oid", '')
                        department.save()
                        ok = True
            elif data_type == "insert":
                for row in rows:
                    title = row["title"].strip()
                    if len(title) > 0:
                        department = Podrazdeleniya(title=title, p_type=int(row["type"]), hospital_id=hospital_pk, oid=row.get("oid", ''))
                        department.save()
                        ok = True
        except Exception as e:
            # The original ``try/...finally: return`` silently swallowed every
            # exception and ``message`` could never be populated; surface the
            # failure reason in the response instead.
            ok = False
            message = str(e)
        return JsonResponse({"ok": ok, "message": message})
    # The original ``JsonResponse(0)`` raised TypeError at runtime
    # (JsonResponse requires a dict unless safe=False).
    return JsonResponse({"ok": False, "message": "Access denied"})
@login_required
def otds(request):
    """Return all departments of type DEPARTMENT plus an "all" pseudo-entry."""
    rows = [{"id": -1, "label": "Все отделения"}]
    department_qs = Podrazdeleniya.objects.filter(p_type=Podrazdeleniya.DEPARTMENT).order_by("title")
    rows.extend({"id": d.pk, "label": d.title} for d in department_qs)
    return JsonResponse({"rows": rows})
@login_required
def laboratory_journal_params(request):
    """Return financing sources and research groups for the lab-journal filters."""
    fin_rows = [
        {"id": f.pk, "label": f"{f.base.title} – {f.title}"}
        for f in directions.IstochnikiFinansirovaniya.objects.all().order_by("pk").order_by("base")
    ]
    group_rows = [
        {"id": -2, "label": "Все исследования"},
        {"id": -1, "label": "Без группы"},
    ]
    group_rows.extend({"id": g.pk, "label": f"{g.lab.get_title()} – {g.title}"} for g in ResearchGroup.objects.all())
    return JsonResponse({"fin": fin_rows, "groups": group_rows})
def bases(request):
    """Return card bases with their (per-user filtered) financing sources.

    The response is cached per user for 100 seconds.
    """
    k = f'view:bases:{request.user.pk}'
    # Financing sources disabled for the user's hospital plus sources
    # disabled on the user's own profile.
    disabled_fin_source = [i.fin_source.pk for i in DisableIstochnikiFinansirovaniya.objects.filter(
        hospital_id=request.user.doctorprofile.hospital_id)] if request.user.is_authenticated else []
    user_disabled_fin_source = [x for x in users.DoctorProfile.objects.values_list('disabled_fin_source', flat=True).filter(
        pk=request.user.doctorprofile.pk) if x is not None] if request.user.is_authenticated else []
    disabled_fin_source.extend(user_disabled_fin_source)
    ret = cache.get(k)
    if not ret:
        ret = {
            "bases": [
                {
                    "pk": x.pk,
                    "title": x.title,
                    "code": x.short_title,
                    "hide": x.hide,
                    "history_number": x.history_number,
                    "internal_type": x.internal_type,
                    "fin_sources": [{"pk": y.pk, "title": y.title, "default_diagnos": y.default_diagnos} for y in x.istochnikifinansirovaniya_set.all()],
                }
                for x in CardBase.objects.all()
                .prefetch_related(Prefetch('istochnikifinansirovaniya_set', directions.IstochnikiFinansirovaniya.objects.filter(hide=False).
                                           exclude(pk__in=disabled_fin_source).order_by('-order_weight')))
                .order_by('-order_weight')
            ]
        }
        cache.set(k, ret, 100)
    # Internal callers (server-side composition) get the raw dict.
    if hasattr(request, 'plain_response') and request.plain_response:
        return ret
    return JsonResponse(ret)
@ensure_csrf_cookie
def current_user_info(request):
    """Return the current user's profile, groups and frontend settings.

    The payload is assembled by two DB-bound tasks (profile data and settings)
    executed in parallel threads to cut request latency; each task closes its
    own DB connections because it runs outside the request's thread.
    """
    user = request.user
    ret = {
        "auth": user.is_authenticated,
        "doc_pk": -1,
        "username": "",
        "fio": "",
        "department": {"pk": -1, "title": ""},
        "groups": [],
        "eds_token": None,
        "modules": SettingManager.l2_modules(),
        "user_services": [],
        "loading": False,
    }
    if ret["auth"]:

        def fill_user_data():
            # Profile plus its m2m pk-lists in a minimal number of queries
            doctorprofile = (
                users.DoctorProfile.objects.prefetch_related(
                    Prefetch(
                        'restricted_to_direct',
                        queryset=DResearches.objects.only('pk'),
                    ),
                    Prefetch(
                        'users_services',
                        queryset=DResearches.objects.only('pk'),
                    ),
                )
                .select_related('podrazdeleniye')
                .get(user_id=user.pk)
            )
            ret["fio"] = doctorprofile.get_full_fio()
            ret["email"] = doctorprofile.email or ''
            ret["doc_pk"] = doctorprofile.pk
            ret["rmis_location"] = doctorprofile.rmis_location
            ret["rmis_login"] = doctorprofile.rmis_login
            ret["rmis_password"] = doctorprofile.rmis_password
            ret["department"] = {"pk": doctorprofile.podrazdeleniye_id, "title": doctorprofile.podrazdeleniye.title}
            ret["restricted"] = [x.pk for x in doctorprofile.restricted_to_direct.all()]
            # BUGFIX: compare pks with pks. The previous membership test compared
            # model instances against a list of ints, so it never excluded anything.
            ret["user_services"] = [x.pk for x in doctorprofile.users_services.all() if x.pk not in ret["restricted"]]
            ret["hospital"] = doctorprofile.get_hospital_id()
            ret["hospital_title"] = doctorprofile.get_hospital_title()
            ret["all_hospitals_users_control"] = doctorprofile.all_hospitals_users_control
            ret["specialities"] = [] if not doctorprofile.specialities else [doctorprofile.specialities.title]
            ret["groups"] = list(user.groups.values_list('name', flat=True))
            if SYSTEM_AS_VI:
                # The VI flavour renames the card-index group for the frontend
                for i in range(len(ret["groups"])):
                    if ret["groups"][i] == 'Картотека L2':
                        ret["groups"][i] = 'Картотека'
            if user.is_superuser:
                ret["groups"].append("Admin")
            ret["eds_allowed_sign"] = doctorprofile.get_eds_allowed_sign() if ret['modules'].get('l2_eds') else []
            ret["can_edit_all_department"] = doctorprofile.all_hospitals_users_control
            try:
                # this task runs in its own thread — release its DB connections
                connections.close_all()
            except Exception as e:
                print(f"Error closing connections {e}")  # noqa: T001

        def fill_settings():
            ret["su"] = user.is_superuser
            ret["username"] = user.username
            ret["modules"] = SettingManager.l2_modules()
            ret["rmis_enabled"] = SettingManager.get("rmis_enabled", default='false', default_type='b')
            ret["directions_params_org_form_default_pk"] = SettingManager.get("directions_params_org_form_default_pk", default='', default_type='s')
            en = SettingManager.en()
            ret["extended_departments"] = {}
            st_base = ResearchSite.objects.filter(hide=False).order_by('order', 'title')
            sites_by_types = {}
            for s in st_base:
                if s.site_type not in sites_by_types:
                    sites_by_types[s.site_type] = []
                sites_by_types[s.site_type].append({"pk": s.pk, "title": s.title, "type": s.site_type, "extended": True, 'e': s.site_type + 4})
            # Here 13 = applications, 11 = forms, 7 = forms minus the 4-offset
            if 13 in en and 11 in en:
                if 7 not in sites_by_types:
                    sites_by_types[7] = []
                if SettingManager.get("l2_applications"):
                    sites_by_types[7].append(
                        {
                            "pk": -13,
                            "title": "Заявления",
                            "type": 7,
                            "extended": True,
                            'e': 11,
                        }
                    )
            for e in en:
                # entries below 4 are not extended departments; 13 handled above
                if e < 4 or not en[e] or e == 13:
                    continue
                t = e - 4
                has_def = DResearches.objects.filter(hide=False, site_type__isnull=True, **DResearches.filter_type(e)).exists()
                if has_def and e != 12:
                    d = [{"pk": None, "title": 'Общие', 'type': t, "extended": True}]
                else:
                    d = []
                ret["extended_departments"][e] = [*d, *sites_by_types.get(t, [])]
            if SettingManager.is_morfology_enabled(en):
                ret["extended_departments"][Podrazdeleniya.MORFOLOGY] = []
                if en.get(8):
                    ret["extended_departments"][Podrazdeleniya.MORFOLOGY].append(
                        {"pk": Podrazdeleniya.MORFOLOGY + 1, "title": "Микробиология", "type": Podrazdeleniya.MORFOLOGY, "extended": True, "e": Podrazdeleniya.MORFOLOGY}
                    )
                if en.get(9):
                    ret["extended_departments"][Podrazdeleniya.MORFOLOGY].append(
                        {"pk": Podrazdeleniya.MORFOLOGY + 2, "title": "Цитология", "type": Podrazdeleniya.MORFOLOGY, "extended": True, "e": Podrazdeleniya.MORFOLOGY}
                    )
                if en.get(10):
                    ret["extended_departments"][Podrazdeleniya.MORFOLOGY].append(
                        {"pk": Podrazdeleniya.MORFOLOGY + 3, "title": "Гистология", "type": Podrazdeleniya.MORFOLOGY, "extended": True, "e": Podrazdeleniya.MORFOLOGY}
                    )
            try:
                # this task runs in its own thread — release its DB connections
                connections.close_all()
            except Exception as e:
                print(f"Error closing connections {e}")  # noqa: T001

        t1 = threading.Thread(target=fill_user_data)
        t2 = threading.Thread(target=fill_settings)
        t1.start()
        t2.start()
        t1.join()
        t2.join()
    if hasattr(request, 'plain_response') and request.plain_response:
        return ret
    return JsonResponse(ret)
def get_menu(request):
    """Return main-menu buttons, the app version and the configured region code."""
    menu_data = menu(request)
    payload = {
        "buttons": menu_data["mainmenu"],
        "version": menu_data["version"],
        "region": SettingManager.get("region", default='38', default_type='s'),
    }
    return JsonResponse(payload)
@login_required
def directive_from(request):
    """List departments of the user's hospital with their attending doctors."""
    hospital = request.user.doctorprofile.hospital
    doctors_qs = (
        users.DoctorProfile.objects.filter(user__groups__name__in=["Лечащий врач", "Врач параклиники"])
        .distinct('fio', 'pk')
        .filter(Q(hospital=hospital) | Q(hospital__isnull=True))
        .order_by("fio")
    )
    departments_qs = (
        Podrazdeleniya.objects.filter(p_type__in=(Podrazdeleniya.DEPARTMENT, Podrazdeleniya.HOSP, Podrazdeleniya.PARACLINIC), hospital__in=(hospital, None))
        .prefetch_related(Prefetch('doctorprofile_set', queryset=doctors_qs))
        .order_by('title')
        .only('pk', 'title')
    )
    data = [
        {
            "pk": dep.pk,
            "title": dep.title,
            "docs": [{"pk": doc.pk, "fio": doc.get_full_fio()} for doc in dep.doctorprofile_set.all()],
        }
        for dep in departments_qs
    ]
    result = {"data": data}
    if hasattr(request, 'plain_response') and request.plain_response:
        return result
    return JsonResponse(result)
@group_required("Оформление статталонов", "Лечащий врач", "Оператор лечащего врача")
def statistics_tickets_types(request):
    """Return the selectable dictionaries used by statistics-ticket forms."""
    dictionaries = {
        "visit": VisitPurpose,
        "result": ResultOfTreatment,
        "outcome": Outcomes,
        "exclude": ExcludePurposes,
    }
    return JsonResponse({key: non_selected_visible_type(model) for key, model in dictionaries.items()})
@group_required("Оформление статталонов", "Лечащий врач", "Оператор лечащего врача")
def statistics_tickets_send(request):
    """Create a statistics ticket (статталон) from the POSTed JSON payload.

    The responsible doctor is the one referenced by `ofname` when valid,
    otherwise the current user's profile. The raw payload is logged (type 7000).
    """
    response = {"ok": True}
    rd = json.loads(request.body)
    ofname = rd.get("ofname") or -1
    doc = None
    if ofname > -1:
        # Single query instead of exists() + get(); stays None for unknown pks.
        doc = users.DoctorProfile.objects.filter(pk=ofname).first()
    t = StatisticsTicket(
        card=Card.objects.get(pk=rd["card_pk"]),
        purpose=VisitPurpose.objects.filter(pk=rd["visit"]).first(),
        result=ResultOfTreatment.objects.filter(pk=rd["result"]).first(),
        info=rd["info"].strip(),
        first_time=rd["first_time"],
        primary_visit=rd["primary_visit"],
        dispensary_registration=int(rd["disp"]),
        doctor=doc or request.user.doctorprofile,
        creator=request.user.doctorprofile,
        outcome=Outcomes.objects.filter(pk=rd["outcome"]).first(),
        dispensary_exclude_purpose=ExcludePurposes.objects.filter(pk=rd["exclude"]).first(),
        dispensary_diagnos=rd["disp_diagnos"],
        date_ticket=rd.get("date_ticket", None),
    )
    t.save()
    Log(key="", type=7000, body=json.dumps(rd), user=request.user.doctorprofile).save()
    return JsonResponse(response)
@group_required("Оформление статталонов", "Лечащий врач", "Оператор лечащего врача")
def statistics_tickets_get(request):
    """List the current user's statistics tickets for the requested date range."""
    response = {"data": []}
    request_data = json.loads(request.body)
    date_start, date_end = try_parse_range(request_data["date"])
    me = request.user.doctorprofile
    tickets = (
        StatisticsTicket.objects.filter(Q(doctor=me) | Q(creator=me))
        .filter(date__range=(date_start, date_end))
        .order_by('pk')
    )
    counter = 0
    for ticket in tickets:
        # valid tickets get a running ordinal; invalidated ones show blank
        if not ticket.invalid_ticket:
            counter += 1
        disp_text = ticket.get_dispensary_registration_display()
        if ticket.dispensary_diagnos != "":
            disp_text += " (" + ticket.dispensary_diagnos + ")"
        if ticket.dispensary_exclude_purpose:
            disp_text += " (" + ticket.dispensary_exclude_purpose.title + ")"
        response["data"].append(
            {
                "pk": ticket.pk,
                "n": counter if not ticket.invalid_ticket else '',
                "doc": ticket.doctor.get_fio(),
                "date_ticket": ticket.get_date(),
                "department": ticket.doctor.podrazdeleniye.get_title(),
                # NB: the "patinet" typo is part of the frontend API contract
                "patinet": ticket.card.individual.fio(full=True),
                "card": ticket.card.number_with_type(),
                "purpose": ticket.purpose.title if ticket.purpose else "",
                "first_time": ticket.first_time,
                "primary": ticket.primary_visit,
                "info": ticket.info,
                "disp": disp_text,
                "result": ticket.result.title if ticket.result else "",
                "outcome": ticket.outcome.title if ticket.outcome else "",
                "invalid": ticket.invalid_ticket,
                "can_invalidate": ticket.can_invalidate(),
            }
        )
    return JsonResponse(response)
@group_required("Оформление статталонов", "Лечащий врач", "Оператор лечащего врача")
def statistics_tickets_invalidate(request):
    """Toggle the invalid flag on one of the current user's statistics tickets.

    Only tickets owned (doctor or creator) by the current user can be changed,
    and only while `can_invalidate()` allows it; the change is logged (type 7001).
    """
    response = {"ok": False, "message": ""}
    request_data = json.loads(request.body)
    # One fetch instead of the previous exists() + get() + filter() triple.
    ticket = (
        StatisticsTicket.objects.filter(Q(doctor=request.user.doctorprofile) | Q(creator=request.user.doctorprofile))
        .filter(pk=request_data.get("pk", -1))
        .first()
    )
    if ticket:
        if ticket.can_invalidate():
            ticket.invalid_ticket = request_data.get("invalid", False)
            ticket.save()
            response["ok"] = True
            Log(key=str(request_data["pk"]), type=7001, body=json.dumps(request_data.get("invalid", False)), user=request.user.doctorprofile).save()
        else:
            response["message"] = "Время на отмену или возврат истекло"
    return JsonResponse(response)
def delete_keys_from_dict(dict_del, lst_keys):
    """Recursively remove every key in *lst_keys* from *dict_del*, in place.

    Nested dicts and dicts found inside list values are processed too.
    Returns the same (mutated) dict for call-chaining convenience.
    """
    for k in lst_keys:
        # pop() with a default replaces the try/del/except KeyError dance
        dict_del.pop(k, None)
    for v in dict_del.values():
        if isinstance(v, dict):
            delete_keys_from_dict(v, lst_keys)
        elif isinstance(v, list):
            for item in v:
                # BUGFIX: recurse only into dict elements; the previous code
                # recursed into every element and crashed on e.g. strings/ints.
                if isinstance(item, dict):
                    delete_keys_from_dict(item, lst_keys)
    return dict_del
def get_reset_time_vars(n):
    """Return (marker_doc_pk, now_ts, visit_ts, reset_window_seconds) for direction *n*."""
    if n.visit_date:
        visit_ts = int(time.mktime(timezone.localtime(n.visit_date).timetuple()))
    else:
        visit_ts = 0
    now_ts = int(time.time())
    marker_pk = n.visit_who_mark_id if n.visit_who_mark else -1
    # the reset window setting is stored in minutes
    reset_window = SettingManager.get("visit_reset_time_min", default="20.0", default_type='f') * 60
    return marker_pk, now_ts, visit_ts, reset_window
def mkb10(request):
    """Autocomplete MKB-10 (mkb10.4 dictionary) diagnoses by code prefix."""
    keyword = request.GET.get("keyword", "").split(' ')[0]
    matches = directions.Diagnoses.objects.filter(d_type="mkb10.4", code__istartswith=keyword, hide=False).order_by("code").distinct()[:11]
    return JsonResponse({"data": [{"pk": m.pk, "code": m.code, "title": m.title} for m in matches]})
def mkb10_dict(request, raw_response=False):
    """Search a diagnoses dictionary by a '<code> <title>' or plain-title query."""
    query = (request.GET.get("query", '') or '').strip()
    if not query:
        return [] if raw_response else JsonResponse({"data": []})
    if query == '-':
        empty = {"code": '-', "title": '', "id": '-'}
        return [empty] if raw_response else JsonResponse({"data": [empty]})
    dictionary = request.GET.get("dictionary", "mkb10.4")
    parts = query.split(' ', 1)
    # "-1" is the sentinel get_diagnoses expects for "not supplied"
    code = "-1"
    diag_title = "-1"
    starts_alnum = bool(re.search(r'^[a-zA-Z0-9]', parts[0]))
    if len(parts) == 2:
        if starts_alnum:
            code = parts[0]
            diag_title = parts[1]
        else:
            diag_title = f"{parts[0]} {parts[1]}"
    elif starts_alnum:
        code = parts[0]
    else:
        diag_title = parts[0]
    if diag_title != "-1":
        diag_title = f"{diag_title}."
    if dictionary != "mkb10.combined":
        found = get_diagnoses(d_type=dictionary, diag_title=diag_title, diag_mkb=code)
    else:
        # the combined dictionary merges mkb10.5 and mkb10.6 results
        found = get_diagnoses(d_type="mkb10.5", diag_title=diag_title, diag_mkb=code, limit=50)
        found.extend(get_diagnoses(d_type="mkb10.6", diag_title=diag_title, diag_mkb=code, limit=50))
    data = [{"code": item.code, "title": item.title, "id": item.nsi_id} for item in found]
    return data if raw_response else JsonResponse({"data": data})
def doctorprofile_search(request):
    """Search doctor profiles of the user's hospital by FIO parts (up to 3 words)."""
    query = request.GET["query"].strip()
    if not query:
        return JsonResponse({"data": []})
    words = query.split()
    hospital = request.user.doctorprofile.get_hospital()
    profiles = users.DoctorProfile.objects.filter(hospital=hospital, family__istartswith=words[0])
    if request.GET.get("signOrg", "") == "true":
        # restrict to profiles allowed to hold the organisation's EDS
        profiles = profiles.filter(user__groups__name__in=["ЭЦП Медицинской организации"])
    if len(words) > 1:
        profiles = profiles.filter(name__istartswith=words[1])
    if len(words) > 2:
        profiles = profiles.filter(patronymic__istartswith=words[2])
    data = [
        {
            "id": profile.pk,
            "fio": str(profile),
            "department": profile.podrazdeleniye.title if profile.podrazdeleniye else "",
            **profile.dict_data,
        }
        for profile in profiles.order_by('fio')[:15]
    ]
    return JsonResponse({"data": data})
def methods_of_taking(request):
    """Autocomplete methods-of-taking for a drug prescription, most used first."""
    prescription = request.GET.get("prescription", "")
    keyword = request.GET.get("keyword", "")
    found = directions.MethodsOfTaking.objects.filter(drug_prescription=prescription, method_of_taking__istartswith=keyword).order_by("-count").distinct()[:10]
    return JsonResponse({"data": [{"pk": row.pk, "method_of_taking": row.method_of_taking} for row in found]})
def key_value(request):
    """Autocomplete stored key/value pairs by key and value prefix."""
    key = request.GET.get("key", "")
    value = request.GET.get("value", "").strip()
    limit = int(request.GET.get("limit", "10"))
    rows = directions.KeyValue.objects.filter(key=key, value__istartswith=value).order_by("value").distinct()[:limit]
    return JsonResponse({"data": [{"pk": row.pk, "key": row.key, "value": row.value} for row in rows]})
def vich_code(request):
    """Autocomplete 'vc' diagnosis codes; a '-' title is rendered as empty."""
    keyword = request.GET.get("keyword", "")
    rows = directions.Diagnoses.objects.filter(code__istartswith=keyword, d_type="vc", hide=False).order_by("code")[:11]
    data = [{"pk": row.pk, "code": row.code, "title": "" if row.title == "-" else row.title} for row in rows]
    return JsonResponse({"data": data})
@login_required
@group_required("Подтверждение отправки результатов в РМИС")
def rmis_confirm_list(request):
    """List confirmed directions awaiting a manual RMIS send for a date range."""
    request_data = json.loads(request.body)
    date_start, date_end = try_parse_range(request_data["date_from"], request_data["date_to"])
    found = (
        directions.Napravleniya.objects.filter(istochnik_f__rmis_auto_send=False, force_rmis_send=False, issledovaniya__time_confirmation__range=(date_start, date_end))
        .exclude(issledovaniya__time_confirmation__isnull=True)
        .distinct()
        .order_by("pk")
    )
    rows = []
    for direction in found:
        rows.append(
            {
                "pk": direction.pk,
                "patient": {"fiodr": direction.client.individual.fio(full=True), "card": direction.client.number_with_type()},
                "fin": direction.fin_title,
            }
        )
    return JsonResponse({"directions": rows})
@csrf_exempt
def flg(request):
    """Accept a fluorography result pushed by an external system (POST).

    Expected POST fields: directionId, content (conclusion text), date,
    doc (doctor FIO). Writes the conclusion into the "Заключение" field of
    the direction's A06.09.006 service and confirms it with the matched doctor.

    NOTE(review): `ok` is never set to True, so the response is always
    {"ok": false} regardless of outcome — confirm callers ignore this flag.
    """
    ok = False
    dpk = int(request.POST["directionId"])
    content = request.POST["content"]
    date = try_strptime(request.POST["date"])
    doc_f = request.POST["doc"].lower()
    # Barcode-style ids carry a 46-prefix and a trailing digit — strip both
    # (the trailing digit is presumably a check digit; TODO confirm).
    if dpk >= 4600000000000:
        dpk -= 4600000000000
        dpk //= 10
    ds = directions.Napravleniya.objects.filter(pk=dpk)
    if ds.exists():
        d = ds[0]
        # A06.09.006 is the fluorography service code
        iss = directions.Issledovaniya.objects.filter(napravleniye=d, research__code="A06.09.006")
        if iss.exists():
            i = iss[0]
            doc = None
            gi = None
            # Match the pushing doctor by FIO within the service's department;
            # if no profile is matched yet, fall back to a paraclinic doctor.
            for u in users.DoctorProfile.objects.filter(podrazdeleniye=i.research.podrazdeleniye):
                if u.get_fio().lower() == doc_f or (not doc and u.has_group('Врач параклиники')):
                    doc = u
            gis = ParaclinicInputField.objects.filter(group__research=i.research, group__title="Заключение")
            if gis.exists():
                gi = gis[0]
            if doc and gi:
                # Create or update the stored conclusion value
                if not directions.ParaclinicResult.objects.filter(issledovaniye=i, field=gi).exists():
                    f_result = directions.ParaclinicResult(issledovaniye=i, field=gi, value="")
                else:
                    f_result = directions.ParaclinicResult.objects.filter(issledovaniye=i, field=gi)[0]
                if f_result.value != content:
                    f_result.value = content
                    f_result.save()
                if i.doc_save != doc or i.time_save != date or i.doc_confirmation != doc or i.time_confirmation != date:
                    i.doc_save = doc
                    i.time_save = date
                    i.doc_confirmation = doc
                    i.time_confirmation = date
                    if i.napravleniye:
                        # re-confirmation invalidates the direction's QR check token
                        i.napravleniye.qr_check_token = None
                        i.napravleniye.save(update_fields=['qr_check_token'])
                    i.save()
                if not i.napravleniye.visit_who_mark or not i.napravleniye.visit_date:
                    i.napravleniye.visit_who_mark = doc
                    i.napravleniye.visit_date = date
                    i.napravleniye.save()
                slog.Log(key=dpk, type=13, body=json.dumps({"content": content, "doc_f": doc_f}), user=None).save()
    return JsonResponse({"ok": ok})
def search_template(request):
    """Search non-global assignment templates by title prefix (max 10)."""
    q = request.GET.get('q', '')
    result = []
    if q != '':
        templates = users.AssignmentTemplates.objects.filter(title__istartswith=q, global_template=False).order_by('title')[:10]
        for template in templates:
            researches = [link.research.pk for link in users.AssignmentResearches.objects.filter(template=template, research__hide=False)]
            result.append({"pk": template.pk, "title": template.title, "researches": researches})
    return JsonResponse({"result": result, "q": q})
def load_templates(request):
    """Load assignment templates; type '1' selects global templates."""
    want_global = request.GET.get('type', '1') == '1'
    result = []
    for template in users.AssignmentTemplates.objects.filter(global_template=want_global).order_by('title'):
        researches = [link.research.pk for link in users.AssignmentResearches.objects.filter(template=template, research__hide=False)]
        result.append({"pk": template.pk, "title": template.title, "researches": researches})
    return JsonResponse({"result": result})
def get_template(request):
    """Return one assignment template (when pk is given) plus form dictionaries."""
    pk = request.GET.get('pk')
    title = ''
    researches = []
    global_template = False
    show_in_research_picker = False
    show_type = None
    site_type = None
    department = None
    if pk:
        template: users.AssignmentTemplates = users.AssignmentTemplates.objects.get(pk=pk)
        title = template.title
        researches = [link.research_id for link in users.AssignmentResearches.objects.filter(template=template, research__hide=False)]
        global_template = template.global_template
        show_in_research_picker = template.show_in_research_picker
        show_type = template.get_show_type()
        site_type = template.site_type_id
        department = template.podrazdeleniye_id

    def as_options(qs):
        # {id, label} option rows for the frontend selects
        return [{"id": row["id"], "label": row["title"]} for row in qs.values('id', 'title')]

    departments = as_options(Podrazdeleniya.objects.filter(hide=False, p_type=Podrazdeleniya.LABORATORY))
    departments_paraclinic = as_options(Podrazdeleniya.objects.filter(hide=False, p_type=Podrazdeleniya.PARACLINIC))
    site_types = {}
    for st in users.AssignmentTemplates.SHOW_TYPES_SITE_TYPES_TYPE:
        site_types[st] = [
            {"id": None, "label": 'Общие'},
            *as_options(ResearchSite.objects.filter(site_type=users.AssignmentTemplates.SHOW_TYPES_SITE_TYPES_TYPE[st], hide=False).order_by('order', 'title')),
        ]
    return JsonResponse(
        {
            "title": title,
            "researches": researches,
            "global_template": global_template,
            "department": department,
            "departments": departments,
            "departmentsParaclinic": departments_paraclinic,
            "siteTypes": site_types,
            "showInResearchPicker": show_in_research_picker,
            "type": show_type,
            "siteType": site_type,
        }
    )
@login_required
@group_required("Конструктор: Настройка шаблонов")
def update_template(request):
    """Create or update an assignment template together with its research links.

    Payload: pk (-1 = create new, >=0 = update existing), title, researches
    (list of research pks), global_template, plus optional showInResearchPicker,
    type ('lab', 'paraclinic', 'consult', ...), department and siteType fields.
    """
    response = {"ok": False}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -2)
    if pk > -2:
        title = request_data.get("title").strip()
        researches = request_data["researches"]
        global_template = request_data["global_template"]
        # A template must have a non-empty title and at least one research
        if len(title) > 0 and len(researches) > 0:
            t = None
            if pk == -1:
                t = users.AssignmentTemplates(title=title, global_template=global_template)
                t.save()
                pk = t.pk
            elif users.AssignmentTemplates.objects.filter(pk=pk).exists():
                t = users.AssignmentTemplates.objects.get(pk=pk)
                t.title = title
                t.global_template = global_template
                t.save()
            if t:
                t.show_in_research_picker = bool(request_data.get('showInResearchPicker'))
                tp = request_data.get('type')
                # department applies only to lab/paraclinic templates
                t.podrazdeleniye_id = request_data.get('department') if tp in ('lab', 'paraclinic') else None
                t.is_paraclinic = tp == 'paraclinic'
                t.is_doc_refferal = tp == 'consult'
                t.is_treatment = tp == 'treatment'
                t.is_stom = tp == 'stom'
                t.is_hospital = tp == 'hospital'
                t.is_microbiology = tp == 'microbiology'
                t.is_citology = tp == 'citology'
                t.is_gistology = tp == 'gistology'
                # site type applies only to the types listed in SHOW_TYPES_SITE_TYPES_TYPE
                t.site_type_id = request_data.get('siteType') if tp in users.AssignmentTemplates.SHOW_TYPES_SITE_TYPES_TYPE else None
                t.save()
                # sync the m2m link table with the submitted research list
                users.AssignmentResearches.objects.filter(template=t).exclude(research__pk__in=researches).delete()
                to_add = [x for x in researches if not users.AssignmentResearches.objects.filter(template=t, research__pk=x).exists()]
                for ta in to_add:
                    if DResearches.objects.filter(pk=ta).exists():
                        users.AssignmentResearches(template=t, research=DResearches.objects.get(pk=ta)).save()
                response["ok"] = True
    return JsonResponse(response)
def modules_view(request):
    """Expose the l2_cards module flag to the frontend."""
    l2_cards = SettingManager.get("l2_cards_module", default='false', default_type='b')
    return JsonResponse({"l2_cards": l2_cards})
def autocomplete(request):
    """Generic autocomplete endpoint dispatched on the `type` GET parameter.

    Supports single-column distinct-value lookups (card/individual fields),
    FIAS address suggestions, document issuers ("who_give:<doc_type_pk>"),
    FSLI reference tests and drugs. Returns {"data": [...]} (empty when the
    value is blank, the limit is non-positive or the type is unknown).
    """
    t = request.GET.get("type")
    v = request.GET.get("value", "")
    limit = int(request.GET.get("limit", 10))
    data = []
    # (model, field) pairs for the simple "distinct values of one column" lookups
    simple_lookups = {
        "harmful": (Card, "harmful_factor"),
        "name": (Individual, "name"),
        "family": (Individual, "family"),
        "patronymic": (Individual, "patronymic"),
        "work_place": (Card, "work_place"),
        "main_diagnosis": (Card, "main_diagnosis"),
        "work_position": (Card, "work_position"),
    }
    if v != "" and limit > 0:
        if t in simple_lookups:
            model, field = simple_lookups[t]
            rows = model.objects.filter(**{f"{field}__istartswith": v}).distinct(field)[:limit]
            data = [getattr(row, field) for row in rows]
        elif t == "fias":
            data = fias.suggest(v)
        elif t == "fias-extended":
            data = fias.suggest(v, count=limit, detalized=True)
        # BUGFIX: guard t for None before the substring check — a missing
        # `type` parameter previously raised TypeError on `in None`.
        elif t and "who_give:" in t:
            tpk = t.split(":")[1]
            p = Document.objects.filter(document_type__pk=tpk, who_give__istartswith=v).distinct('who_give')[:limit]
            data = [x.who_give for x in p]
        elif t == "fsli":
            base_q = Q(code_fsli__startswith=v) | Q(title__icontains=v) | Q(english_title__icontains=v) | Q(short_title__icontains=v) | Q(synonym__istartswith=v)
            if v == "HGB":
                # hemoglobin is commonly searched as HGB but stored under synonym 'Hb'
                base_q |= Q(synonym='Hb')
            p = FsliRefbookTest.objects.filter(base_q).filter(active=True).distinct('code_fsli').order_by('code_fsli', 'ordering')[:limit]
            data = [{"code_fsli": x.code_fsli, "short_title": x.short_title, "title": x.title, "sample": x.sample, "synonym": x.synonym, "nmu": x.code_nmu} for x in p]
        elif t == "drugs":
            drugs_qs = Drugs.objects.filter(Q(mnn__istartswith=v) | Q(trade_name__istartswith=v)).order_by('mnn', 'trade_name').distinct('mnn', 'trade_name')[:limit]
            data = [{"title": str(x), "pk": x.pk} for x in drugs_qs]
    return JsonResponse({"data": data})
def laborants(request):
    """List laboratory staff for result assignment when the feature is enabled."""
    data = []
    if SettingManager.l2('results_laborants'):
        data = [{"pk": '-1', "fio": 'Не выбрано'}]
        staff = users.DoctorProfile.objects.filter(user__groups__name="Лаборант", podrazdeleniye__p_type=users.Podrazdeleniya.LABORATORY).order_by('fio')
        for person in staff:
            data.append({"pk": str(person.pk), "fio": person.get_full_fio()})
    return JsonResponse({"data": data, "doc": request.user.doctorprofile.has_group("Врач-лаборант")})
@login_required
def load_docprofile_by_group(request):
    """Return doctor profiles of the requested group ('*' = all), grouped by department.

    Rows come back as tuples: (pk, fio, department_id, department_title, alt_title).
    """
    request_data = json.loads(request.body)
    # BUGFIX: do not name this local `users` — that shadowed the imported
    # `users` module for the remainder of the function.
    if request_data['group'] == '*':
        rows = users_all(request.user.doctorprofile.get_hospital_id())
    else:
        rows = users_by_group(request_data['group'], request.user.doctorprofile.get_hospital_id())
    users_grouped = {}
    for row in rows:
        if row[2] not in users_grouped:
            users_grouped[row[2]] = {'id': f"pord-{row[2]}", 'label': row[4] or row[3], 'children': []}
        users_grouped[row[2]]['children'].append({'id': str(row[0]), 'label': row[1], 'podr': row[4] or row[3]})
    return JsonResponse({"users": list(users_grouped.values())})
@login_required
@group_required("Создание и редактирование пользователей")
def users_view(request):
    """List departments with their users plus speciality/position dictionaries."""
    request_data = json.loads(request.body)
    user_hospital_pk = request.user.doctorprofile.get_hospital_id()
    hospital_pk = request_data.get('selected_hospital', user_hospital_pk)
    can_edit = request.user.is_superuser or request.user.doctorprofile.all_hospitals_users_control or hospital_pk == user_hospital_pk
    data = []
    if can_edit:
        departments = Podrazdeleniya.objects.filter(Q(hospital_id=hospital_pk) | Q(hospital__isnull=True)).exclude(p_type=Podrazdeleniya.HIDDEN, hospital__isnull=True).order_by("title")
        for department in departments:
            docs = users.DoctorProfile.objects.filter(podrazdeleniye=department, hospital_id=hospital_pk).order_by('fio')
            if not request.user.is_superuser:
                # non-superusers never see superuser accounts
                docs = docs.filter(user__is_superuser=False)
            data.append(
                {
                    "pk": department.pk,
                    "title": department.title,
                    "users": [{"pk": doc.pk, "fio": doc.get_fio(), "username": doc.user.username} for doc in docs],
                }
            )
    spec_data = [{"pk": -1, "title": "Не выбрано"}] + [{"pk": s.pk, "title": s.title} for s in users.Speciality.objects.filter(hide=False).order_by("title")]
    positions = [{"pk": -1, "title": "Не выбрано"}] + [{"pk": p.pk, "title": p.title} for p in users.Position.objects.filter(hide=False).order_by("title")]
    return JsonResponse({"departments": data, "specialities": spec_data, "positions": positions})
@login_required
@group_required("Создание и редактирование пользователей")
def user_view(request):
    """Return one user's editable profile data; pk == -1 yields a blank form."""
    request_data = json.loads(request.body)
    pk = request_data["pk"]
    resource_researches = []
    if pk == -1:
        # Blank defaults for the "create new user" form
        data = {
            "family": '',
            "name": '',
            "patronymic": '',
            "username": '',
            "department": '',
            "email": '',
            "groups": [],
            "restricted_to_direct": [],
            "users_services": [],
            "groups_list": [{"pk": x.pk, "title": x.name} for x in Group.objects.all()],
            "password": '',
            "rmis_location": '',
            "rmis_login": '',
            "rmis_password": '',
            "rmis_resource_id": '',
            "doc_pk": -1,
            "doc_code": -1,
            "rmis_employee_id": '',
            "rmis_service_id_time_table": '',
            "snils": '',
            "position": -1,
            "sendPassword": False,
            "external_access": False,
            "date_stop_external_access": None,
            "resource_schedule": resource_researches,
        }
    else:
        doc: users.DoctorProfile = users.DoctorProfile.objects.get(pk=pk)
        fio_parts = doc.get_fio_parts()
        doc_schedule_obj = ScheduleResource.objects.filter(executor=doc)
        resource_researches_temp = {}
        doc_resource_pk_title = {k.pk: k.title for k in doc_schedule_obj}
        doc_schedule = [i.pk for i in doc_schedule_obj]
        if doc_schedule_obj:
            # Group research pks by schedule-resource id for the schedule editor
            researches_pks = get_resource_researches(tuple(doc_schedule))
            for i in researches_pks:
                if not resource_researches_temp.get(i.scheduleresource_id, None):
                    resource_researches_temp[i.scheduleresource_id] = [i.researches_id]
                else:
                    temp_result = resource_researches_temp[i.scheduleresource_id]
                    temp_result.append(i.researches_id)
                    resource_researches_temp[i.scheduleresource_id] = temp_result.copy()
            resource_researches = [{"pk": k, "researches": v, "title": doc_resource_pk_title[k]} for k, v in resource_researches_temp.items()]
        # Note: the stored rmis_password is never echoed back to the client
        data = {
            "family": fio_parts[0],
            "name": fio_parts[1],
            "patronymic": fio_parts[2],
            "username": doc.user.username,
            "department": doc.podrazdeleniye_id,
            "email": doc.email or '',
            "groups": [x.pk for x in doc.user.groups.all()],
            "restricted_to_direct": [x.pk for x in doc.restricted_to_direct.all()],
            "users_services": [x.pk for x in doc.users_services.all()],
            "groups_list": [{"pk": x.pk, "title": x.name} for x in Group.objects.all()],
            "password": '',
            "rmis_location": doc.rmis_location or '',
            "rmis_login": doc.rmis_login or '',
            "rmis_resource_id": doc.rmis_resource_id or '',
            "rmis_password": '',
            "doc_pk": doc.user.pk,
            "personal_code": doc.personal_code,
            "speciality": doc.specialities_id or -1,
            "rmis_employee_id": doc.rmis_employee_id,
            "rmis_service_id_time_table": doc.rmis_service_id_time_table,
            "snils": doc.snils,
            "position": doc.position_id or -1,
            "sendPassword": False,
            "external_access": doc.external_access,
            "date_stop_external_access": doc.date_stop_external_access,
            "resource_schedule": resource_researches,
        }
    return JsonResponse({"user": data})
@login_required
@group_required("Создание и редактирование пользователей")
def user_save_view(request):
    """Create (pk == -1) or update a doctor profile from the posted form data.

    Validates username uniqueness, optional email format/uniqueness, then
    applies groups, research restrictions, services and profile fields.
    Returns {"ok": ..., "npk": <profile pk>, "message": <error text>}.
    """
    request_data = json.loads(request.body)
    pk = request_data["pk"]
    ok = True
    message = ""
    ud = request_data["user_data"]
    username = ud["username"]
    # normalize optional fields: empty strings become None
    rmis_location = str(ud["rmis_location"]).strip() or None
    rmis_employee_id = str(ud["rmis_employee_id"]).strip() or None
    rmis_service_id_time_table = str(ud["rmis_service_id_time_table"]).strip() or None
    rmis_login = ud["rmis_login"].strip() or None
    rmis_password = ud["rmis_password"].strip() or None
    personal_code = ud.get("personal_code", 0)
    rmis_resource_id = ud["rmis_resource_id"].strip() or None
    snils = ud.get("snils").strip() or ''
    email = ud.get("email").strip() or None
    position = ud.get("position", -1)
    send_password = ud.get("sendPassword", False)
    external_access = ud.get("external_access", False)
    date_stop_external_access = ud.get("date_stop_external_access")
    if date_stop_external_access == "":
        date_stop_external_access = None
    if position == -1:
        position = None
    user_hospital_pk = request.user.doctorprofile.get_hospital_id()
    hospital_pk = request_data.get('hospital_pk', user_hospital_pk)
    can_edit = request.user.is_superuser or request.user.doctorprofile.all_hospitals_users_control or hospital_pk == user_hospital_pk
    if not can_edit:
        return JsonResponse({"ok": False})
    npk = pk
    if pk == -1:
        if not User.objects.filter(username=username).exists():
            user = User.objects.create_user(username)
            user.is_active = True
            user.save()
            doc = users.DoctorProfile(user=user, fio=f'{ud["family"]} {ud["name"]} {ud["patronymic"]}')
            doc.save()
            # presumably splits/caches the FIO parts on the fresh profile — TODO confirm
            doc.get_fio_parts()
            npk = doc.pk
        else:
            ok = False
            message = "Имя пользователя уже занято"
            doc = None
    else:
        doc = users.DoctorProfile.objects.get(pk=pk)
    if pk and doc and (not doc.user.is_superuser or request.user.is_superuser):
        if ud["password"] != '':
            doc.user.set_password(ud["password"])
            doc.user.save()
        if pk != -1 and doc.user.username != ud['username']:
            if not User.objects.filter(username=username).exists():
                doc.user.username = username
                doc.user.save()
            else:
                ok = False
                message = "Имя пользователя уже занято"
        if email:
            email = email.strip()
        try:
            if email:
                validate_email(email)
        # BUGFIX: was a bare `except:` which also swallowed KeyboardInterrupt etc.
        except Exception:
            ok = False
            message = f"Email {email} некорректный"
        # BUGFIX: run the uniqueness check only when an email was supplied —
        # `email__iexact=None` matched every profile with a NULL email, so a
        # second user without an email was rejected as a duplicate.
        if email and users.DoctorProfile.objects.filter(email__iexact=email).exclude(pk=pk).exists():
            ok = False
            message = f"Email {email} уже занят"
        if ok:
            doc.user.groups.clear()
            for g in ud["groups"]:
                group = Group.objects.get(pk=g)
                doc.user.groups.add(group)
            doc.user.save()
            doc.restricted_to_direct.clear()
            for r in ud["restricted_to_direct"]:
                doc.restricted_to_direct.add(DResearches.objects.get(pk=r))
            doc.users_services.clear()
            for r in ud["users_services"]:
                doc.users_services.add(DResearches.objects.get(pk=r))
            spec = ud.get('speciality', None)
            if spec == -1:
                spec = None
            doc.podrazdeleniye_id = ud['department']
            doc.specialities_id = spec
            doc.family = ud["family"]
            doc.name = ud["name"]
            doc.patronymic = ud["patronymic"]
            doc.fio = f'{ud["family"]} {ud["name"]} {ud["patronymic"]}'
            doc.rmis_location = rmis_location
            doc.rmis_employee_id = rmis_employee_id
            doc.rmis_service_id_time_table = rmis_service_id_time_table
            doc.personal_code = personal_code
            doc.rmis_resource_id = rmis_resource_id
            doc.hospital_id = hospital_pk
            doc.snils = snils
            doc.email = email
            doc.position_id = position
            doc.external_access = external_access
            doc.date_stop_external_access = date_stop_external_access
            # RMIS credentials are only overwritten when a login is supplied
            if rmis_login:
                doc.rmis_login = rmis_login
                if rmis_password:
                    doc.rmis_password = rmis_password
            else:
                doc.rmis_login = None
                doc.rmis_password = None
            doc.save()
            if doc.email and send_password:
                doc.reset_password()
    return JsonResponse({"ok": ok, "npk": npk, "message": message})
def slot_status(x):
    """Return slot fill status: code 0 = free, 1 = has a direction, 2 = fully confirmed."""
    direction = directions.Napravleniya.objects.filter(rmis_slot_id=x["slot"]).first()
    if direction is None:
        return {"code": 0, "direction": None}
    code = 2 if direction.is_all_confirm() else 1
    return {"code": code, "direction": direction.pk}
@login_required
def user_location(request):
    """List RMIS reserves for the user's location on a date, each with a slot status."""
    request_data = json.loads(request.body)
    date = request_data["date"]
    reserves = {}
    rl = request.user.doctorprofile.rmis_location
    if rl and SettingManager.get("l2_rmis_queue", default='false', default_type='b'):
        if rl == 1337 and request.user.is_superuser:
            # magic location id: superuser debugging with fake data
            from rmis_integration.client import Patients

            reserves = Patients.get_fake_reserves()
        else:
            from rmis_integration.client import Client

            client = Client(modules=['patients'])
            reserves = client.patients.get_reserves(date, rl)
        reserves = [{**row, "status": slot_status(row)} for row in reserves]
    return JsonResponse({"data": reserves})
@login_required
def user_get_reserve(request):
    """Fetch one RMIS slot reservation for the current user's location.

    Enriches the slot with the linked direction (if any) and its first
    service; returns {} when the slot can't be resolved.
    """
    request_data = json.loads(request.body)
    pk = request_data["pk"]
    patient_uid = request_data["patient"]
    rl = request.user.doctorprofile.rmis_location
    # BUGFIX: `d` was previously unbound when rl was falsy, raising NameError
    # at the `if d:` check below instead of returning the empty response.
    d = {}
    if rl:
        if rl == 1337 and request.user.is_superuser:
            # magic location id: superuser debugging with fake data
            from rmis_integration.client import Patients

            d = Patients.get_fake_slot()
        else:
            from rmis_integration.client import Client

            c = Client(modules=['patients'])
            d = c.patients.get_slot(pk)
        n = directions.Napravleniya.objects.filter(rmis_slot_id=pk).first()
        d["direction"] = n.pk if n else None
        ds = directions.Issledovaniya.objects.filter(napravleniye=n, napravleniye__isnull=False).first()
        d['direction_service'] = ds.research_id if ds else -1
    if d:
        return JsonResponse({**d, "datetime": d["datetime"].strftime('%d.%m.%Y %H:%M'), "patient_uid": patient_uid, "pk": int(str(pk)[1:]) if str(pk).isdigit() else str(pk)})
    return JsonResponse({})
@login_required
def user_fill_slot(request):
    """Attach a direction to an RMIS slot, creating the direction if none exists."""
    slot = json.loads(request.body).get('slot', {})
    slot_data = slot.get('data', {})
    # Single query instead of exists() + filter()[0].
    existing = directions.Napravleniya.objects.filter(rmis_slot_id=slot["id"]).first()
    if existing:
        direction = existing.pk
    else:
        result = directions.Napravleniya.gen_napravleniya_by_issledovaniya(
            slot["card_pk"],
            "",
            "ОМС",
            "",
            None,
            request.user.doctorprofile,
            {-1: [slot_data["direction_service"]]},
            {},
            False,
            {},
            vich_code="",
            count=1,
            discount=0,
            parent_iss=None,
            rmis_slot=slot["id"],
        )
        direction = result["list_id"][0]
    return JsonResponse({"direction": direction})
@login_required
def job_types(request):
    """Return job types and, for lab heads, the list of selectable executors."""
    types = [{"pk": job.pk, "title": job.title} for job in directions.TypeJob.objects.filter(hide=False)]
    zav_group = Group.objects.filter(name="Зав. лабораторией").first()
    is_zav_lab = request.user.is_superuser or bool(zav_group and zav_group in request.user.groups.all())
    users_list = [request.user.doctorprofile.get_data()]
    if is_zav_lab:
        others = (
            users.DoctorProfile.objects.filter(user__groups__name__in=["Лаборант", "Врач-лаборант"])
            .exclude(pk=request.user.doctorprofile.pk)
            .order_by("fio")
            .distinct()
        )
        users_list.extend(person.get_data() for person in others)
    return JsonResponse({"types": types, "is_zav_lab": is_zav_lab, "users": users_list})
@login_required
def job_save(request):
    """Record one executed-job entry (type, count, executor, date)."""
    payload = json.loads(request.body)
    directions.EmployeeJob(
        type_job_id=payload["type"],
        count=payload["count"],
        doc_execute_id=payload["executor"],
        date_job=try_strptime(payload["date"]).date(),
    ).save()
    return JsonResponse({"ok": True})
@login_required
def job_list(request):
    """Return job entries for a date — the whole lab's if the requester is a lab head."""
    payload = json.loads(request.body)
    date = try_strptime(payload["date"]).date()
    zav_lab_group = Group.objects.filter(name="Зав. лабораторией").first()
    is_zav_lab = bool(zav_lab_group and zav_lab_group in request.user.groups.all()) or request.user.is_superuser
    executors = [request.user.doctorprofile]
    if is_zav_lab:
        executors.extend(
            users.DoctorProfile.objects.filter(user__groups__name__in=["Лаборант", "Врач-лаборант"])
            .exclude(pk=request.user.doctorprofile.pk)
            .order_by("fio")
            .distinct()
        )
    rows = [
        {
            "pk": j.pk,
            "executor": j.doc_execute.get_fio(),
            "type": j.type_job.title,
            "count": j.count,
            "saved": strdatetime(j.time_save),
            "canceled": bool(j.who_do_cancel),
        }
        for j in directions.EmployeeJob.objects.filter(doc_execute__in=executors, date_job=date).order_by("doc_execute", "-time_save")
    ]
    return JsonResponse({"list": rows})
@login_required
def job_cancel(request):
    """Toggle the canceled state of a job entry (lab head or the executor only)."""
    payload = json.loads(request.body)
    job = directions.EmployeeJob.objects.get(pk=payload["pk"])
    zav_lab_group = Group.objects.filter(name="Зав. лабораторией").first()
    is_zav_lab = bool(zav_lab_group and zav_lab_group in request.user.groups.all()) or request.user.is_superuser
    if is_zav_lab or job.doc_execute == request.user.doctorprofile:
        if payload["cancel"]:
            # Mark as canceled and remember who did it
            job.canceled_at = timezone.now()
            job.who_do_cancel = request.user.doctorprofile
        else:
            # Revert the cancellation
            job.canceled_at = None
            job.who_do_cancel = None
        job.save()
    return JsonResponse({"ok": True})
@csrf_exempt
def reader_status(request):
    """Report the cached state of a card reader by its id."""
    reader_id = json.loads(request.body).get('readerId', 'null')
    # Missing cache entry reads as a "none" status
    state = json.loads(cache.get(f'reader-status:{reader_id}', '{"status": "none"}'))
    return JsonResponse({
        "status": state.get('status'),
        "polis": state.get('polis'),
        "fio": state.get('fio'),
        'details': state.get('details', {}),
    })
@csrf_exempt
def reader_status_update(request):
    """Store a card reader's state in the cache for 10 seconds."""
    payload = json.loads(request.body)
    reader_id = payload.get('readerId')
    if not reader_id:
        return JsonResponse({"ok": True})
    cache_key = f'reader-status:{reader_id}'
    if payload['status'] == 'inserted':
        # A card is in the reader — cache the patient data read from it
        state = json.dumps({"status": 'inserted', "polis": payload['polis'], "fio": payload['fio'], "details": payload['details']})
        cache.set(cache_key, state, 10)
    else:
        cache.set(cache_key, '{"status": "wait"}', 10)
    return JsonResponse({"ok": True})
def actual_districts(request):
    """Return select-widget data for the doctor-call form: districts, doctors,
    call purposes and hospitals, plus the hospital preselected for the card.

    When ``card_pk`` is absent from the request the parsed body is echoed
    back unchanged (legacy behavior preserved).
    """
    data = json.loads(request.body)
    card_pk = data.get('card_pk')
    rows = District.objects.all().order_by('-sort_weight', '-id').values('pk', 'title', 'is_ginekolog')
    rows = [{"id": -1, "label": "НЕ ВЫБРАН"}, *[{"id": x['pk'], "label": x["title"] if not x['is_ginekolog'] else "Гинекология: {}".format(x['title'])} for x in rows]]
    users = users_by_group(['Лечащий врач'], request.user.doctorprofile.get_hospital_id())
    users = [{"id": -1, "label": "НЕ ВЫБРАН"}, *[{'id': row[0], 'label': row[1]} for row in users]]
    purposes = [{'id': row[0], 'label': row[1]} for row in DoctorCall.PURPOSES]
    hospitals = Hospitals.objects.filter(hide=False).order_by('short_title').values('pk', 'short_title', 'title', 'code_tfoms')
    hospitals = [{"id": -1, "label": "НЕ ВЫБРАНО"}, *[{"id": x['pk'], "label": x["short_title"] or x["title"], "code_tfoms": x["code_tfoms"]} for x in hospitals]]
    if card_pk is not None:
        card_hospital_id = -1
        if SettingManager.l2('tfoms'):
            card = Card.objects.get(pk=card_pk)
            enp = card.individual.get_enp()
            if enp:
                # Reuse the already-fetched ENP instead of calling get_enp() again
                from_tfoms = match_enp(enp)
                if from_tfoms and from_tfoms.get('unit_code'):
                    # Map the TFOMS unit code back to a known hospital id
                    card_hospital_id = {x['code_tfoms']: x['id'] for x in hospitals if x.get("code_tfoms")}.get(from_tfoms['unit_code']) or -1
        if card_hospital_id == -1 and len(hospitals) == 2:
            # Exactly one real hospital besides the "not selected" stub — preselect it
            card_hospital_id = hospitals[1]['id']
        data = {'rows': rows, 'docs': users, 'purposes': purposes, 'hospitals': hospitals, 'hospitalId': card_hospital_id}
    return JsonResponse(data)
def hospitals(request):
    """List visible hospitals; a privileged user additionally gets the
    synthetic "Все"/"Не выбрано" entries appended to the list."""
    data = json.loads(request.body)
    any_hospital = False
    rows = []
    if request.user.is_authenticated and request.user.doctorprofile:
        any_hospital = request.user.doctorprofile.all_hospitals_users_control
        filters = {}
        if data.get('filterByUserHospital') and not any_hospital:
            # Restrict the list to the requester's own hospital
            filters['pk'] = request.user.doctorprofile.get_hospital_id()
        rows = Hospitals.objects.filter(hide=False, **filters).order_by('-is_default', 'short_title').values('pk', 'short_title', 'title', 'code_tfoms')
    default_hospital = []
    if any_hospital:
        default_hospital = [
            {
                "id": -1,
                "label": "Все",
                "code_tfoms": "000000",
            },
            {
                "id": -2,
                "label": "Не выбрано",
                "code_tfoms": "000001",
            },
        ]
    result = {
        "hospitals": [
            *[{"id": x['pk'], "label": x["short_title"] or x["title"], "code_tfoms": x["code_tfoms"]} for x in rows],
            *default_hospital,
        ]
    }
    # Some internal callers want the raw dict instead of an HTTP response
    if hasattr(request, 'plain_response') and request.plain_response:
        return result
    return JsonResponse(result)
def rmis_link(request):
    """Build RMIS auth and schedule URLs from the requester's profile."""
    profile = request.user.doctorprofile
    auth_param = (
        URL_RMIS_AUTH
        .replace('userlogin', profile.rmis_login or '')
        .replace('userpassword', urllib.parse.quote(profile.rmis_password or ''))
    )
    url_schedule = None
    if profile.hospital.rmis_org_id and profile.rmis_service_id_time_table and profile.rmis_employee_id:
        # All three RMIS identifiers are set — the timetable link can be built
        url_schedule = (
            URL_SCHEDULE
            .replace('organization_param', profile.hospital.rmis_org_id)
            .replace('service_param', profile.rmis_service_id_time_table)
            .replace('employee_param', profile.rmis_employee_id)
        )
    return JsonResponse({'auth_param': auth_param, 'url_eln': URL_ELN_MADE, 'url_schedule': url_schedule})
@login_required
def get_permanent_directory(request):
    """Return the NSI dictionary for the requested OID (empty dict if unknown)."""
    oid = json.loads(request.body).get('oid', '')
    return JsonResponse(NSI.get(oid, {}))
@login_required
@group_required("Конструктор: Настройка скрининга")
def screening_get_directory(request):
    """Return all screening plans, renumbering sort weights to be sequential.

    Any plan whose stored sort_weight differs from its list position is
    normalized and persisted on the fly.
    """
    rows = list(ScreeningPlan.objects.all().order_by('sort_weight').values('pk', 'age_start_control', 'age_end_control', 'sex_client', 'research_id', 'period', 'sort_weight', 'hide'))
    for position, row in enumerate(rows):
        if row['sort_weight'] != position:
            # Persist the normalized weight so subsequent loads are stable
            row['sort_weight'] = position
            plan = ScreeningPlan.objects.get(pk=row['pk'])
            plan.sort_weight = position
            plan.save(update_fields=['sort_weight'])
        row['hasChanges'] = False
    return JsonResponse({"rows": rows})
@login_required
@group_required("Конструктор: Настройка скрининга")
def screening_save(request):
    """Create or update a screening plan from the request body."""
    parse_params = {
        'screening': as_model(ScreeningPlan),
        'service': as_model(DResearches),
        'sex': str,
        'ageFrom': int,
        'ageTo': int,
        'period': int,
        'sortWeight': int,
        'hide': bool,
    }
    # data_parse yields the values in parse_params order
    screening, service, sex, age_from, age_to, period, sort_weight, hide = data_parse(
        request.body, parse_params, {'screening': None, 'hide': False}
    )
    if not service:
        return status_response(False, 'Не передана услуга или исследование')
    try:
        if not screening:
            screening = ScreeningPlan.objects.create(
                research=service,
                sex_client=sex,
                age_start_control=age_from,
                age_end_control=age_to,
                period=period,
                sort_weight=sort_weight,
                hide=hide,
            )
        else:
            screening.research = service
            screening.sex_client = sex
            screening.age_start_control = age_from
            screening.age_end_control = age_to
            screening.period = period
            screening.sort_weight = sort_weight
            screening.hide = hide
            screening.save()
    except IntegrityError:
        # A unique constraint on the plan parameters was violated
        return status_response(False, 'Такой скрининг уже есть!')
    return status_response(True)
@login_required
def companies(request):
    """List active companies as id/label rows for select widgets."""
    active = Company.objects.filter(active_status=True).order_by('short_title')
    return JsonResponse({'rows': [{'id': c.pk, 'label': c.short_title or c.title} for c in active]})
@login_required
def input_templates_add(request):
    """Save a per-user input template value for a field.

    Returns ``{"ok": False}`` when the value already exists (repairing its
    cached lowercase form if stale); otherwise creates it and returns its pk.
    """
    data = json.loads(request.body)
    pk = data["pk"]
    value = str(data["value"]).strip()
    value_lower = value.lower()
    doc = request.user.doctorprofile
    # One query via first() instead of exists() followed by an indexed fetch
    t = ParaclinicUserInputTemplateField.objects.filter(field_id=pk, doc=doc, value=value).first()
    if t is not None:
        if t.value_lower != value_lower:
            # Repair a stale lowercase cache of the value
            t.value_lower = value_lower
            t.save()
        return JsonResponse({"ok": False})
    t = ParaclinicUserInputTemplateField.objects.create(field_id=pk, doc=doc, value=value, value_lower=value_lower)
    return JsonResponse({"ok": True, "pk": t.pk})
@login_required
def input_templates_get(request):
    """List the requester's saved template values for a field."""
    field_pk = json.loads(request.body)["pk"]
    templates = ParaclinicUserInputTemplateField.objects.filter(field_id=field_pk, doc=request.user.doctorprofile).order_by("pk")
    return JsonResponse({"rows": [{"pk": t.pk, "value": t.value} for t in templates]})
@login_required
def input_templates_delete(request):
    """Delete one of the requester's own template values."""
    template_pk = json.loads(request.body)["pk"]
    # Filtering by doc guarantees users can only delete their own templates
    ParaclinicUserInputTemplateField.objects.filter(pk=template_pk, doc=request.user.doctorprofile).delete()
    return JsonResponse({"ok": True})
@login_required
def input_templates_suggests(request):
    """Suggest up to four saved values starting with the typed prefix."""
    data = json.loads(request.body)
    field_pk = data["pk"]
    prefix = str(data["value"]).strip().lower()
    matches = (
        ParaclinicUserInputTemplateField.objects.filter(field_id=field_pk, doc=request.user.doctorprofile, value_lower__startswith=prefix)
        .exclude(value_lower=prefix)  # don't suggest exactly what was typed
        .order_by('value_lower')
        .values_list('value', flat=True)[:4]
    )
    return JsonResponse({"rows": list(matches), "value": data["value"]})
@login_required
def construct_menu_data(request):
    """Build the "construct" section menu filtered by the requester's groups.

    Each page entry declares the groups allowed to see it (``access``) and an
    optional feature-module flag (``module``); filtering itself is delegated
    to ``make_menu``, with superusers presumably seeing everything — confirm
    in context_processors.utils.
    """
    groups = [str(x) for x in request.user.groups.all()]
    # Static catalog of constructor pages and their required access groups
    pages = [
        {"url": "/construct/tubes", "title": "Ёмкости для биоматериала", "access": ["Конструктор: Ёмкости для биоматериала"], "module": None},
        {"url": "/construct/researches", "title": "Лабораторные исследования", "access": ["Конструктор: Лабораторные исследования"], "module": None},
        {
            "url": "/construct/researches-paraclinic",
            "title": "Описательные исследования и консультации",
            "access": ["Конструктор: Параклинические (описательные) исследования"],
            "module": "paraclinic_module",
        },
        {"url": "/construct/directions_group", "title": "Группировка исследований по направлениям", "access": ["Конструктор: Группировка исследований по направлениям"], "module": None},
        {"url": "/construct/uets", "title": "Настройка УЕТов", "access": ["Конструктор: Настройка УЕТов"], "module": None},
        {"url": "/construct/templates", "title": "Настройка шаблонов", "access": ["Конструктор: Настройка шаблонов"], "module": None},
        {"url": "/construct/bacteria", "title": "Бактерии и антибиотики", "access": ["Конструктор: Бактерии и антибиотики"], "module": None},
        {"url": "/construct/dplan", "title": "Д-учет", "access": ["Конструктор: Д-учет"], "module": None},
        {"url": "/ui/construct/screening", "title": "Настройка скрининга", "access": ["Конструктор: Настройка скрининга"], "module": None},
        {"url": "/ui/construct/org", "title": "Настройка организации", "access": ["Конструктор: Настройка организации"], "module": None},
    ]
    # NOTE(review): imported lazily — presumably to avoid a circular import
    from context_processors.utils import make_menu
    menu = make_menu(pages, groups, request.user.is_superuser)
    return JsonResponse(
        {
            "menu": menu,
        }
    )
@login_required
def current_org(request):
    """Return the requester's hospital requisites for the organization page."""
    hospital: Hospitals = request.user.doctorprofile.get_hospital()
    # Response keys use camelCase for the front end
    org_payload = {
        "pk": hospital.pk,
        "title": hospital.title,
        "shortTitle": hospital.short_title,
        "address": hospital.address,
        "phones": hospital.phones,
        "ogrn": hospital.ogrn,
        "www": hospital.www,
        "email": hospital.email,
        "licenseData": hospital.license_data,
        "currentManager": hospital.current_manager,
        "okpo": hospital.okpo,
    }
    return JsonResponse({"org": org_payload})
@login_required
@group_required('Конструктор: Настройка организации')
def current_org_update(request):
    """Update the requester's hospital requisites and log old/new values.

    Rejects the update when the stripped title is empty. The audit record
    (event 110000) captures both the previous and the new field values.
    """
    parse_params = {'title': str, 'shortTitle': str, 'address': str, 'phones': str, 'ogrn': str, 'currentManager': str, 'licenseData': str, 'www': str, 'email': str, 'okpo': str}
    # NOTE(review): the defaults dict below looks copy-pasted from
    # screening_save; its keys are not in parse_params, so it is kept
    # verbatim only to preserve data_parse's observed behavior.
    data = data_parse(request.body, parse_params, {'screening': None, 'hide': False})
    # Model attribute names, in the same order as parse_params keys
    fields = ['title', 'short_title', 'address', 'phones', 'ogrn', 'current_manager', 'license_data', 'www', 'email', 'okpo']
    new_data = {field: value.strip() for field, value in zip(fields, data)}
    if not new_data['title']:
        return status_response(False, 'Название не может быть пустым')
    hospital: Hospitals = request.user.doctorprofile.get_hospital()
    # Snapshot the current values before overwriting them, for the audit log
    old_data = {field: getattr(hospital, field) for field in fields}
    for field, value in new_data.items():
        setattr(hospital, field, value)
    hospital.save()
    Log.log(
        hospital.pk,
        110000,
        request.user.doctorprofile,
        {
            "oldData": old_data,
            "newData": new_data,
        },
    )
    return status_response(True)
@login_required
def get_links(request):
    """Return the configured external links (empty list when none are set)."""
    return JsonResponse({"rows": SOME_LINKS or []})
@login_required
def get_disabled_forms(request):
    """Return form ids disabled globally plus those disabled for this user.

    Empty fragments are dropped: ``"".split(",")`` yields ``[""]``, so an
    empty per-user setting would otherwise add a spurious "" entry.
    """
    disabled = {x for x in request.user.doctorprofile.disabled_forms.split(",") if x}
    disabled.update(x for x in DISABLED_FORMS if x)
    return JsonResponse({"rows": list(disabled)})
@login_required
def get_disabled_categories(request):
    """Return statistic-category ids disabled globally plus per-user ones.

    Empty fragments are dropped: ``"".split(",")`` yields ``[""]``, so an
    empty per-user setting would otherwise add a spurious "" entry.
    """
    disabled = {x for x in request.user.doctorprofile.disabled_statistic_categories.split(",") if x}
    disabled.update(x for x in DISABLED_STATISTIC_CATEGORIES if x)
    return JsonResponse({"rows": list(disabled)})
@login_required
def get_disabled_reports(request):
    """Return statistic-report ids disabled globally plus per-user ones.

    Empty fragments are dropped: ``"".split(",")`` yields ``[""]``, so an
    empty per-user setting would otherwise add a spurious "" entry.
    """
    disabled = {x for x in request.user.doctorprofile.disabled_statistic_reports.split(",") if x}
    disabled.update(x for x in DISABLED_STATISTIC_REPORTS if x)
    return JsonResponse({"rows": list(disabled)})
@login_required
def org_generators(request):
    """List the number generators configured for the requester's hospital."""
    hospital: Hospitals = request.user.doctorprofile.get_hospital()
    generators = directions.NumberGenerator.objects.filter(hospital=hospital).order_by('pk')
    rows = [
        {
            "pk": g.pk,
            "key": g.key,
            "keyDisplay": g.get_key_display(),
            "year": g.year,
            "isActive": g.is_active,
            "start": g.start,
            "end": g.end,
            "last": g.last,
            "prependLength": g.prepend_length,
        }
        for g in generators
    ]
    return JsonResponse({"rows": rows})
@login_required
@group_required('Конструктор: Настройка организации')
def org_generators_add(request):
    """Create a new active number generator for the requester's hospital.

    Deactivation of prior generators and creation of the new one happen
    atomically, so at most one generator per (hospital, key, year) is
    active. The change is written to the audit log (event 110000).
    """
    hospital: Hospitals = request.user.doctorprofile.get_hospital()
    parse_params = {
        'key': str,
        'year': int,
        'start': int,
        'end': int,
        'prependLength': int,
    }
    # data_parse yields the values in parse_params order
    key, year, start, end, prepend_length = data_parse(request.body, parse_params, {'screening': None, 'hide': False})
    with transaction.atomic():
        directions.NumberGenerator.objects.filter(hospital=hospital, key=key, year=year).update(is_active=False)
        directions.NumberGenerator.objects.create(hospital=hospital, key=key, year=year, start=start, end=end, prepend_length=prepend_length, is_active=True)
    Log.log(
        hospital.pk,
        110000,
        request.user.doctorprofile,
        {
            "key": key,
            "year": year,
            "start": start,
            "end": end,
            "prepend_length": prepend_length,
        },
    )
    return status_response(True)
def current_time(request):
    """Return the server's current date and time in the configured time zone."""
    local_now = timezone.now().astimezone(pytz.timezone(TIME_ZONE))
    payload = {
        "date": local_now.strftime('%Y-%m-%d'),
        "time": local_now.strftime('%X'),
    }
    return JsonResponse(payload)
| moodpulse/l2 | api/views.py | Python | mit | 90,702 | [
"VisIt"
] | 098de92b0ec2d7b5ef302701da59263a282372235e9c80535e2b2d7c51722af8 |
#!/usr/bin/env python3
import unittest
import sys
import os
from raw_data_parsers.play_by_play.general import row_type, get_kicking_offense
class TestPlayByPlay(unittest.TestCase):
    """Tests for the play-by-play row classifier and kickoff attribution."""

    def __set_kicking_consts(self):
        """Set play strings to be used by the kicking tests."""
        # Turnovers
        self.kicks = (
            "B. Simpson kicks off 70 yards, returned by M. Simpson for 20 yards (tackle by L. Simpson )",
            "Moe Howard kicks onside 12 yards, recovered by Shemp Howard . Larry Fine fumbles, recovered by Curly Howard at LOC -41",
            "Louis C.K. Kicks off.",
        )

    def test_row_type(self):
        """Check row_type() classification of headers, quarters and plays."""
        # Successful
        self.assertEqual(row_type("End of Overtime 38 35 0 0"), 6)
        self.assertEqual(
            row_type(
                "Quarter Time Down ToGo Location Detail RAV DEN EPB EPA"
            ),
            -1
        )
        self.assertEqual(
            row_type("""OT 15:00 1 10 DEN 34R Player passes but there are
            dinosaurs on the field! 35 35 3.31 3.04"""),
            0
        )
        self.assertEqual(row_type("Overtime"), 5)
        self.assertEqual(row_type("1st Quarter"), 1)
        self.assertEqual(row_type("2nd Quarter"), 2)
        self.assertEqual(row_type("3rd Quarter"), 3)
        self.assertEqual(row_type("4th Quarter"), 4)

    def test_get_kicking_offense(self):
        """Check home/away attribution of kickoffs by position or roster."""
        self.__set_kicking_consts()
        # Successful
        self.assertEqual(
            get_kicking_offense(
                "DEN 35", "", "DEN", "SEA", (), ()
            ),
            "away"
        )
        self.assertEqual(
            get_kicking_offense(
                "SEA 35", "", "DEN", "SEA", (), ()
            ),
            "home"
        )
        self.assertEqual(
            get_kicking_offense(
                "DEN 50", "", "DEN", "SEA", (), ()
            ),
            "away"
        )
        self.assertEqual(
            get_kicking_offense("DEN", "", "DEN", "SEA", (), ()),
            "away"
        )
        self.assertEqual(
            get_kicking_offense(
                "",
                self.kicks[0],
                "DEN",
                "SEA",
                ("B. Simpson", "L. Simpson"),
                ("M. Simpson",)
            ),
            "away"
        )
        self.assertEqual(
            get_kicking_offense(
                "",
                self.kicks[1],
                "DEN",
                "SEA",
                ("Shemp Howard",),
                ("Moe Howard", "Larry Fine", "Curly Howard")
            ),
            "home"
        )
        # Failure
        self.assertRaises(
            KeyError,
            get_kicking_offense, "PTC 35", "", "", "", (), ()
        )
        # We squelch the warning from this test, we want the warning when
        # running on data, but not when testing.
        # Fix: the old code opened os.devnull a second time and rebound the
        # `with`-managed handle, and never restored stdout if an assertion
        # failed. One managed handle plus try/finally fixes both problems.
        with open(os.devnull, 'w') as f:
            oldstdout = sys.stdout
            sys.stdout = f
            try:
                # The squelched tests
                # Unknown kicker
                self.assertEqual(
                    get_kicking_offense("", self.kicks[2], "", "", (), ()),
                    None
                )
                # Degenerate kicker
                self.assertEqual(
                    get_kicking_offense(
                        "", self.kicks[2], "", "", ("Player8",), ("Player8",)
                    ),
                    None
                )
                # Unknown team
                self.assertEqual(
                    get_kicking_offense(
                        "DEN 35", self.kicks[2], "SEA", "SFO", (), ()
                    ),
                    None
                )
            finally:
                # Restore stdout even if a squelched assertion failed
                sys.stdout = oldstdout
if __name__ == '__main__':
    # Allow running this test module directly as a script
    unittest.main()
| chizarlicious/Football-Data-Converter | code/tests/play_by_play/test_general.py | Python | gpl-3.0 | 4,105 | [
"MOE"
] | 2e709db29ededf4fb0e1cd5dfff6fd3226e71c585c93a2d01f9335db2cad8af7 |
"""
Unit tests for the `ambient` module of ``TAMOC``
Provides testing of all of the functions, classes and methods in the `ambient`
module. These tests rely on data stored in the ``./data`` folder and will
write data to and read data from the ``./test/output`` folder.
"""
# S. Socolofsky, July 2013, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function)
import tamoc
from tamoc import ambient
import os
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_approx_equal
from scipy.interpolate import interp1d
from datetime import datetime
from netCDF4 import Dataset, date2num, num2date
# ----------------------------------------------------------------------------
# Functions used by unit tests
# ----------------------------------------------------------------------------
# Get a platform-independent path to the datafile
DATA_DIR = os.path.realpath(os.path.join(os.path.dirname(tamoc.__file__),'data'))
OUTPUT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__),'output'))
# makedirs(exist_ok=True) avoids the exists()/mkdir() race when tests run
# concurrently, and creates any missing parent directories as well.
os.makedirs(OUTPUT_DIR, exist_ok=True)
def get_units(data, units, nr, nc, mks_units, ans=None):
    """
    Run the ambient.convert_units function and test that the data are
    correctly converted per the inputs given above.
    """
    converted, out_units = ambient.convert_units(data, units)
    # The converted data must have the expected number of rows and columns
    shape = np.atleast_2d(converted).shape
    assert shape[0] == nr
    assert shape[1] == nc
    # Every unit string should now be in its MKS form
    for i in range(len(out_units)):
        assert out_units[i] == mks_units[i]
    # When a reference answer is supplied, check the numbers too
    if ans is not None:
        assert_array_almost_equal(converted, ans, decimal=6)
    # Send back the converted data
    return (converted, out_units)
def get_profile(data, z_col, z_start, p_col, P, z_min, z_max, nr, nc):
    """
    Run the ambient.extract_profile function and test that the data are
    correctly parsed per the inputs given above.
    """
    profile = ambient.extract_profile(data, z_col=z_col, z_start=z_start,
                                      p_col=p_col, P_atm=P)
    # The profile must start at the free surface (depth zero)
    assert profile[0, z_col] == 0.0
    # ...and be clipped to the expected depth range
    assert_approx_equal(profile[1, z_col], z_min, significant=6)
    assert_approx_equal(profile[-1, z_col], z_max, significant=6)
    # Shape and type must be as expected
    assert profile.shape[0] == nr
    if nc is not None:
        assert profile.shape[1] == nc
    assert isinstance(profile, np.ndarray)
    # Depths must be strictly increasing
    for row in range(1, profile.shape[0]):
        assert profile[row, z_col] > profile[row - 1, z_col]
    # Send back the extracted profile
    return profile
def check_nc_db(nc_file, summary, source, sea_name, p_lat,
                p_lon, p_time):
    """
    Use the ambient.create_nc_db() function to create a netCDF4-classic
    dataset from the given inputs and then check whether the dataset is
    created properly.
    """
    nc = ambient.create_nc_db(nc_file, summary, source, sea_name, p_lat,
                              p_lon, p_time)
    # Global attributes should echo the creation arguments
    assert nc.summary == summary
    assert nc.source == source
    assert nc.sea_name == sea_name
    # Scalar (immutable) coordinates must be written as given
    assert nc.variables['lat'][0] == p_lat
    assert nc.variables['lon'][0] == p_lon
    assert nc.variables['time'][0] == p_time
    # The depth coordinate starts out empty
    assert nc.variables['z'].shape == (0,)
    # Standard variables carry their expected units
    expected_units = {'z': 'm', 'temperature': 'K', 'salinity': 'psu',
                      'pressure': 'Pa'}
    for name, unit in expected_units.items():
        assert nc.variables[name].units == unit
    # Send back the template database
    return nc
def get_filled_nc_db(nc, data, symbols, units, comments, z_col,
                     long_names, std_names):
    """
    Check that data written to a netCDF dataset has been stored correctly.
    """
    # Store the data in the netCDF dataset (removed an unused read of the
    # 'z' variable shape that served no purpose here)
    nc = ambient.fill_nc_db(nc, data, symbols, units, comments, z_col)
    # A single symbol means `data` is a 1-D profile: promote it to a column
    # so the column-wise comparison below works in both cases
    if len(symbols) == 1:
        data = np.atleast_2d(data).transpose()
    # Check that data and metadata were stored properly
    for i in range(len(symbols)):
        assert_array_almost_equal(nc.variables[symbols[i]][:],
                                  data[:,i], decimal = 6)
        assert nc.variables[symbols[i]].long_name == long_names[i]
        assert nc.variables[symbols[i]].standard_name == std_names[i]
        assert nc.variables[symbols[i]].units == units[i]
        assert nc.variables[symbols[i]].comment == comments[i]
    # Send back the correctly-filled dataset
    return nc
def get_profile_obj(nc, chem_names, chem_units):
    """
    Check that an ambient.Profile object is created correctly and that the
    methods operate as expected.

    Parameters
    ----------
    nc : netCDF4 dataset holding z, temperature, salinity, pressure and the
        chemical profiles.
    chem_names : str or list of str, chemical variable name(s) to load.
    chem_units : str or list of str, expected units in the same order.

    Returns the constructed Profile (with a 'temp' profile appended).
    """
    # Accept scalar strings for convenience; normalize to lists
    if isinstance(chem_names, str):
        chem_names = [chem_names]
    if isinstance(chem_units, str):
        chem_units = [chem_units]
    # Create the profile object
    prf = ambient.Profile(nc, chem_names=chem_names)
    # Check the chemical names and units are correct
    for i in range(len(chem_names)):
        assert prf.chem_names[i] == chem_names[i]
    assert prf.nchems == len(chem_names)
    # Check the error criteria on the interpolator
    assert prf.err == 0.01
    # Check the get_units method
    name_list = ['temperature', 'salinity', 'pressure'] + chem_names
    unit_list = ['K', 'psu', 'Pa'] + chem_units
    for i in range(len(name_list)):
        assert prf.get_units(name_list[i])[0] == unit_list[i]
    units = prf.get_units(name_list)
    for i in range(len(name_list)):
        assert units[i] == unit_list[i]
    # Check the interpolator function ...
    # Pick a point in the middle of the raw dataset and read off the depth
    # and the values of all the variables
    nz = prf.ds.dims['z'] // 2
    z = prf.ds.coords['z'][nz]
    y = np.zeros(len(name_list))
    for name in name_list:
        y[name_list.index(name)] = prf.ds[name].values[nz]
    # Get an interpolated set of values at this same elevation
    yp = prf.get_values(z, name_list)
    # Check if the results are within the level of error expected by err
    for i in range(len(name_list)):
        assert np.abs((yp[i] - y[i]) / yp[i]) <= prf.err
    # Next, check that the variables returned by the get_values function are
    # the variables we expect
    nz = prf.nc.variables['z'].shape[0] // 2
    z = float(prf.nc.variables['z'][nz])
    Tp, Sp, Pp = prf.get_values(z, ['temperature', 'salinity', 'pressure'])
    T = prf.nc.variables['temperature'][nz]
    S = prf.nc.variables['salinity'][nz]
    P = prf.nc.variables['pressure'][nz]
    assert np.abs((Tp - T) / T) <= prf.err
    assert np.abs((Sp - S) / S) <= prf.err
    assert np.abs((Pp - P) / P) <= prf.err
    if prf.nchems > 0:
        c = np.zeros(prf.nchems)
        cp = np.zeros(prf.nchems)
        for i in range(prf.nchems):
            c[i] = prf.nc.variables[chem_names[i]][nz]
            cp[i] = prf.get_values(z, chem_names[i])
            assert np.abs((cp[i] - c[i]) / c[i]) <= prf.err
    # Test the append() method by inserting the temperature data as a new
    # profile, this time in degrees celsius using the variable name temp
    n0 = prf.nchems
    z = prf.ds.coords['z'].values
    T = prf.ds['temperature'].values
    T_degC = T - 273.15
    assert_array_almost_equal(T_degC + 273.15, T, decimal = 6)
    data = np.vstack((z, T_degC)).transpose()
    symbols = ['z', 'temp']
    units = ['m', 'deg C']
    comments = ['measured', 'identical to temperature, but in deg C']
    prf.append(data, symbols, units, comments, 0)
    # Check that the data were inserted correctly
    # NOTE(review): append() appears to convert deg C back to K on insert —
    # the comparison below against T_degC + 273.15 relies on that
    Tnc = prf.ds['temp'].values
    assert_array_almost_equal(Tnc, T_degC+273.15, decimal = 6)
    assert prf.nc.variables['temp'].units == 'deg C'
    # Check that get_values works correctly with vector inputs for depth
    depths = np.linspace(prf.nc.variables['z'].valid_min,
                         prf.nc.variables['z'].valid_max, 100)
    Temps = prf.get_values(depths, ['temperature', 'temp'])
    for i in range(len(depths)):
        assert_approx_equal(Temps[i,0], Temps[i,1], significant = 6)
    # Make sure the units are returned correctly
    assert prf.get_units('temp')[0] == 'K'
    assert prf.ds['temp'].attrs['units'] == 'K'
    # Check that temp is now listed as a chemical
    assert prf.nchems == n0 + 1
    assert prf.chem_names[-1] == 'temp'
    # Test the API for calculating the buoyancy frequency (note that we do
    # not check the result, just that the function call does not raise an
    # error)
    N = prf.buoyancy_frequency(depths)
    N = prf.buoyancy_frequency(depths[50], h=0.1)
    # Send back the Profile object
    return prf
def check_net_numpy(net_ds, num_ds):
    """
    Check that an ambient.Profile object is created correctly and that the
    methods operate as expected.

    Compares a netCDF-backed profile (`net_ds`) against a numpy-backed one
    (`num_ds`): both should expose the same metadata and interpolate to
    identical values.
    """
    chem_names = net_ds.chem_names
    chem_units = net_ds.chem_units
    # Check the chemical names and units are correct
    # NOTE(review): only the first three chemicals are compared and nchems
    # is hard-coded to 4 (presumably three chemicals plus the appended
    # 'temp' profile) — confirm against the calling test
    for i in range(3):
        assert num_ds.chem_names[i] in chem_names
        assert num_ds.chem_units[i] in chem_units
    assert num_ds.nchems == 4
    # Check the error criteria on the interpolator
    assert num_ds.err == 0.01
    # Check the get_units method
    name_list = ['temperature', 'salinity', 'pressure'] + chem_names[0:3]
    unit_list = ['K', 'psu', 'Pa'] + chem_units[0:3]
    for i in range(3):
        assert num_ds.get_units(name_list[i])[0] == unit_list[i]
    units = num_ds.get_units(name_list)
    for i in range(3):
        assert units[i] == unit_list[i]
    # Check the interpolator function ...
    z = np.linspace(num_ds.z_min, num_ds.z_max, 100)
    # Next, check that the variables returned by the get_values function are
    # the variables we expect
    for depth in z:
        assert num_ds.get_values(depth, 'temperature') == \
            net_ds.get_values(depth, 'temperature')
        assert num_ds.get_values(depth, 'salinity') == \
            net_ds.get_values(depth, 'salinity')
        assert num_ds.get_values(depth, 'pressure') == \
            net_ds.get_values(depth, 'pressure')
    # Test the append() method by inserting the temperature data as a new
    # profile, this time in degrees celsius using the variable name temp
    net_temp = net_ds.ds['temp'].values
    num_temp = num_ds.ds['temp'].values
    assert_array_almost_equal(net_temp, num_temp, decimal = 6)
    assert num_ds.get_units('temp')[0] == 'K'
    # Check that get_values works correctly with vector inputs for depth
    Temps = num_ds.get_values(z, ['temperature', 'temp'])
    for i in range(len(z)):
        assert_approx_equal(Temps[i,0], Temps[i,1], significant = 6)
    # Make sure the units are returned correctly
    assert num_ds.get_units('temp')[0] == 'K'
    # Check that temp is now listed as a chemical
    assert num_ds.chem_names[-1] == 'temp'
    # Test the API for calculating the buoyancy frequency (note that we do
    # not check the result, just that the function call does not raise an
    # error)
    N_num = num_ds.buoyancy_frequency(z)
    N_net = net_ds.buoyancy_frequency(z)
    assert_array_almost_equal(N_num, N_net, decimal=6)
# ----------------------------------------------------------------------------
# Unit tests
# ----------------------------------------------------------------------------
def test_conv_units():
    """
    Test the units conversion methods to make sure they produce the expected
    results.
    """
    # Case 1: a full 2-D array with mixed units
    raw = np.array([[10, 25.4, 9.5, 34], [100, 10.7, 8.4, 34.5]])
    in_units = ['m', 'deg C', 'mg/l', 'psu']
    out_units = ['m', 'K', 'kg/m^3', 'psu']
    expected = np.array([[1.00000000e+01, 2.98550000e+02, 9.50000000e-03,
                          3.40000000e+01],
                         [1.00000000e+02, 2.83850000e+02, 8.40000000e-03,
                          3.45000000e+01]])
    raw, in_units = get_units(raw, in_units, 2, 4, out_units, expected)
    # Case 2: a bare scalar with a single unit string
    scalar = 10.
    scalar, units = get_units(scalar, 'deg C', 1, 1, ['K'],
                              np.array([273.15+10.]))
    # Case 3: a single row of data
    row = [10, 25.4, 9.5, 34]
    expected = np.array([1.00000000e+01, 2.98550000e+02, 9.50000000e-03,
                         3.40000000e+01])
    row, units = get_units(row, ['m', 'deg C', 'mg/l', 'psu'], 1, 4,
                           ['m', 'K', 'kg/m^3', 'psu'], expected)
    # Case 4: a single column of data
    col = np.array([[10., 20., 30., 40]]).transpose()
    expected = np.array([[ 283.15], [293.15], [303.15], [313.15]])
    col, units = get_units(col, 'deg C', 4, 1, ['K'], expected)
def test_from_ctd():
    """
    Test the ambient data methods on a Sea-Bird SBE 19plus Data File.

    This unit test reads in the CTD data from ./data/ctd.BM54.cnv using
    `numpy.loadtxt` and then uses this data to test the data manipulation and
    storage methods in ambient.py.
    """
    dfile = os.path.join(DATA_DIR,'ctd_BM54.cnv')
    # Load in the raw data using np.loadtxt
    # NOTE(review): skiprows/usecols are tied to this specific .cnv file's
    # header length and column layout
    raw = np.loadtxt(dfile, comments = '#', skiprows = 175,
                     usecols = (0, 1, 3, 8, 9, 10, 12))
    # State the units of the input data (read by hand from the file)
    units = ['deg C', 'db', 'mg/m^3', 'm', 'psu', 'kg/m^3', 'mg/l']
    # State the equivalent mks units (translated here by hand)
    mks_units = ['K', 'Pa', 'kg/m^3', 'm', 'psu', 'kg/m^3', 'kg/m^3']
    # Clean the profile to remove depth reversals
    # The expected min/max depths and row count below were read off the
    # known-good parse of this file
    z_col = 3
    p_col = 1
    profile = get_profile(raw, z_col, 50, p_col, 0., 2.124, 1529.789, 11074,
                          7)
    # Convert the profile to standard units
    profile, units = get_units(profile, units, 11074, 7, mks_units)
    # Create an empty netCDF4-classic dataset to store the CTD information
    nc_file = os.path.join(OUTPUT_DIR,'test_BM54.nc')
    summary = 'Py.Test test file'
    source = 'R/V Brooks McCall, station BM54'
    sea_name = 'Gulf of Mexico'
    p_lat = 28.0 + 43.945 / 60.0
    p_lon = 360 - (88.0 + 22.607 / 60.0)
    p_time = date2num(datetime(2010, 5, 30, 18, 22, 12),
                      units = 'seconds since 1970-01-01 00:00:00 0:00',
                      calendar = 'julian')
    nc = check_nc_db(nc_file, summary, source, sea_name, p_lat,
                     p_lon, p_time)
    # Fill the netCDF4-classic dataset with the data in profile
    symbols = ['temperature', 'pressure', 'wetlab_fluorescence', 'z',
               'salinity', 'density', 'oxygen']
    comments = ['measured', 'measured', 'measured', 'measured', 'measured',
                'measured', 'measured']
    long_names = ['Absolute temperature', 'pressure', 'Wetlab fluorescence',
                  'depth below the water surface', 'Practical salinity',
                  'Density', 'Oxygen']
    std_names = ['temperature', 'pressure', 'wetlab fluorescence', 'depth',
                 'salinity', 'density', 'oxygen']
    nc = get_filled_nc_db(nc, profile, symbols, units, comments, z_col,
                          long_names, std_names)
    # Create a Profile object from this netCDF dataset and test the Profile
    # methods
    bm54 = get_profile_obj(nc, ['oxygen'], ['kg/m^3'])
    # Close down the pipes to the netCDF dataset files
    bm54.nc.close()
def test_from_txt():
    """
    Test the ambient data methods on simple text files.

    This unit test reads in the text files ./data/C.dat and
    ./data/T.dat using `numpy.loadtxt` and then uses this data to test
    the data manipulation and storage methods in ambient.py.

    Returns the `ambient.Profile` object built here so that other tests
    (e.g., `test_using_numpy`) can reuse the netCDF-backed profile.

    """
    # Get platform-independent paths to the raw text datafiles
    cdat_file = os.path.join(DATA_DIR,'C.dat')
    tdat_file = os.path.join(DATA_DIR,'T.dat')
    # Load in the raw data using np.loadtxt
    C_raw = np.loadtxt(cdat_file, comments = '%')
    T_raw = np.loadtxt(tdat_file, comments = '%')
    # Clean the profile to remove depth reversals
    C_data = get_profile(C_raw, 1, 25, None, 0., 1.0256410e+01, 8.0000000e+02,
                         34, 2)
    T_data = get_profile(T_raw, 1, 25, None, 0., 1.0831721e+01, 7.9922631e+02,
                         34, 2)
    # Convert the data to standard units
    C_data, C_units = get_units(C_data, ['psu', 'm'], 34, 2, ['psu', 'm'])
    T_data, T_units = get_units(T_data, ['deg C', 'm'], 34, 2, ['K', 'm'])
    # Create an empty netCDF4-classic dataset to store the CTD information
    nc_file = os.path.join(OUTPUT_DIR,'test_DS.nc')
    summary = 'Py.Test test file'
    source = 'Profiles from the SINTEF DeepSpill Report'
    sea_name = 'Norwegian Sea'
    p_lat = 64.99066
    p_lon = 4.84725
    p_time = date2num(datetime(2000, 6, 27, 12, 0, 0),
                      units = 'seconds since 1970-01-01 00:00:00 0:00',
                      calendar = 'julian')
    nc = check_nc_db(nc_file, summary, source, sea_name, p_lat,
                     p_lon, p_time)
    # Fill the netCDF4-classic dataset with the data in the salinity profile
    symbols = ['salinity', 'z']
    comments = ['measured', 'measured']
    long_names = ['Practical salinity', 'depth below the water surface']
    std_names = ['salinity', 'depth']
    nc = get_filled_nc_db(nc, C_data, symbols, C_units, comments, 1,
                          long_names, std_names)
    # Because the temperature data will be interpolated to the vertical
    # coordinates in the salinity profile, insert the data and test that
    # insertion worked correctly by hand
    symbols = ['temperature', 'z']
    comments = ['measured', 'measured']
    long_names = ['Absolute temperature', 'depth below the water surface']
    std_names = ['temperature', 'depth']
    nc = ambient.fill_nc_db(nc, T_data, symbols, T_units, comments, 1)
    # The dataset should keep the salinity profile's depth coordinates...
    assert_array_almost_equal(nc.variables['z'][:],
                              C_data[:,1], decimal = 6)
    # ...and the stored temperature must interpolate back to the raw data
    z = nc.variables['z'][:]
    T = nc.variables['temperature'][:]
    f = interp1d(z, T)
    for i in range(T_data.shape[0]):
        assert_approx_equal(T_data[i,0], f(T_data[i,1]), significant = 5)
    assert nc.variables['temperature'].comment == comments[0]
    # Calculate and insert the pressure data
    z = nc.variables['z'][:]
    T = nc.variables['temperature'][:]
    S = nc.variables['salinity'][:]
    P = ambient.compute_pressure(z, T, S, 0)
    P_data = np.vstack((z, P)).transpose()
    nc = ambient.fill_nc_db(nc, P_data, ['z', 'pressure'], ['m', 'Pa'],
                            ['measured', 'computed'], 0)
    # Test the Profile object
    ds = get_profile_obj(nc, [], [])
    # Close down the pipes to the netCDF dataset files
    ds.close_nc()

    return ds
def test_using_numpy():
    """
    Test the ambient data methods using only numpy

    This unit test repeats the tests in `test_from_txt()`, but using only
    the `numpy` array part of the `Profile` object instead of a netCDF
    dataset.

    Returns the numpy-backed `ambient.Profile` object.

    """
    # Get the profile object using netCDF datasets
    net_profile = test_from_txt()
    # Get a platform-independent path to the datafile
    cdat_file = os.path.join(DATA_DIR,'C.dat')
    tdat_file = os.path.join(DATA_DIR,'T.dat')
    # Load in the raw data using np.loadtxt
    C_raw = np.loadtxt(cdat_file, comments = '%')
    T_raw = np.loadtxt(tdat_file, comments = '%')
    # Clean the profile to remove depth reversals
    C_data = get_profile(C_raw, 1, 25, None, 0., 1.0256410e+01,
                         8.0000000e+02, 34, 2)
    T_data = get_profile(T_raw, 1, 25, None, 0., 1.0831721e+01,
                         7.9922631e+02, 34, 2)
    # Convert the data to standard units
    C_data, C_units = get_units(C_data, ['psu', 'm'], 34, 2, ['psu', 'm'])
    T_data, T_units = get_units(T_data, ['deg C', 'm'], 34, 2, ['K', 'm'])
    # Create a numpy array to hold depth and salinity (middle column is
    # reserved for the temperature data inserted below)
    var_names = ['depth', 'salinity']
    var_units = ['m', 'psu']
    data = np.zeros((C_data.shape[0], 3))
    data[:,0] = C_data[:,1]
    data[:,2] = C_data[:,0]
    # Add the temperature data using the existing depth data
    data = ambient.add_data(data, 1, 'temperature', T_data, ['temperature',
                            'z'], T_units, ['measured', 'measured'], 1)
    # Compute and append the pressure from the stored T and S profiles
    z = data[:,0]
    T = data[:,1]
    S = data[:,2]
    P = ambient.compute_pressure(z, T, S, 0)
    P_data = np.vstack((z, P)).transpose()
    data = ambient.add_data(data, 3, 'pressure', P_data, ['z', 'pressure'],
                            ['m', 'Pa'], ['measured', 'measured'], 0)
    # Select some current data
    current = np.array([0.15, 0.])
    current_units =['m/s', 'm/s']
    # Create the profile object
    ztsp = ['z', 'temperature', 'salinity', 'pressure']
    ztsp_units = ['m', 'K', 'psu', 'Pa']
    ds = ambient.Profile(data, ztsp, None, 0.01, ztsp_units, None,
                         current=current, current_units=current_units)
    # Add these currents to the netCDF profile
    current = np.array([[0., 0.15, 0., 0.],
                        [800., 0.15, 0., 0.]])
    current_names = ['z', 'ua', 'va', 'wa']
    current_units = ['m', 'm/s', 'm/s', 'm/s']
    net_profile.append(current, current_names, current_units, z_col=0)
    # Add the 'temp' data to the numpy dataset
    z = ds.ds.coords['z'].values
    T = ds.ds['temperature'].values
    T_degC = T - 273.15
    data = np.vstack((z, T_degC)).transpose()
    symbols = ['z', 'temp']
    units = ['m', 'deg C']
    comments = ['measured', 'identical to temperature, but in deg C']
    ds.append(data, symbols, units, comments, 0)
    # Check if the two objects are equal
    check_net_numpy(net_profile, ds)

    return ds
def test_from_calcs():
    """
    Test the ambient data methods on synthetic profiles.

    This unit test creates synthetic data (e.g., profiles matching laboratory
    idealized conditions) and then uses this data to test the data
    manipulation and storage methods in ambient.py.

    """
    # Create the synthetic temperature and salinity profiles
    z = np.array([0.0, 2.4])
    T = np.array([21.0, 20.0])
    S = np.array([0.0, 30.0])
    # Create an empty netCDF4-classic dataset to store the CTD information
    nc_file = os.path.join(OUTPUT_DIR,'test_Lab.nc')
    summary = 'Py.Test test file'
    source = 'Synthetic profiles for idealized laboratory conditions'
    sea_name = 'None'
    p_lat = -999
    p_lon = -999
    p_time = date2num(datetime(2013, 7, 12, 11, 54, 0),
                      units = 'seconds since 1970-01-01 00:00:00 0:00',
                      calendar = 'julian')
    nc = check_nc_db(nc_file, summary, source, sea_name, p_lat,
                     p_lon, p_time)
    # Convert the temperature units
    T, T_units = get_units(T, ['deg C'], 1, 2, ['K'])
    # Fill the netCDF4-classic dataset with the data in these variables
    nc = get_filled_nc_db(nc, z, ['z'], ['m'], ['synthetic'], 0,
                          ['depth below the water surface'], ['depth'])
    # Check that we cannot overwrite this existing z-data: fill_nc_db must
    # raise a ValueError rather than silently replace the coordinates.
    try:
        nc = ambient.fill_nc_db(nc, z, 'z', 'm', 'synthetic', 0)
    except ValueError:
        pass
    else:
        raise AssertionError('fill_nc_db should refuse to overwrite '
                             'existing z-data')
    # Fill in the remaining data
    data = np.zeros((2, 3))
    data[:,0] = z
    data[:,1] = T
    data[:,2] = S
    nc = get_filled_nc_db(nc, data, ['z', 'temperature', 'salinity'],
                          ['m', 'K', 'psu'],
                          ['synthetic', 'synthetic', 'synthetic'], 0,
                          ['depth below the water surface',
                           'Absolute temperature', 'Practical salinity'],
                          ['depth', 'temperature', 'salinity'])
    # Calculate and insert the pressure data
    P = ambient.compute_pressure(data[:,0], data[:,1], data[:,2], 0)
    P_data = np.vstack((data[:,0], P)).transpose()
    nc = ambient.fill_nc_db(nc, P_data, ['z', 'pressure'], ['m', 'Pa'],
                            ['measured', 'computed'], 0)
    # Create and test a Profile object for this dataset.
    lab = get_profile_obj(nc, [], [])
    # Close down the pipes to the netCDF dataset files
    lab.nc.close()
def check_from_roms():
    """
    Test the ambient data methods on data read from ROMS.

    This unit test reads in a ROMS netCDF output file, extracts the profile
    information, and creates a new netCDF dataset and Profile class object
    for use by the TAMOC modeling suite.

    TODO (S. Socolofsky 7/15/2013): After fixing the octant.roms module to
    have monotonically increasing depth, try to reinstate this test by
    changing the function name from check_from_roms() to test_from_roms().
    I was also having problems with being allowed to use the THREDDS netCDF
    file with py.test.  I could run the test under ipython, but not under
    py.test.

    """
    # Get a path to a ROMS dataset on a THREDDS server
    nc_roms = 'http://barataria.tamu.edu:8080/thredds/dodsC/' + \
              'ROMS_Daily/08122012/ocean_his_08122012_24.nc'
    # Prepare the remaining inputs to the get_nc_db_from_roms() function
    # call
    nc_file = os.path.join(OUTPUT_DIR,'test_roms.nc')
    t_idx = 0
    j_idx = 400
    i_idx = 420
    chem_names = ['dye_01', 'dye_02']
    (nc, nc_roms) = ambient.get_nc_db_from_roms(nc_roms, nc_file, t_idx,
                                                j_idx, i_idx, chem_names)
    # Check the data are inserted correctly from ROMS into the new netCDF
    # dataset.  The expected values below were presumably recorded by hand
    # from this particular ROMS snapshot -- they are only valid for the
    # dataset at the URL above.
    assert nc.summary == 'ROMS Simulation Data'
    assert nc.sea_name == 'ROMS'
    assert nc.variables['z'][:].shape[0] == 51
    assert nc.variables['z'][0] == nc.variables['z'].valid_min
    assert nc.variables['z'][-1] == nc.variables['z'].valid_max
    assert_approx_equal(nc.variables['temperature'][0], 303.24728393554688,
                        significant = 6)
    assert_approx_equal(nc.variables['salinity'][0], 36.157352447509766,
                        significant = 6)
    assert_approx_equal(nc.variables['pressure'][0], 101325.0,
                        significant = 6)
    assert_approx_equal(nc.variables['dye_01'][0], 3.4363944759034656e-22,
                        significant = 6)
    assert_approx_equal(nc.variables['dye_02'][0], 8.8296093939330156e-21,
                        significant = 6)
    assert_approx_equal(nc.variables['temperature'][-1], 290.7149658203125,
                        significant = 6)
    assert_approx_equal(nc.variables['salinity'][-1], 35.829414367675781,
                        significant = 6)
    assert_approx_equal(nc.variables['pressure'][-1], 3217586.2927573984,
                        significant = 6)
    assert_approx_equal(nc.variables['dye_01'][-1], 8.7777050221856635e-22,
                        significant = 6)
    assert_approx_equal(nc.variables['dye_02'][-1], 4.0334050451121613e-20,
                        significant = 6)
    # Create a Profile object from this netCDF dataset and test the Profile
    # methods
    roms = get_profile_obj(nc, chem_names, ['kg/m^3', 'kg/m^3'])
    # Close the pipe to the netCDF dataset
    roms.nc.close()
    nc_roms.close()
def test_profile_deeper():
    """
    Test the methods to compute buoyancy_frequency and to extend a CTD profile
    to greater depths.

    We just test the data from ctd_BM54.cnv since these methods are
    independent of the source of data.

    """
    # Make sure the netCDF file for the ctd_BM54.cnv is already created by
    # running the test file that creates it.
    test_from_ctd()
    # Get a Profile object from this dataset
    nc_file = os.path.join(OUTPUT_DIR,'test_BM54.nc')
    ctd = ambient.Profile(nc_file, chem_names=['oxygen'])
    # Compute the buoyancy frequency near the bottom of the profile and
    # verify that the result is correct
    N = ctd.buoyancy_frequency(1529.789, h=0.01)
    assert_approx_equal(N, 0.00061463758327116565, significant=6)
    # Record a few values to check after running the extension method
    T0, S0, P0, o20 = ctd.get_values(1000., ['temperature', 'salinity',
                                             'pressure', 'oxygen'])
    z0 = ctd.interp_ds.coords['z'].values
    # Extend the profile to 2500 m
    nc_file = os.path.join(OUTPUT_DIR,'test_BM54_deeper.nc')
    ctd.extend_profile_deeper(2500., nc_file)
    # Check if the original data is preserved
    T1, S1, P1, o21 = ctd.get_values(1000., ['temperature', 'salinity',
                                             'pressure', 'oxygen'])
    z1 = ctd.interp_ds.coords['z'].values
    # Make sure the results are still right
    assert_approx_equal(T1, T0, significant=6)
    assert_approx_equal(S1, S0, significant=6)
    assert_approx_equal(P1, P0, significant=6)
    assert_approx_equal(o21, o20, significant=6)
    # The extended profile must contain more points and reach 2500 m
    assert z1.shape[0] > z0.shape[0]
    assert z1[-1] == 2500.
    # Note that the buoyancy frequency shifts very slightly because density
    # is not linearly proportional to salinity.  Nonetheless, the results are
    # close to what we want, so this method of extending the profile works
    # adequately.
    N = ctd.buoyancy_frequency(1500.)
    assert_approx_equal(N, 0.0006320416080592639, significant=6)
    N = ctd.buoyancy_frequency(2500.)
    assert_approx_equal(N, 0.0006146292892002274, significant=6)
    ctd.close_nc()
| socolofs/tamoc | tamoc/test/test_ambient.py | Python | mit | 29,724 | [
"NetCDF"
] | bfa2dba33a097d0f9ef96b4cbdb6c2fcc5cebfb071205e9093889cfc7c046e83 |
"""Module for easy compartmental implementation of a BRIAN2 network.
Build parts of a network via subclassing :class:`~pypet.brian2.network.NetworkComponent` and
:class:`~pypet.brian2.network.NetworkAnalyser` for recording and statistical analysis.
Specify a :class:`~pypet.brian2.network.NetworkRunner` (subclassing optionally) that handles
the execution of your experiment in different subruns. Subruns can be defined
as :class:`~pypet.brian2.parameter.Brian2Parameter` instances in a particular
trajectory group. You must add to every parameter's :class:`~pypet.annotations.Annotations` the
attribute `order`. This order must be an integer specifying the index or order
the subrun is to be executed in.
The creation and management of a BRIAN2 network is handled by the
:class:`~pypet.brian2.network.NetworkManager` (no need for subclassing). Pass your
components, analyser and your runner to the manager.
Pass the :func:`~pypet.brian2.network.run_network` function together with a
:class:`~pypet.brian2.network.NetworkManager` to your main environment function
:func:`~pypet.environment.Environment.run` to start a simulation and parallel
parameter exploration. Be aware that in case of a *pre-built* network,
successful parameter exploration
requires parallel processing (see :class:`~pypet.brian2.network.NetworkManager`).
"""
__author__ = 'Robert Meyer'
from brian2 import Network, second
from pypet.pypetlogging import HasLogger
class NetworkComponent(HasLogger):
    """Abstract class to define a component of a BRIAN2 network.

    Can be subclassed to define the construction of NeuronGroups or
    Synapses, for instance.

    All methods are no-ops by default; subclasses override only the hooks
    they need.

    """
    def add_parameters(self, traj):
        """Adds parameters to `traj`.

        Function called from the :class:`~pypet.brian2.network.NetworkManager` to
        define and add parameters to the trajectory container.

        """
        pass

    def pre_build(self, traj, brian_list, network_dict):
        """Builds network objects before the actual experimental runs.

        Function called from the :class:`~pypet.brian2.network.NetworkManager` if
        components can be built before the actual experimental runs or in
        case the network is pre-run.

        Parameters are the same as for the :func:`~pypet.brian2.network.NetworkComponent.build`
        method.

        """
        pass

    def build(self, traj, brian_list, network_dict):
        """Builds network objects at the beginning of each individual experimental run.

        Function called from the :class:`~pypet.brian2.network.NetworkManager`
        at the beginning of every experimental run.

        :param traj:

            Trajectory container

        :param brian_list:

            Add BRIAN2 network objects like NeuronGroups or Synapses to this list.
            These objects will be automatically added at the instantiation of the network
            in case the network was not pre-run
            via `Network(*brian_list)`.

        :param network_dict:

            Add any item to this dictionary that should be shared or accessed by all
            your components and which are not part of the trajectory container.
            It is recommended to also put all items from the `brian_list` into
            the dictionary for completeness.

        For convenience I recommend documenting the implementation of `build` and
        `pre-build` and so on in the subclass like the following.  Use statements like `Adds`
        for items that are added to the list and the dict and statements like `Expects`
        for what is needed to be part of the `network_dict` in order to build the
        current component.

        brian_list:

            Adds:

            4 Connections, between all types of neurons (e->e, e->i, i->e, i->i)

        network_dict:

            Expects:

            'neurons_i': Inhibitory neuron group

            'neurons_e': Excitatory neuron group

            Adds:

            'connections': List of 4 Connections,
            between all types of neurons (e->e, e->i, i->e, i->i)

        """
        pass

    def add_to_network(self, traj, network, current_subrun, subrun_list, network_dict):
        """Can add network objects before a specific `subrun`.

        Called by a :class:`~pypet.brian2.network.NetworkRunner` before the
        given `subrun`.

        Potentially one wants to add some BRIAN2 objects later to the network than
        at the very beginning of an experimental run. For example, a monitor might
        be added at the second subrun after an initial phase that is not supposed
        to be recorded.

        :param traj: Trajectory container

        :param network:

            BRIAN2 network where elements could be added via `add(...)`.

        :param current_subrun:

            :class:`~pypet.brian2.parameter.Brian2Parameter` specifying the very next
            subrun to be simulated.

        :param subrun_list:

            List of :class:`~pypet.brian2.parameter.Brian2Parameter` objects that are to
            be run after the current subrun.

        :param network_dict:

            Dictionary of items shared by all components.

        """
        pass

    def remove_from_network(self, traj, network, current_subrun, subrun_list, network_dict):
        """Can remove network objects before a specific `subrun`.

        Called by a :class:`~pypet.brian2.network.NetworkRunner` after a
        given `subrun` and shortly after analysis (see
        :class:`~pypet.brian2.network.NetworkAnalyser`).

        :param traj: Trajectory container

        :param network:

            BRIAN2 network where elements could be removed via `remove(...)`.

        :param current_subrun:

            :class:`~pypet.brian2.parameter.Brian2Parameter` specifying the current subrun
            that was executed shortly before.

        :param subrun_list:

            List of :class:`~pypet.brian2.parameter.Brian2Parameter` objects that are to
            be run after the current subrun.

        :param network_dict:

            Dictionary of items shared by all components.

        """
        pass
class NetworkAnalyser(NetworkComponent):
    """Specific NetworkComponent that analyses a network experiment.

    Can be subclassed to create components for statistical analysis of a network
    and network monitors.

    """
    def analyse(self, traj, network, current_subrun, subrun_list, network_dict):
        """Can perform statistical analysis on a given network.

        Called by a :class:`~pypet.brian2.network.NetworkRunner` directly after a
        given `subrun`.

        :param traj: Trajectory container

        :param network: BRIAN2 network

        :param current_subrun:

            :class:`~pypet.brian2.parameter.Brian2Parameter` specifying the current subrun
            that was executed shortly before.

        :param subrun_list:

            List of :class:`~pypet.brian2.parameter.Brian2Parameter` objects that are to
            be run after the current subrun. Can be deleted or added to change the actual course
            of the experiment.

        :param network_dict:

            Dictionary of items shared by all components.

        """
        pass
class NetworkRunner(NetworkComponent):
    """Specific NetworkComponent to carry out the running of a BRIAN2 network experiment.

    A NetworkRunner only handles the execution of a network simulation, the `BRIAN2 network` is
    created by a :class:`~pypet.brian2.network.NetworkManager`.

    Can potentially be subclassed to allow the adding of parameters via
    :func:`~pypet.brian2.network.NetworkComponent.add_parameters`. These parameters
    should specify an experimental run with a :class:`~pypet.brian2.parameter.Brian2Parameter`
    to define the order and duration of network subruns. For the actual experimental runs,
    all subruns must be stored in a particular trajectory group.
    By default this `traj.parameters.simulation.durations`. For a pre-run
    the default is `traj.parameters.simulation.pre_durations`. These default group names
    can be changed at runner initialisation (see below).

    The network runner will look in the `v_annotations` property of each parameter
    in the specified trajectory group. It searches for the entry `order`
    to determine the order of subruns.

    :param report:

        How simulation progress should be reported, see also the parameters of
        run(...) in a BRIAN2 network.

    :param report_period:

        How often progress is reported. If not specified 10 seconds is chosen.

    :param durations_group_name:

        Name where to look for :class:`~pypet.brian2.parameter.Brian2Parameter` instances
        in the trajectory which specify the order and durations of subruns.

    :param pre_durations_group_name:

        As above, but for pre running a network.

    Moreover, in your subclass you can log messages with the private attribute `_logger`
    which is initialised in :func:`~pypet.pypetlogging.HasLogger._set_logger`.

    """
    def __init__(self, report='text', report_period=None,
                 durations_group_name='simulation.durations',
                 pre_durations_group_name='simulation.pre_durations'):
        # Default progress-report interval of 10 (BRIAN2) seconds
        if report_period is None:
            report_period = 10 * second
        self._report = report
        self._report_period = report_period
        self._durations_group_name = durations_group_name
        self._pre_durations_group_name = pre_durations_group_name
        self._set_logger()

    def execute_network_pre_run(self, traj, network, network_dict, component_list, analyser_list):
        """Runs a network before the actual experiment.

        Called by a :class:`~pypet.brian2.network.NetworkManager`.
        Similar to :func:`~pypet.brian2.network.NetworkRunner.run_network`.

        Subruns and their durations are extracted from the trajectory. All
        :class:`~pypet.brian2.parameter.Brian2Parameter` instances found under
        `traj.parameters.simulation.pre_durations` (default, you can change the
        name of the group where to search for durations at runner initialisation).
        The order is determined from
        the `v_annotations.order` attributes. There must be at least one subrun in the trajectory,
        otherwise an AttributeError is thrown. If two subruns equal in their order
        property a RuntimeError is thrown.

        :param traj: Trajectory container

        :param network: BRIAN2 network

        :param network_dict: Dictionary of items shared among all components

        :param component_list: List of :class:`~pypet.brian2.network.NetworkComponent` objects

        :param analyser_list: List of :class:`~pypet.brian2.network.NetworkAnalyser` objects

        """
        self._execute_network_run(traj, network, network_dict, component_list, analyser_list,
                                  pre_run=True)

    def execute_network_run(self, traj, network, network_dict, component_list, analyser_list):
        """Runs a network in an experimental run.

        Called by a :class:`~pypet.brian2.network.NetworkManager`.

        A network run is divided into several subruns which are defined as
        :class:`~pypet.brian2.parameter.Brian2Parameter` instances.

        These subruns are extracted from the trajectory. All
        :class:`~pypet.brian2.parameter.Brian2Parameter` instances found under
        `traj.parameters.simulation.durations` (default, you can change the
        name of the group where to search for durations at runner initialisation).
        The order is determined from
        the `v_annotations.order` attributes. An error is thrown if no orders attribute
        can be found or if two parameters have the same order.

        There must be at least one subrun in the trajectory,
        otherwise an AttributeError is thrown. If two subruns equal in their order
        property a RuntimeError is thrown.

        For every subrun the following steps are executed:

        1.  Calling :func:`~pypet.brian2.network.NetworkComponent.add_to_network` for
            every :class:`~pypet.brian2.network.NetworkComponent` in the order as
            they were passed to the :class:`~pypet.brian2.network.NetworkManager`.

        2.  Calling :func:`~pypet.brian2.network.NetworkComponent.add_to_network` for
            every :class:`~pypet.brian2.network.NetworkAnalyser` in the order as
            they were passed to the :class:`~pypet.brian2.network.NetworkManager`.

        3.  Calling :func:`~pypet.brian2.network.NetworkComponent.add_to_network` of the
            NetworkRunner itself (usually the network runner should not add or remove
            anything from the network, but this step is executed for completeness).

        4.  Running the BRIAN2 network for the duration of the current subrun by calling
            the network's `run` function.

        5.  Calling :func:`~pypet.brian2.network.NetworkAnalyser.analyse` for
            every :class:`~pypet.brian2.network.NetworkAnalyser` in the order as
            they were passed to the :class:`~pypet.brian2.network.NetworkManager`.

        6.  Calling :func:`~pypet.brian2.network.NetworkComponent.remove_from_network` of the
            NetworkRunner itself (usually the network runner should not add or remove
            anything from the network, but this step is executed for completeness).

        7.  Calling :func:`~pypet.brian2.network.NetworkComponent.remove_from_network` for
            every :class:`~pypet.brian2.network.NetworkAnalyser` in the order as
            they were passed to the :class:`~pypet.brian2.network.NetworkManager`.

        8.  Calling :func:`~pypet.brian2.network.NetworkComponent.remove_from_network` for
            every :class:`~pypet.brian2.network.NetworkComponent` in the order as
            they were passed to the :class:`~pypet.brian2.network.NetworkManager`.

        These 8 steps are repeated for every subrun in the `subrun_list`.
        The `subrun_list` passed to all `add_to_network`, `analyse` and
        `remove_from_network` methods can be modified
        within these functions to potentially alter the order of execution or
        even erase or add upcoming subruns if necessary.

        For example, a NetworkAnalyser checks
        for epileptic pathological activity and cancels all coming subruns in case
        of undesired network dynamics.

        :param traj: Trajectory container

        :param network: BRIAN2 network

        :param network_dict: Dictionary of items shared among all components

        :param component_list: List of :class:`~pypet.brian2.network.NetworkComponent` objects

        :param analyser_list: List of :class:`~pypet.brian2.network.NetworkAnalyser` objects

        """
        self._execute_network_run(traj, network, network_dict, component_list, analyser_list,
                                  pre_run=False)

    def _extract_subruns(self, traj, pre_run=False):
        """Extracts subruns from the trajectory.

        Looks up all duration parameters in the (pre-)durations group and
        returns them sorted by their `v_annotations.order` attribute.

        :param traj: Trajectory container

        :param pre_run: Boolean whether current run is regular or a pre-run

        :return: List of duration parameters sorted by their `order` annotation

        :raises: RuntimeError if orders are duplicates or even missing

        """
        if pre_run:
            durations_list = traj.f_get_all(self._pre_durations_group_name)
        else:
            durations_list = traj.f_get_all(self._durations_group_name)

        # Map each order index to its duration parameter; duplicate or
        # missing orders are programming errors in the user's setup.
        subruns = {}
        orders = []
        for durations in durations_list:
            for duration_param in durations.f_iter_leaves(with_links=False):
                if 'order' in duration_param.v_annotations:
                    order = duration_param.v_annotations.order
                else:
                    raise RuntimeError('Your duration parameter %s has no order. Please add '
                                       'an order in `v_annotations.order`.' %
                                       duration_param.v_full_name)
                if order in subruns:
                    raise RuntimeError('Your durations must differ in their order, there are two '
                                       'with order %d.' % order)
                else:
                    subruns[order] = duration_param
                    orders.append(order)

        return [subruns[order] for order in sorted(orders)]

    def _execute_network_run(self, traj, network, network_dict, component_list,
                             analyser_list, pre_run=False):
        """Generic `execute_network_run` function, handles experimental runs as well as pre-runs.

        See also :func:`~pypet.brian2.network.NetworkRunner.execute_network_run` and
        :func:`~pypet.brian2.network.NetworkRunner.execute_network_pre_run`.

        """
        # Initially extract the `subrun_list`
        subrun_list = self._extract_subruns(traj, pre_run=pre_run)

        # counter for subruns
        subrun_number = 0

        # Execute all subruns in order; note that hooks may mutate
        # `subrun_list` to cancel or add upcoming subruns.
        while len(subrun_list) > 0:
            # Get the next subrun
            current_subrun = subrun_list.pop(0)

            # 1. Call `add` of all normal components
            for component in component_list:
                component.add_to_network(traj, network, current_subrun, subrun_list,
                                         network_dict)

            # 2. Call `add` of all analyser components
            for analyser in analyser_list:
                analyser.add_to_network(traj, network, current_subrun, subrun_list,
                                        network_dict)

            # 3. Call `add` of the network runner itself
            self.add_to_network(traj, network, current_subrun, subrun_list,
                                network_dict)

            # 4. Run the network; `f_get()` yields the subrun's duration
            self._logger.info('STARTING subrun `%s` (#%d) lasting %s.' %
                              (current_subrun.v_name, subrun_number, str(current_subrun.f_get())))
            network.run(duration=current_subrun.f_get(), report=self._report,
                        report_period=self._report_period)

            # 5. Call `analyse` of all analyser components
            for analyser in analyser_list:
                analyser.analyse(traj, network, current_subrun, subrun_list,
                                 network_dict)

            # 6. Call `remove` of the network runner itself
            self.remove_from_network(traj, network, current_subrun, subrun_list,
                                     network_dict)

            # 7. Call `remove` for all analyser components
            for analyser in analyser_list:
                analyser.remove_from_network(traj, network, current_subrun, subrun_list,
                                             network_dict)

            # 8. Call `remove` for all normal components
            for component in component_list:
                component.remove_from_network(traj, network, current_subrun, subrun_list,
                                              network_dict)

            subrun_number += 1
class NetworkManager(HasLogger):
"""Manages a BRIAN2 network experiment and creates the network.
An experiment consists of
:param network_runner: A :class:`~pypet.brian2.network.NetworkRunner`
Special component that handles the execution of several subruns.
A NetworkRunner can be subclassed to implement the
:func:`~pypet.brian2.network.NetworkComponent.add_parameters` method to add
:class:`~pypet.brian2.parameter.Brian2Parameter` instances defining the
order and duration of subruns.
:param component_list:
List of :class:`~pypet.brian2.network.NetworkComponents` instances to create
and manage individual parts of a network.
They are build and added to the network in the order defined in the list.
:class:`~pypet.brian2.network.NetworkComponent` always needs to be sublcassed and
defines only an abstract interface. For instance, one could create her or his
own subclass called NeuronGroupComponent that creates NeuronGroups, whereas
a SynapseComponent creates Synapses between the previously built NeuronGroups.
Accordingly, the SynapseComponent instance is listed after
the NeuronGroupComponent.
:param analyser_list:
List of :class:`~pypet.brian2.network.NetworkAnalyser` instances for recording and
statistical evaluation of a BRIAN2 network. They should be used to add monitors
to a network and to do further processing of the monitor data.
This division allows to create compartmental network models where one can easily
replace parts of a network simulation.
:param network_constructor:
If you have a custom network constructor apart from the Brian one,
pass it here.
"""
def __init__(self, network_runner, component_list, analyser_list=(), network_constructor=None):
self.components = component_list
self.network_runner = network_runner
self.analysers = analyser_list
self._network_dict = {}
self._brian_list = []
self._set_logger()
self._pre_built = False
self._pre_run = False
self._network = None
if network_constructor is None:
self._network_constructor = Network
else:
self._network_constructor = network_constructor
def add_parameters(self, traj):
"""Adds parameters for a network simulation.
Calls :func:`~pypet.brian2.network.NetworkComponent.add_parameters` for all components,
analyser, and the network runner (in this order).
:param traj: Trajectory container
"""
self._logger.info('Adding Parameters of Components')
for component in self.components:
component.add_parameters(traj)
if self.analysers:
self._logger.info('Adding Parameters of Analysers')
for analyser in self.analysers:
analyser.add_parameters(traj)
self._logger.info('Adding Parameters of Runner')
self.network_runner.add_parameters(traj)
def pre_build(self, traj):
"""Pre-builds network components.
Calls :func:`~pypet.brian2.network.NetworkComponent.pre_build` for all components,
analysers, and the network runner.
`pre_build` is not automatically called but either needs to be executed manually
by the user, either calling it directly or by using
:func:`~pypet.brian2.network.NetworkManager.pre_run`.
This function does not create a `BRIAN2 network`, but only it's components.
:param traj: Trajectory container
"""
self._logger.info('Pre-Building Components')
for component in self.components:
component.pre_build(traj, self._brian_list, self._network_dict)
if self.analysers:
self._logger.info('Pre-Building Analysers')
for analyser in self.analysers:
analyser.pre_build(traj, self._brian_list, self._network_dict)
self._logger.info('Pre-Building NetworkRunner')
self.network_runner.pre_build(traj, self._brian_list, self._network_dict)
self._pre_built = True
def build(self, traj):
"""Pre-builds network components.
Calls :func:`~pypet.brian2.network.NetworkComponent.build` for all components,
analysers and the network runner.
`build` does not need to be called by the user. If `~pypet.brian2.network.run_network`
is passed to an :class:`~pypet.environment.Environment` with this Network manager,
`build` is automatically called for each individual experimental run.
:param traj: Trajectory container
"""
self._logger.info('Building Components')
for component in self.components:
component.build(traj, self._brian_list, self._network_dict)
if self.analysers:
self._logger.info('Building Analysers')
for analyser in self.analysers:
analyser.build(traj, self._brian_list, self._network_dict)
self._logger.info('Building NetworkRunner')
self.network_runner.build(traj, self._brian_list, self._network_dict)
def pre_run_network(self, traj):
    """Starts a network run before the individual runs.

    Useful if a network needs an initial run that can be shared by all
    individual experimental runs during parameter exploration.

    Needs to be called by the user. If `pre_run_network` is started by the
    user, :func:`~pypet.brian2.network.NetworkManager.pre_build` will be
    automatically called from this function.

    This function will create a new BRIAN2 network which is run by
    the :class:`~pypet.brian2.network.NetworkRunner` and its
    :func:`~pypet.brian2.network.NetworkRunner.execute_network_pre_run`.

    To see how a network run is structured also take a look at
    :func:`~pypet.brian2.network.NetworkRunner.run_network`.

    :param traj: Trajectory container
    """
    self.pre_build(traj)
    self._logger.info('\n------------------------\n'
                      'Pre-Running the Network\n'
                      '------------------------')
    # Assemble the BRIAN2 network from the pre-built components.
    self._network = self._network_constructor(*self._brian_list)
    self.network_runner.execute_network_pre_run(traj, self._network, self._network_dict,
                                                self.components, self.analysers)
    self._logger.info('\n-----------------------------\n'
                      'Network Simulation successful\n'
                      '-----------------------------')
    self._pre_run = True
    # Snapshot the post-pre-run state so each individual run can restore it
    # later (only available if the network object supports `store`).
    if hasattr(self._network, 'store'):
        self._network.store('pre_run')
def run_network(self, traj):
    """Top-level simulation function, pass this to the environment.

    Performs an individual network run during parameter exploration.
    `run_network` does not need to be called by the user: when passed to an
    :class:`~pypet.environment.Environment` with this NetworkManager,
    `run_network` and :func:`~pypet.brian2.network.NetworkManager.build`
    are automatically called for each individual experimental run.

    A new BRIAN2 network is created in case one was not pre-run. The
    execution is carried out by the
    :class:`~pypet.brian2.network.NetworkRunner` and its
    :func:`~pypet.brian2.network.NetworkRunner.execute_network_run`.

    :param traj: Trajectory container
    """
    # If a pre-run snapshot exists, rewind the network to it first.
    if self._pre_built and self._pre_run and hasattr(self._network, 'restore'):
        self._network.restore('pre_run')
        # Temporary fix for https://github.com/brian-team/brian2/issues/681:
        # restoring consumes the snapshot, so store it again immediately.
        self._network.store('pre_run')
    self._run_network(traj)
def _pretty_print_explored_parameters(self, traj):
    """Logs a banner listing every explored parameter and its value."""
    parts = ['\n-------------------\n'
             'Running the Network\n'
             '-------------------\n'
             ' with\n']
    explored = traj.f_get_explored_parameters(copy=False)
    for parameter in explored.values():
        parts.append('%s = %s\n' % (parameter.v_full_name,
                                    parameter.f_val_to_str()))
    parts.append('-------------------')
    self._logger.info(''.join(parts))
def _run_network(self, traj):
    """Starts a single run carried out by a NetworkRunner.

    Called from the public function
    :func:`~pypet.brian2.network.NetworkManager.run_network`.

    :param traj: Trajectory container
    """
    self.build(traj)
    self._pretty_print_explored_parameters(traj)
    # We need to construct a network object in case one was not pre-run
    if not self._pre_run:
        self._network = self._network_constructor(*self._brian_list)
    # Start the experimental run
    self.network_runner.execute_network_run(traj, self._network, self._network_dict,
                                            self.components, self.analysers)
    self._logger.info('\n-----------------------------\n'
                      'Network Simulation successful\n'
                      '-----------------------------')
| SmokinCaterpillar/pypet | pypet/brian2/network.py | Python | bsd-3-clause | 28,901 | [
"Brian",
"NEURON"
] | 6a9c44cab43245ce31624e9d7726ab1d4f738f27329ad1c1f752f9758b8cc986 |
# -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from __future__ import unicode_literals
import mimetypes
import re
import time
import warnings
import lxml.html
from lxml.cssselect import CSSSelector
from zope.testbrowser.browser import Browser, ListControl
from splinter.element_list import ElementList
from splinter.exceptions import ElementDoesNotExist
from splinter.driver import DriverAPI, ElementAPI
from splinter.driver.element_present import ElementPresentMixIn
from splinter.driver.find_links import FindLinks
from splinter.driver.xpath_utils import _concat_xpath_from_str
from splinter.cookie_manager import CookieManagerAPI
class CookieManager(CookieManagerAPI):
    """Cookie handling implemented on top of the zope.testbrowser jar."""

    def add(self, cookie, **kwargs):
        """Create or update every cookie of the given mapping."""
        for name, value in cookie.items():
            kwargs['name'] = name
            kwargs['value'] = value
            if name in self.driver.cookies:
                self.driver.cookies.change(**kwargs)
            else:
                self.driver.cookies.create(**kwargs)

    def delete(self, *cookies):
        """Delete the named cookies; with no arguments, delete them all."""
        if not cookies:
            self.delete_all()
            return
        for name in cookies:
            try:
                del self.driver.cookies[name]
            except KeyError:
                pass

    def delete_all(self):
        self.driver.cookies.clearAll()

    def all(self, verbose=False):  # NOQA: A003
        """Return the current cookies as a plain dict."""
        return {name: value for name, value in self.driver.cookies.items()}

    def __getitem__(self, item):
        return self.driver.cookies[item]

    def __contains__(self, key):
        return key in self.driver.cookies

    def __eq__(self, other_object):
        # Only comparable against plain dicts; anything else is unequal.
        return (isinstance(other_object, dict)
                and dict(self.driver.cookies) == other_object)
class ZopeTestBrowser(ElementPresentMixIn, DriverAPI):
    """Splinter driver backed by zope.testbrowser (no real browser, no JS).

    Navigation and form interaction go through the zope.testbrowser
    ``Browser`` instance, while DOM queries are answered by re-parsing the
    current page with lxml.
    """

    driver_name = "zope.testbrowser"

    def __init__(self, wait_time=2):
        # wait_time: seconds that is_text_present/is_text_not_present poll.
        self.wait_time = wait_time
        self._browser = Browser()
        self._cookie_manager = CookieManager(self._browser)
        self._last_urls = []
        self.links = FindLinks(self)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def visit(self, url):
        self._browser.open(url)

    def back(self):
        # Remember the current page so forward() can come back to it.
        self._last_urls.insert(0, self.url)
        self._browser.goBack()

    def forward(self):
        try:
            self.visit(self._last_urls.pop())
        except IndexError:
            # No page recorded by back(); nothing to go forward to.
            pass

    def reload(self):
        self._browser.reload()

    def quit(self):  # NOQA: A003
        pass

    @property
    def htmltree(self):
        """Current page contents parsed into an lxml element tree."""
        try:
            html = self.html.decode("utf-8")
        except AttributeError:
            # Contents were already text rather than bytes.
            html = self.html
        return lxml.html.fromstring(html)

    @property
    def title(self):
        return self._browser.title

    @property
    def html(self):
        return self._browser.contents

    @property
    def url(self):
        return self._browser.url

    def find_option_by_value(self, value):
        # Locate the <option> in the parsed tree, then fetch the matching
        # zope.testbrowser control by its label text.
        html = self.htmltree
        element = html.xpath('//option[@value="%s"]' % value)[0]
        control = self._browser.getControl(element.text)
        return ElementList(
            [ZopeTestBrowserOptionElement(control, self)], find_by="value", query=value
        )

    def find_option_by_text(self, text):
        html = self.htmltree
        element = html.xpath('//option[normalize-space(text())="%s"]' % text)[0]
        control = self._browser.getControl(element.text)
        return ElementList(
            [ZopeTestBrowserOptionElement(control, self)], find_by="text", query=text
        )

    def find_by_css(self, selector):
        # CSS selectors are translated to xpath and delegated.
        xpath = CSSSelector(selector).path
        return self.find_by_xpath(
            xpath, original_find="css", original_query=selector
        )

    def get_control(self, xpath_element):
        return xpath_element

    def find_by_xpath(self, xpath, original_find=None, original_query=None):
        """Find elements by xpath, re-dispatching links and form controls.

        Anchors are wrapped as link elements, and named form controls are
        re-queried through zope.testbrowser so they can be interacted with;
        everything else is wrapped as a plain lxml-backed element.
        """
        html = self.htmltree
        elements = []
        for xpath_element in html.xpath(xpath):
            if self._element_is_link(xpath_element):
                return self._find_links_by_xpath(xpath)
            elif self._element_is_control(xpath_element) and xpath_element.name:
                return self.find_by_name(xpath_element.name)
            else:
                elements.append(self.get_control(xpath_element))
        find_by = original_find or "xpath"
        query = original_query or xpath
        return ElementList(
            [ZopeTestBrowserElement(element, self) for element in elements],
            find_by=find_by,
            query=query,
        )

    def find_by_tag(self, tag):
        return self.find_by_xpath(
            "//%s" % tag, original_find="tag", original_query=tag
        )

    def find_by_value(self, value):
        # Prefer a value-attribute match, fall back to exact text content.
        elem = self.find_by_xpath(
            '//*[@value="%s"]' % value, original_find="value", original_query=value
        )
        if elem:
            return elem
        return self.find_by_xpath('//*[.="%s"]' % value)

    def find_by_text(self, text):
        xpath_str = _concat_xpath_from_str(text)
        return self.find_by_xpath(
            xpath_str,
            original_find="text",
            original_query=text,
        )

    def find_by_id(self, id_value):
        return self.find_by_xpath(
            '//*[@id="%s"][1]' % id_value,
            original_find="id",
            original_query=id_value,
        )

    def find_by_name(self, name):
        # zope.testbrowser has no "get all" lookup, so fetch controls at
        # increasing indexes until the lookup fails.
        elements = []
        index = 0
        while True:
            try:
                control = self._browser.getControl(name=name, index=index)
                elements.append(control)
                index += 1
            except LookupError:
                break
            except NotImplementedError:
                break
        return ElementList(
            [ZopeTestBrowserControlElement(element, self) for element in elements],
            find_by="name",
            query=name,
        )

    def find_link_by_text(self, text):
        warnings.warn(
            'browser.find_link_by_text is deprecated.'
            ' Use browser.links.find_by_text instead.',
            FutureWarning,
        )
        return self.links.find_by_text(text)

    def find_link_by_href(self, href):
        warnings.warn(
            'browser.find_link_by_href is deprecated.'
            ' Use browser.links.find_by_href instead.',
            FutureWarning,
        )
        return self.links.find_by_href(href)

    def find_link_by_partial_href(self, partial_href):
        warnings.warn(
            'browser.find_link_by_partial_href is deprecated.'
            ' Use browser.links.find_by_partial_href instead.',
            FutureWarning,
        )
        return self.links.find_by_partial_href(partial_href)

    def find_link_by_partial_text(self, partial_text):
        warnings.warn(
            'browser.find_link_by_partial_text is deprecated.'
            ' Use browser.links.find_by_partial_text instead.',
            FutureWarning,
        )
        return self.links.find_by_partial_text(partial_text)

    def fill(self, name, value):
        self.find_by_name(name=name).first._control.value = value

    def fill_form(self, field_values, form_id=None, name=None, ignore_missing=False):
        """Fill several controls at once, optionally scoped to one form.

        NOTE(review): the loop variable ``name`` shadows the form-name
        parameter after the first iteration; harmless today because the
        form is resolved before the loop, but worth renaming.
        """
        form = self._browser
        if name or form_id:
            form = self._browser.getForm(name=name, id=form_id)
        for name, value in field_values.items():
            try:
                control = form.getControl(name=name)
                if control.type == "checkbox":
                    if value:
                        control.value = control.options
                    else:
                        control.value = []
                elif control.type == "radio":
                    control.value = [
                        option for option in control.options if option == value
                    ]
                elif control.type == "select":
                    control.value = [value]
                else:
                    # Text-like controls take the value directly.
                    control.value = value
            except NotImplementedError as e:
                if not ignore_missing:
                    raise NotImplementedError(e)

    def choose(self, name, value):
        # Select the radio option whose value matches.
        control = self._browser.getControl(name=name)
        control.value = [option for option in control.options if option == value]

    def check(self, name):
        control = self._browser.getControl(name=name)
        control.value = control.options

    def uncheck(self, name):
        control = self._browser.getControl(name=name)
        control.value = []

    def attach_file(self, name, file_path):
        filename = file_path.split("/")[-1]
        control = self._browser.getControl(name=name)
        content_type, _ = mimetypes.guess_type(file_path)
        with open(file_path, 'rb') as f:
            control.add_file(f, content_type, filename)

    def _find_links_by_xpath(self, xpath):
        html = self.htmltree
        links = html.xpath(xpath)
        return ElementList(
            [ZopeTestBrowserLinkElement(link, self) for link in links],
            find_by="xpath",
            query=xpath,
        )

    def select(self, name, value):
        self.find_by_name(name).first._control.value = [value]

    def is_text_present(self, text, wait_time=None):
        """Poll the current page for ``text`` for up to ``wait_time`` seconds.

        NOTE(review): this is a busy-wait without sleeping, and the page is
        not re-fetched between checks -- confirm this is intended.
        """
        wait_time = wait_time or self.wait_time
        end_time = time.time() + wait_time
        while time.time() < end_time:
            if self._is_text_present(text):
                return True
        return False

    def _is_text_present(self, text):
        try:
            body = self.find_by_tag("body").first
            return text in body.text
        except ElementDoesNotExist:
            # This exception will be thrown if the body tag isn't present
            # This has occasionally been observed. Assume that the
            # page isn't fully loaded yet
            return False

    def is_text_not_present(self, text, wait_time=None):
        wait_time = wait_time or self.wait_time
        end_time = time.time() + wait_time
        while time.time() < end_time:
            if not self._is_text_present(text):
                return True
        return False

    def _element_is_link(self, element):
        return element.tag == "a"

    def _element_is_control(self, element):
        # lxml form elements (input/select/textarea) expose a `type` attr.
        return hasattr(element, "type")

    @property
    def cookies(self):
        return self._cookie_manager
# Strips the outermost tag pair from serialized HTML (used by Element.html).
re_extract_inner_html = re.compile(r"^<[^<>]+>(.*)</[^<>]+>$")
class ZopeTestBrowserElement(ElementAPI):
    """Wraps an lxml element; child queries return wrapped elements."""

    def __init__(self, element, parent):
        self._element = element
        self.parent = parent

    def __getitem__(self, attr):
        return self._element.attrib[attr]

    def _wrap_all(self, elements):
        # Child lookups keep this element as the parent of their results.
        return ElementList([self.__class__(el, self) for el in elements])

    def find_by_css(self, selector):
        return self._wrap_all(self._element.cssselect(selector))

    def find_by_xpath(self, selector):
        return self._wrap_all(self._element.xpath(selector))

    def find_by_name(self, name):
        return self._wrap_all(self._element.cssselect('[name="%s"]' % name))

    def find_by_tag(self, name):
        return self._wrap_all(self._element.cssselect(name))

    def find_by_value(self, value):
        return self._wrap_all(self._element.cssselect('[value="%s"]' % value))

    def find_by_text(self, text):
        # Add a period to the xpath to search only inside the parent.
        return self.find_by_xpath('.{}'.format(_concat_xpath_from_str(text)))

    def find_by_id(self, id):  # NOQA: A002
        return self._wrap_all(self._element.cssselect("#%s" % id))

    @property
    def value(self):
        return self._element.text_content()

    @property
    def text(self):
        return self.value

    @property
    def outer_html(self):
        return lxml.html.tostring(self._element, encoding="unicode").strip()

    @property
    def html(self):
        # Inner HTML: outer HTML with the outermost tag pair stripped.
        return re_extract_inner_html.match(self.outer_html).group(1)

    def has_class(self, class_name):
        return len(self._element.find_class(class_name)) > 0
class ZopeTestBrowserLinkElement(ZopeTestBrowserElement):
    """Anchor element; clicking follows the link's href.

    Fixed: removed a redundant ``__getitem__`` override that only delegated
    to ``super()`` without adding behavior.
    """

    def __init__(self, element, parent):
        super(ZopeTestBrowserLinkElement, self).__init__(element, parent)
        self._browser = parent._browser

    def click(self):
        # Following a link is simply opening its target URL.
        return self._browser.open(self["href"])
class ZopeTestBrowserControlElement(ZopeTestBrowserElement):
    """Wraps a zope.testbrowser form control instead of an lxml node."""

    def __init__(self, control, parent):
        self._control = control
        self.parent = parent

    def __getitem__(self, attr):
        # Prefer a real attribute of the underlying control object,
        # falling back to its HTML attributes.
        try:
            return getattr(self._control._control, attr)
        except AttributeError:
            return self._control._control.attrs[attr]

    @property
    def value(self):
        value = self._control.value
        # Single-selection list controls report a one-element list;
        # unwrap it for convenience.
        if isinstance(self._control, ListControl) and len(value) == 1:
            return value[0]
        return value

    @property
    def checked(self):
        return bool(self._control.value)

    def click(self):
        return self._control.click()

    def fill(self, value):
        self._control.value = value

    def select(self, value):
        self._control.value = [value]
class ZopeTestBrowserOptionElement(ZopeTestBrowserElement):
    """Wraps a zope.testbrowser <option> control."""

    def __init__(self, control, parent):
        self._control = control
        self.parent = parent

    def __getitem__(self, attr):
        return getattr(self._control, attr)

    @property
    def text(self):
        # The first label is the option's visible text.
        return self._control.labels[0]

    @property
    def value(self):
        return self._control.optionValue

    @property
    def selected(self):
        return self._control.selected
| cobrateam/splinter | splinter/driver/zopetestbrowser.py | Python | bsd-3-clause | 14,435 | [
"VisIt"
] | b846da6d226d88684ad6e21d94df4ae321a32fa1753084074e3640165acac951 |
#!/usr/bin/env python
#simple calculcation done for Stellaris key contest among friends
#optimistic_const was question about number from 1 to 5
#quiz score from 0-100 here https://www.space.com/17791-milky-way-galaxy-quiz-trivia.html
#bad_luck was question about number from 1 to 1000
#also on https://repl.it/I59h/2 so every friend could launch it themselv
# Contest answers: entrant name -> {'optimistic_const': 1-5 pick,
# 'quiz': 0-100 Milky Way quiz score, 'bad_luck': 1-1000 pick}.
input_data = {
    "_CJ_": {
        "optimistic_const": 3,
        "quiz": 73,
        "bad_luck": 666
    },
    "paucto": {
        "optimistic_const": 3,
        "quiz": 82,
        "bad_luck": 720
    },
    "dwi": {
        "optimistic_const": 3,
        "quiz": 55,
        "bad_luck": 42
    },
    "Lance": {
        "optimistic_const": 3,
        "quiz": 73,
        "bad_luck": 609
    },
    "Skodak": {
        "optimistic_const": 4,
        "quiz": 45,
        "bad_luck": 256
    }
}
def calculate(data):
    """Score every entrant and announce the winner.

    Each entrant's score is ``optimistic_const * quiz - bad_luck / 10``.

    Fixes: guarded the module-level invocation behind ``__main__``, added an
    empty-input guard, replaced the lambda-based ``max`` with ``result.get``,
    removed repeated ``data[hero]`` lookups, and fixed the printed
    'Calcaluting' typo.

    :param data: dict mapping entrant name to a dict with keys
        'optimistic_const', 'quiz' and 'bad_luck'
    :return: the winner's name, or False if there is nobody to score or the
        sanity check fails
    """
    if not data:
        print('something went very wrong!')
        return False
    result = {}
    print('Let\'s find a winner!\n')
    for hero, answers in data.items():
        print('Calculating optimism and knowledge about Milky way.')
        score = answers['optimistic_const'] * answers['quiz']
        print(answers['optimistic_const'], '*', answers['quiz'], '=', score)
        print('Minus bad luck!')
        print(score, '-', answers['bad_luck'] / 10, '=',
              score - answers['bad_luck'] / 10)
        score -= answers['bad_luck'] / 10
        score_msg = '\n{} : {}'.format(hero, score)
        print(score_msg)
        # -1 compensates for the leading \n in score_msg
        print('=' * (len(score_msg) - 1), '\n')
        result[hero] = score
    # The key with the highest score wins.
    winner = max(result, key=result.get)
    # Sanity check: the winner's score must equal the highest score.
    highest_score = max(result.values())
    if result[winner] == highest_score:
        winner_msg = '!The winner is {} with score: {}!'.format(winner, highest_score)
        print('!' * len(winner_msg))
        print(winner_msg)
        print('!' * len(winner_msg))
        return winner
    print('something went very wrong!')
    return False


if __name__ == '__main__':
    calculate(input_data)
| Pulecz/simple-games | calculate_quiz.py | Python | gpl-3.0 | 2,139 | [
"Galaxy"
] | 8f14e04ab7acf00b3a14cf09ae5ed94045e7bf9fcacb8b92ea603cd14c2ab71c |
#!/usr/bin/env python
"""
tidal_energy.py
State Estimation and Analysis for PYthon
Module to compute tidal energy from a column of data.
Written by Brian Powell on 03/30/16
Copyright (c)2017 University of Hawaii under the MIT-License.
Notes
-----
Barotropic to Baroclinic conversion is given by:
.. math::
C=1 / T_t \int_0^T_t P'_t * wbar_t * dt, (1)
where, T_t is the tidal period for consituent, t, P' is the pressure perturbation,
wbar is the vertical velocity. Hence, conversion is the time average of the
vertical motion of the bottom pressure perturbation. We can do it spectrally if
we represent P'_t and wbar_t as waves:
.. math::
P'_t = Amp_P'_t * sin( 2 * pi * t / T_t + Pha_P'_t ) (2) \\
wbar_t = Amp_wbar_t * sin( 2 * pi * t / T_t + Pha_wbar_t ) (3)
If we substitute (2) and (3) into (1) using trig. identity and integrate over
the period (recall that integrating a wave over one period is zero):
.. math::
Conversion = 0.5 * Amp_P'_t * Amp_wbar_t * cos( Pha_P'_t - Pha_wbar_t )(4)
Energy Flux is given by:
.. math::
Flux_u = 1 / T_t * \int_0^T_t u'_t * P'_t * dt, (5) \\
Flux_v = 1 / T_t * \int_0^T_t v'_t * P'_t * dt, (6)
where u' and v' are the velocity anomalies for the constituent, t. As per
above, we can express as waves to yield:
.. math::
Flux_u = 0.5 * Amp_u'_t * Amp_P'_t * cos( Pha_u'_t - Pha_P'_t ) (7) \\
Flux_v = 0.5 * Amp_v'_t * Amp_P'_t * cos( Pha_v'_t - Pha_P'_t ) (8)
Displacement is given by:
.. math::
Displace = \int_0^T_t/2 g * rho'_t / ( rho0 * N_t**2 ) * dt, (9)
where rho' is the density anomaly and N**2 is the Brunt-Vaisala. NOTE:
this is integrated over one-half period because (by definition), it would
integrate to zero. However, if we know the tidal vertical velocity, then
we can integrate it for one-half period for the tidal displacement:
.. math::
Displace = \int_0^T_t/2 w_t * dt \\
= \int_0^T_t/2 Amp_w_t * sin( 2 * pi * t / T_t ) (10) \\
= Amp_w_t * T_t / pi
Horizontal Kinetic Energy is given by:
.. math::
HKE = 0.5 * rho0 * 1 / T_t * \int_0^T_t (u'_t**2 + v'_t**2) * dt (11)
substitute u' and v' as waveforms and integrate over a period,
.. math::
HKE = 0.5 * rho0 * 0.5 * ( Amp_u'_t**2 _ Amp_v'_t**2 ) (12)
Available Potential Energy is given by:
.. math::
APE = 0.5 * rho0 * 1 / T_t * \int_0^T_t N_t**2 * Displace_t**2 * dt (13)
For this, we will use the time-average N**2 (not at the specific tidal
frequency) and use (10); hence, it becomes:
.. math::
APE = 0.5 * rho0 * (Amp_w_t * T_t / pi)**2 * 1/T_t \int_0^T_t N**2 * dt (14)
"""
import numpy as np
import seapy
# Reference density [kg m**-3] used in the HKE/APE formulas.
_rho0 = 1000
class energetics():
    """
    Container for the results of the tidal_energy calculation, giving
    dictionary-style access per constituent, e.g. ``result['M2']``.

    Fixes: validation now happens before any attribute assignment, and the
    stray unreachable ``pass`` statements were removed.
    """

    def __init__(self, tides, energy, integrals, ellipse):
        """
        Parameters
        ----------
        tides : list of str or ndarray,
            tidal constituent names
        energy : ndarray,
            per-depth energetics shaped [tide, depth, 5]; the last axis
            holds (flux_u, flux_v, displacement, HKE, APE)
        integrals : ndarray,
            vertically integrated energetics shaped [tide, 5]; the last
            axis holds (flux_u, flux_v, conversion, HKE, APE)
        ellipse : dict,
            tidal ellipse parameters keyed by constituent name

        Raises
        ------
        ValueError
            if the number of tides does not match energy.shape[0]
        """
        try:
            # Accept a numpy array of names as well as a plain list.
            self.tides = tides.tolist()
        except AttributeError:
            self.tides = tides
        if len(self.tides) != energy.shape[0]:
            raise ValueError(
                "The number of tides and energy values are inconsistent")
        self.energy = energy
        self.integrals = integrals
        self.ellipse = ellipse

    def __getitem__(self, key):
        """
        Return a dict with the energetics for the tidal constituent `key`
        (case-insensitive).
        """
        t = self.tides.index(key.upper())
        return {"conversion": self.integrals[t, 2],
                "flux_u": self.energy[t, :, 0],
                "flux_v": self.energy[t, :, 1],
                "disp": self.energy[t, :, 2],
                "hke": self.energy[t, :, 3],
                "ape": self.energy[t, :, 4],
                "total_flux_u": self.integrals[t, 0],
                "total_flux_v": self.integrals[t, 1],
                "total_hke": self.integrals[t, 3],
                "total_ape": self.integrals[t, 4],
                "ellipse": self.ellipse[key.upper()]}
def tidal_energy(time, hz, u, v, w, pressure, bvf=None, tides=None,
                 ubar=None, vbar=None, wbar=None):
    """
    Calculate aspects of tidal energy from the given data: baroclinic
    energy flux, HKE, APE, displacement, and conversion.

    This only works for a single depth profile, and the arrays are to be
    2D with dimensions of [time, depth] with depth index 0 as the bottom
    and index -1 as the surface. Likewise, the hz field is oriented the
    same.

    Fixes versus the previous revision: `hz.ndims` -> `hz.ndim`; static
    `hz` is tiled to [time, depth]; ambiguous array truthiness on `bvf`,
    `ubar` and `vbar` replaced with `is not None`; `w_prime` de-means `w`
    (was `v`); HKE/APE use the squared amplitudes of eqs. (12)/(14) and
    the previously undefined `displace` variable is replaced with the
    stored displacement; the vertical integration no longer broadcasts a
    depth profile against the full [time, depth] `hz` array.

    Parameters
    ----------
    time : list of datetime,
        times of data
    hz : ndarray,
        Thickness of the water column represented by 3D quantities [m].
        Either [time, depth] or a static [depth] profile.
    u : ndarray,
        u-component of 3D velocity [m s**-1]
    v : ndarray,
        v-component of 3D velocity [m s**-1]
    w : ndarray,
        w-component of 3D velocity [m s**-1]
    pressure : ndarray,
        pressure of the 3D field [dbar]
    bvf : ndarray, optional
        Brunt-Vaisala Frequency of the 3D field [s**-1]. If not specified
        the APE will not be computed
    tides : list of strings, optional
        The names of the tides to use for analysis. If none provided,
        use the defaults from seapy.tide
    ubar : ndarray, optional
        u-component of barotropic velocity [m s**-1]. If none provided,
        compute from u
    vbar : ndarray, optional
        v-component of barotropic velocity [m s**-1]. If none provided,
        compute from v
    wbar : ndarray, optional
        w-component of barotropic velocity [m s**-1]. If none provided,
        compute from w

    Returns
    -------
    energetics : class,
        The energetics for each tidal constituent as well as the
        vertically integrated properties.
    """
    # Ensure arrays as needed
    u = np.ma.array(u)
    v = np.ma.array(v)
    w = np.ma.array(w)
    pressure = np.ma.array(pressure)

    # Set up the thicknesses in time, replicating a static profile over
    # every time step if needed.
    hz = np.ma.array(hz)
    if hz.ndim == 1:
        hz = np.tile(hz, (u.shape[0], 1))
    total_h = np.sum(hz, axis=1)
    ndep = hz.shape[1]

    # If BVF not given, set to zero (the APE then evaluates to zero).
    if bvf is not None:
        bvf = np.ma.array(bvf).mean(axis=0)
    else:
        bvf = np.zeros(ndep)

    # Set up the tides
    tides = seapy.tide._set_tides(tides)
    ntides = len(tides)
    # NOTE(review): assumes seapy.tide.frequency returns cycles/hour;
    # confirm the units of `period` and the 3600 factor in the
    # displacement below against seapy.tide.
    period = 3600 / seapy.tide.frequency(tides)

    # Set up the barotropic velocities
    if ubar is not None and vbar is not None:
        ubar = np.ma.array(ubar)
        vbar = np.ma.array(vbar)
        wbar = np.ma.array(wbar)
    else:
        ubar = np.sum(hz * u, axis=1) / total_h
        vbar = np.sum(hz * v, axis=1) / total_h
        wbar = np.sum(hz * w, axis=1) / total_h

    # Calculate pressure anomalies and remove the depth mean (baroclinicity)
    p_prime = pressure - pressure.mean(axis=0)
    p_prime -= np.sum(p_prime * hz) / np.sum(hz)

    # Fit the barotropic tides
    tidal_vel = seapy.tide.fit(time, ubar + 1j * vbar, tides)
    wbar = seapy.tide.fit(time, wbar, tides)

    # Store the tidal ellipse
    ellipse = {}
    for t in tides:
        ellipse[t] = seapy.tide.tellipse(tidal_vel['major'][t].amp,
                                         tidal_vel['minor'][t].amp,
                                         tidal_vel['minor'][t].phase,
                                         tidal_vel['major'][t].phase)

    # Velocity anomalies: de-meaned with the barotropic tide removed
    u_prime = u - u.mean(axis=0) - np.real(tidal_vel['fit'])
    v_prime = v - v.mean(axis=0) - np.imag(tidal_vel['fit'])
    w_prime = w - w.mean(axis=0) - wbar['fit']
    wbar = wbar['major']

    # Results: for each tide and each depth, five values
    # (flux_u, flux_v, displacement, HKE, APE)
    energy = np.zeros((ntides, ndep, 5))
    # Vertically integrated: (flux_u, flux_v, conversion, HKE, APE)
    integrals = np.zeros((ntides, 5))

    # Compute over all depths
    for d in seapy.progressbar.progress(np.arange(ndep)):
        # Tidal fits of the anomalies at this depth
        t_pres = seapy.tide.fit(time, p_prime[:, d], tides)['major']
        t_u = seapy.tide.fit(time, u_prime[:, d], tides)['major']
        t_v = seapy.tide.fit(time, v_prime[:, d], tides)['major']
        t_w = seapy.tide.fit(time, w_prime[:, d], tides)['major']

        # Compute each term for each tide
        for n, t in enumerate(tides):
            # At the bottom, compute the barotropic-to-baroclinic
            # conversion, eq. (4)
            if d == 0:
                integrals[n, 2] = 0.5 * t_pres[t].amp * wbar[t].amp * \
                    np.cos(t_pres[t].phase - wbar[t].phase)
            # Energy flux, eqs. (7) and (8)
            energy[n, d, 0] = 0.5 * t_u[t].amp * \
                t_pres[t].amp * np.cos(t_u[t].phase - t_pres[t].phase)
            energy[n, d, 1] = 0.5 * t_v[t].amp * \
                t_pres[t].amp * np.cos(t_v[t].phase - t_pres[t].phase)
            # Displacement, eq. (10)
            energy[n, d, 2] = t_w[t].amp * 3600 * period[t] / np.pi
            # HKE, eq. (12)
            energy[n, d, 3] = 0.25 * _rho0 * \
                (t_u[t].amp ** 2 + t_v[t].amp ** 2)
            # APE, eq. (14), from the squared displacement and mean N**2
            energy[n, d, 4] = 0.5 * _rho0 * bvf[d] * energy[n, d, 2] ** 2

    # Depth-average using the time-mean layer thicknesses
    hz_mean = hz.mean(axis=0)
    for n in range(ntides):
        for i in [0, 1, 3, 4]:
            integrals[n, i] = np.sum(energy[n, :, i] * hz_mean) / \
                total_h.mean()

    # Put it all together to return
    return energetics(tides, energy, integrals, ellipse)
| ocefpaf/seapy | seapy/tidal_energy.py | Python | mit | 9,529 | [
"Brian"
] | c66ba5358b0c0c53434046ff6e08ac9d2f6028db2f7cf4461337552701c798b8 |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import kodi
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
import scraper
# Default scraped site; overridable via the addon's -base_url setting.
BASE_URL = 'http://allrls.net'
class Scraper(scraper.Scraper):
    """Scrapes release links for movies and episodes from RLSSource."""

    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))

    @classmethod
    def provides(cls):
        return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.EPISODE])

    @classmethod
    def get_name(cls):
        return 'RLSSource.net'

    def get_sources(self, video):
        """Return a list of hoster dicts scraped from the video's post."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH:
            return hosters
        page_url = urlparse.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, require_debrid=True, cache_limit=.5)
        # The post title carries the quality information.
        title_match = re.search('class="entry-title">([^<]+)', html)
        q_str = title_match.group(1) if title_match else ''
        for link_match in re.finditer('href="?([^" ]+)(?:[^>]+>){2}\s+\|', html, re.DOTALL):
            stream_url = link_match.group(1)
            if 'adf.ly' in stream_url:
                continue  # skip ad-shortener links
            host = urlparse.urlsplit(stream_url).hostname
            hosters.append({'multi-part': False, 'class': self, 'views': None,
                            'url': stream_url, 'rating': None, 'direct': False,
                            'host': host,
                            'quality': scraper_utils.blog_get_quality(video, q_str, host)})
        return hosters

    def get_url(self, video):
        return self._blog_get_url(video, delim=' ')

    @classmethod
    def get_settings(cls):
        """Append the blog-scraper filter/select options to the settings."""
        settings = scraper_utils.disable_sub_check(super(cls, cls).get_settings())
        name = cls.get_name()
        settings.append('         <setting id="%s-filter" type="slider" range="0,180" option="int" label="     Filter results older than (0=No Filter) (days)" default="30" visible="eq(-4,true)"/>' % (name))
        settings.append('         <setting id="%s-select" type="enum" label="     Automatically Select" values="Most Recent|Highest Quality" default="0" visible="eq(-5,true)"/>' % (name))
        return settings

    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        pattern = 'href="(?P<url>[^"]+)[^>]+rel="bookmark">(?P<post_title>[^<]+).*?class="entry-date">(?P<date>\d+/\d+/\d+)'
        date_format = '%m/%d/%Y'
        html = self._http_get(self.base_url, params={'s': title, 'go': 'Search'}, require_debrid=True, cache_limit=1)
        return self._blog_proc_results(html, pattern, date_format, video_type, title, year)
| JamesLinEngineer/RKMC | addons/plugin.video.salts/scrapers/rlssource_scraper.py | Python | gpl-2.0 | 3,523 | [
"ADF"
] | d14fe3d97760a89d5ad822885d99f33a26cacb893fb28d6d86726bf9248dc76c |
'''
Created on Jun 16, 2016
@author: Jose Pedro Matos
'''
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
import os
import warnings
import ntpath
import tempfile
import gzip
import shutil
import sys
import re
import geojson
import json
import struct
from urllib import request
from multiprocessing.dummy import Pool as ThreadPool
from netCDF4 import Dataset, num2date, date2num #@UnresolvedImport
from dateutil.relativedelta import relativedelta
from astropy.io.ascii.tests.test_connect import files
from celery import current_task
class SatelliteData(object):
    '''
    Base class for gridded satellite data products: downloads raw files
    into a local folder and serves the imported data as time/lat/lon
    arrays. Product-specific subclasses override the class attributes
    below and implement the abstract download/import hooks.
    '''
    # Defaults meant to be overridden by product-specific subclasses.
    filePrefix = 'unknown'            # prefix for files written to disk
    precision = np.single             # presumably the dtype for stored grids -- usage not shown here
    significantDigits = None          # presumably optional rounding of stored values -- verify in subclass code
    downloadFailThreshold = 50000     # downloads smaller than this many bytes are treated as failed and retried
    productSite = 'unknown'           # informational URL of the data product
    downloadSite = 'unknown'          # base URL used to build download links
    description = 'none'
    # NOTE(review): mutable class attribute shared by all instances --
    # confirm subclasses replace rather than mutate it.
    timestep = {}
    units = 'unknown'
def __init__(self, dataFolder, downloadFolder, username=None, password=None):
    '''
    Sets up the data/download folders (creating them if needed), stores
    the optional credentials, and indexes the already-available data.

    Fixed: the username/password arguments were previously discarded
    (both attributes were unconditionally reset to None).

    :param dataFolder: folder holding processed data files
    :param downloadFolder: folder receiving raw downloaded files
    :param username: optional login for the download service
    :param password: optional password for the download service
    '''
    self.downloadFolder = downloadFolder
    self.dataFolder = dataFolder
    self.username = username
    self.password = password
    if not os.path.isdir(self.dataFolder):
        os.makedirs(self.dataFolder)
    if not os.path.isdir(self.downloadFolder):
        os.makedirs(self.downloadFolder)
    self._listData()
def downloadList(self, dateIni, dateEnd):
    '''
    abstract method

    Returns a tuple defining files to be downloaded. It should contain:
        a list of file names on disk and
        a list of urls for download.
    '''
    pass
def downloadedDates(self, fileType):
    '''
    abstract method

    Returns a tuple containing:
        a list of files in the folder that have the given extension and
        a list of dates corresponding to each file
    '''
    pass
def importData(self, fileName):
    '''
    abstract method

    Reads one downloaded file from disk.
    Returns: (return value undocumented in the base class -- see the
    product-specific implementations)
    '''
    pass
def getData(self, dateIni, dateEnd):
    """Return the loaded data between two dates, or {} if nothing loaded."""
    # Refresh self.loaded from what is already on disk, without
    # downloading or re-reading raw files.
    self.process(dateIni=dateIni, dateEnd=dateEnd, download=False, read=False)
    return self.__dict__.get('loaded', {})
def getDataForJSON(self, dateIni, dateEnd, returnData=True, returnInfo=True):
    '''
    Packages the loaded data for JSON serialization.

    :param dateIni: start date, passed through to getData
    :param dateEnd: end date, passed through to getData
    :param returnData: if True, include the per-date sparse values
    :param returnInfo: if True, include lon/lat grids and valid-cell indexes
    :return: a JSON-serializable dict, or {} when nothing is loaded
    '''
    # get data
    data = self.getData(dateIni, dateEnd)
    # Flat indexes of grid cells holding data for at least one date.
    # NOTE(review): assumes data['data'] is shaped [time, lat, lon] and
    # that +1 makes no-data cells sum to zero -- confirm against the
    # product-specific importData implementations. Also note this line
    # raises KeyError when getData returned {} (the emptiness check below
    # comes too late).
    idxs = np.where((np.nansum(data['data']+1, axis=0)!=0).ravel())[0]
    idxsList = idxs.tolist()
    # trim data
    if len(data)>0:
        # NOTE(review): the comprehension variable shadows the module-level
        # `dt` (datetime) import within this scope.
        data['dates'] = [dt.isoformat() for dt in data['dates']]
        data['missing'] = data['missing'].tolist()
        if returnInfo:
            data['lon'] = data['lon'].tolist()
            data['lat'] = data['lat'].tolist()
            data['idxs'] = idxsList
        else:
            data.pop('lon', None)
            data.pop('lat', None)
            data.pop('idxs', None)
        if returnData:
            tmp = []
            for i0 in range(data['data'].shape[0]):
                # Values at the valid cells for this date; NaN -> -999 sentinel.
                tmpValidData = data['data'][i0,:,:].ravel()[idxsList]
                tmpValidData[np.isnan(tmpValidData)] = -999;
                # Only non-zero values are kept (sparse representation).
                tmpPositiveIdxs = np.where(tmpValidData!=0)[0]
                tmp.append({'idxs': idxs[tmpPositiveIdxs].tolist(), 'values': tmpValidData[tmpPositiveIdxs].tolist()})
            data['data'] = tmp
        else:
            data.pop('data', None)
        return data
    else:
        return {}
def download(self, dateIni, dateEnd):
    '''
    Download every file covering [dateIni, dateEnd] that is not already on
    disk, using a thread pool and up to 3 retry rounds for failed transfers.

    Returns True (a "halt" signal) when *all* requested URLs failed outright
    (the data is presumably not published yet), False otherwise.
    '''
    # Call data-specific method to define file names and download urls
    fileList, urlList = self.downloadList(dateIni, dateEnd)
    # Keep only the files that are not already present on disk.
    toDownload = []
    for i0 in range(len(fileList)):
        if not os.path.isfile(fileList[i0]):
            toDownload.append((fileList[i0], urlList[i0]))
    ctr = 0
    failed = []
    notFound = []
    # First pass (ctr==0) plus up to 3 retry rounds while failures remain.
    while ctr==0 or (ctr < 4 and len(failed)>0):
        # Download files in batches of self.downloadThreads parallel requests.
        downloadSizes = []
        if len(toDownload)>0:
            pool = ThreadPool(self.downloadThreads)
            toDownloadSplit = [toDownload[i0:i0+self.downloadThreads] for i0 in range(0, len(toDownload), self.downloadThreads)]
            tmpBarLen = len(toDownloadSplit)
            if ctr==0:
                print('Downloading files:')
            else:
                warnings.warn(str(len(failed)) + ' failed download(s)...', UserWarning)
                print('Reattempting failed downloads (' + str(ctr) + '):')
            for i0, l0 in enumerate(toDownloadSplit):
                self._printProgress(i0, tmpBarLen, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
                try:
                    # Progress reporting when running inside a Celery task;
                    # outside a task this raises and is deliberately ignored.
                    current_task.update_state(state='PROGRESS', meta={'message': ('warning', 'Downloading files (%s to %s)' % (os.path.split(l0[0][1])[1], os.path.split(l0[-1][1])[1])),
                                                                      'state': 'PROGRESS'})
                except Exception:
                    pass
                tmp = pool.map(self._downloadFile, l0)
                downloadSizes.extend(tmp)
            self._printProgress(tmpBarLen, tmpBarLen, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
            pool.close()
            pool.join()
        # Check sizes and delete failed ones.
        failed = []
        for i0, s0 in enumerate(downloadSizes):
            if s0<0:
                # _downloadFile returned -1: the request itself failed.
                # Treated as "not found" and not retried.
                if os.path.isfile(toDownload[i0][0]):
                    os.remove(toDownload[i0][0])
                notFound.append((toDownload[i0][0], toDownload[i0][1]))
            elif s0<self.downloadFailThreshold:
                # Suspiciously small file: assume a truncated transfer and retry.
                if os.path.isfile(toDownload[i0][0]):
                    os.remove(toDownload[i0][0])
                failed.append((toDownload[i0][0], toDownload[i0][1]))
        toDownload = failed
        ctr += 1
    if len(failed)>0:
        warnings.warn('permanently failed download(s). Re-run the download method and consider reducing the number of threads:\n' + str([f0 for f0 in failed[0]]), UserWarning)
    if len(notFound)>0:
        warnings.warn('download file(s) not found. The files may not be available yet:\n' + str([f0 for f0 in notFound[0]]), UserWarning)
    # return halt signal
    if len(urlList)>0 and len(notFound)==len(urlList):
        return True
    else:
        return False
def readDownloads(self, dates, geometryFile=None, geometryStr=''):
    '''
    Reads the downloaded files using methods specific to the subclasses.

    Builds self.downloaded: a dict with 'dates', 'lat', 'lon', a
    (time, lat, lon) data cube and a per-timestep 'missing' flag, then crops
    the cube to the configured geometry.
    '''
    # retrieve a list of filenames and dates
    filePaths, fileDates = self.downloadedDates()
    # find which dates are covered by files
    existingFiles = []
    existingDates = []
    for d0 in dates:
        if d0 in fileDates:
            idx = fileDates.index(d0)
            existingFiles.append(filePaths[idx])
            existingDates.append(fileDates[idx])
    # create a temporary folder (used by importData for decompression)
    self.tmpFolder = tempfile.mkdtemp(prefix='tmp__', dir=self.dataFolder)
    try:
        # Interpret first file in the list and create the downloaded dictionary
        self.downloaded = {}
        self.downloaded['dates'] = dates
        # NOTE(review): raises IndexError when no requested date has a file on
        # disk (existingFiles empty) — presumably callers guard against this.
        tmpData, self.downloaded['lat'], self.downloaded['lon']=self.importData(existingFiles[0])
        # Ensure that records fall within the [-180, 180] and [-90, 90] ranges
        self.downloaded['lon'] = np.mod(self.downloaded['lon'], 360)
        self.downloaded['lon'] = np.mod(self.downloaded['lon'] + 360, 360)
        self.downloaded['lon'][self.downloaded['lon']>180] = self.downloaded['lon'][self.downloaded['lon']>180]-360
        self.downloaded['lat'] = np.mod(self.downloaded['lat'], 180)
        self.downloaded['lat'] = np.mod(self.downloaded['lat'] + 180, 180)
        self.downloaded['lat'][self.downloaded['lat']>90] = self.downloaded['lat'][self.downloaded['lat']>90]-180
        # Allocate the (time, lat, lon) cube; NaN marks "no value yet".
        self.downloaded['data']=np.empty((len(dates), tmpData.shape[1], tmpData.shape[2]), dtype=self.precision)
        self.downloaded['data'][:] = np.nan
        # NOTE(review): np.bool is removed in modern NumPy (>=1.24); this code
        # presumably targets an older NumPy.
        self.downloaded['missing'] = np.ones((len(dates),), dtype=np.bool)
        self.downloaded['data'][0, :,:]=tmpData
        self.downloaded['missing'][0] = False
        # Interpret all the remaining files in parallel batches.
        existingFiles.pop(0)
        existingDates.pop(0)
        threads = self.readThreads
        with ThreadPool(threads) as pool:
            toInterpretSplit = [existingFiles[i0:i0+threads] for i0 in range(0, len(existingFiles), threads)]
            tmpBarLen = len(toInterpretSplit)
            imported = []
            if tmpBarLen>0:
                print('Reading files:')
            for i0, l0 in enumerate(toInterpretSplit):
                self._printProgress(i0, tmpBarLen, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
                try:
                    # Celery progress reporting; ignored outside a task.
                    current_task.update_state(state='PROGRESS', meta={'message': ('warning', 'Reading files (%s to %s)' % (os.path.split(l0[0])[1], os.path.split(l0[-1])[1])),
                                                                      'state': 'PROGRESS'})
                except Exception:
                    pass
                imported.extend(pool.map(self.importData, l0))
            self._printProgress(tmpBarLen, tmpBarLen, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
    except Exception as ex:
        raise(ex)
    finally:
        #===================================================================
        # os.rmdir(self.tmpFolder)
        #===================================================================
        # Always drop the temporary decompression folder, even on failure.
        shutil.rmtree(self.tmpFolder)
    # store interpretations (each tuple's first element is the data grid)
    for i0, t0 in enumerate(imported):
        idx = dates.index(existingDates[i0])
        self.downloaded['data'][idx, :,:] = t0[0]
        self.downloaded['missing'][idx] = False
    # define indexes
    if geometryStr!='':
        self.setGeometryInfo(geometryStr)
    elif 'geometryInfo' not in self.__dict__.keys():
        self.geometryInfo = self._getGeometyIdxs(lat=self.downloaded['lat'], lon=self.downloaded['lon'], filePath=geometryFile)
    # crop data to the geometry's bounding box, masking pixels outside it
    self.downloaded['lat'] = self.geometryInfo['lat']
    self.downloaded['lon'] = self.geometryInfo['lon']
    tmp = np.empty((len(dates), self.geometryInfo['lat'].shape[0], self.geometryInfo['lon'].shape[0]), dtype=self.precision)
    tmp[:] = np.nan
    tmp[:, self.geometryInfo['idxReduced'][0], self.geometryInfo['idxReduced'][1]] = self.downloaded['data'][:, self.geometryInfo['idxOriginal'][0], self.geometryInfo['idxOriginal'][1]]
    self.downloaded['data'] = tmp
def update(self, download=True, downloadThreads=3, readThreads=1, geometryFile=None, geometryStr=''):
    '''
    Refresh the archive from the first day of the most recent stored month
    up to now, and return the timestamp of the last non-missing record.
    '''
    year = max(self.netCDFDict.keys())
    month = max(self.netCDFDict[year].keys())
    # BUG FIX: the caller's `download` flag was previously ignored
    # (store() was always invoked with download=True).
    lastRecord = self.store(dateIni=dt.datetime(year, month, 1), dateEnd=dt.datetime.now(), download=download, downloadThreads=downloadThreads, readThreads=readThreads, geometryFile=geometryFile, geometryStr=geometryStr)
    return lastRecord
def store(self, dateIni=dt.datetime(1998, 1, 1, 0), dateEnd=dt.datetime.now(), download=True, downloadThreads=3, readThreads=1, geometryFile=None, geometryStr=''):
    '''
    Process and persist the period [dateIni, dateEnd] one month at a time.

    For each month: download/read/merge via process(), write monthly NetCDFs
    via save(), then drop the in-memory buffers. Returns the timestamp of the
    last non-missing record of the last successfully processed month.

    NOTE(review): the default dateEnd is evaluated once at import time, and
    `lastRecord` is unbound if every month halts — both presumably accepted
    trade-offs in the original design.
    '''
    self.downloadThreads = downloadThreads
    self.readThreads = readThreads
    dates = self.filePeriod(dateIni=dateIni, dateEnd=dateEnd)
    # Per-date index of its (year, month) group.
    monthIdxs = np.array(self._splitByMonth(dates))
    # First position of each month, used to process months in chronological order.
    tmp = [np.where(monthIdxs==m0)[0][0] for m0 in np.unique(monthIdxs)]
    sortedIdxs = sorted(range(len(tmp)), key=lambda i0: tmp[i0])
    tmpPeriods = len(sortedIdxs)
    for i0, m0 in enumerate(sortedIdxs):
        tmp = np.where(monthIdxs==m0)[0]
        monthDates = np.array(dates)[tmp]
        dateIni = np.min(monthDates)
        dateEnd = np.max(monthDates)
        print('Storing %02u/%04u...' % (dateIni.month, dateIni.year))
        try:
            # Celery progress reporting; ignored outside a task.
            current_task.update_state(state='PROGRESS', meta={'message': ('warning', 'Storing data: %02u.%04u' % (dateIni.month, dateIni.year)),
                                                              'progress': i0/tmpPeriods,
                                                              'state': 'PROGRESS'})
        except Exception:
            pass
        # process() returns a truthy halt signal when nothing could be downloaded.
        halt = self.process(dateIni=dateIni, dateEnd=dateEnd, download=download, geometryFile=geometryFile, geometryStr=geometryStr)
        if not halt:
            self.save()
            # Timestamp of the last record actually present in this month.
            lastRecord = self.loaded['dates'][np.where(False==self.loaded['missing'])[0][-1]]
        # Drop in-memory buffers so the next month starts clean.
        self.__dict__.pop('loaded', None)
        if 'downloaded' in self.__dict__.keys():
            self.__dict__.pop('downloaded', None)
        # Refresh the on-disk file index so later months see the new NetCDFs.
        self._listData()
    return lastRecord
def process(self, dateIni=dt.datetime(1998, 1, 1, 0), dateEnd=dt.datetime.now(), download=True, read=True, geometryFile=None, geometryStr=''):
    '''
    Reads the downloaded files and processes them by interpolating missing data and aggregating it to the desired timestep.

    Pipeline: load stored NetCDFs -> optionally download missing raw files ->
    optionally parse them -> merge downloads into self.loaded.
    Returns True ("halt") when the download step reports nothing available;
    otherwise returns None.
    '''
    # Load existing NetCDFs (to self.loaded)
    self.load(dateIni, dateEnd)
    # Download if needed
    if download:
        halt = self.download(dateIni=dateIni, dateEnd=dateEnd)
        if halt:
            return halt
    # Process downloads
    if read:
        # Only parse dates not already covered by complete archives.
        dateList = self._notProcessed(self.filePeriod(dateIni=dateIni, dateEnd=dateEnd))
        if len(dateList)>0:
            self.readDownloads(dateList, geometryFile=geometryFile, geometryStr=geometryStr)
    # Check if loaded and downloaded are compatible
    if 'loaded' in self.__dict__:
        lat = self.loaded['lat']
        lon = self.loaded['lon']
        if 'downloaded' in self.__dict__:
            if not (lat==self.downloaded['lat']).all() or not (lon==self.downloaded['lon']).all():
                raise Exception('Stored and downloaded coordinates do not match.')
    else:
        lat = self.downloaded['lat']
        lon = self.downloaded['lon']
    #===================================================================
    # # Interpolates the missing values in the matrix. The interpolation is made just on the time dimension
    # # Loop through all x - axis 0 of the matrix
    # if 'downloaded' in self.__dict__:
    #     tmplat = self.downloaded['data'].shape[1]
    #     print('Interpolating missing data:')
    #     for i0 in range(self.downloaded['data'].shape[1]):
    #         self._printProgress(i0, tmplat, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
    #         # Loop through all y - axis 1 of the matrix
    #         for i1 in range(self.downloaded['data'].shape[2]):
    #             # Temporary array with all precipitation values (z axis) for a given lat and lon (x and y axis)
    #             tmp = np.squeeze(self.downloaded['data'][:, i0, i1])
    #             nans = np.isnan(tmp)
    #             tmpNanSum = np.sum(nans)
    #             if tmpNanSum>0 and tmpNanSum!=nans.shape[0]:
    #                 # Creates an array with the size of the temporary but with values that correspond to the axis [0,1,2..., n]
    #                 idx = np.arange(len(tmp))
    #                 valid = np.logical_not(nans)
    #                 # The interpolate function requires the index of the points to interpolate (idx[nans]),
    #                 # the index of the points with valid values (idx[valid]) and
    #                 # the valid values tha will be used to interpolate (tmp[valid])
    #                 self.downloaded['data'][nans, i0, i1]=np.interp(idx[nans], idx[valid], tmp[valid])
    #     self._printProgress(tmplat, tmplat, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
    #===================================================================
    # Join downloads and stored data (loaded)
    if 'loaded' not in self.__dict__.keys():
        # Nothing stored yet: allocate an all-missing container for the period.
        dates = self.filePeriod(dateIni=dateIni, dateEnd=dateEnd)
        self.loaded = {}
        self.loaded['lat'] = lat
        self.loaded['lon'] = lon
        self.loaded['dates'] = dates
        self.loaded['data'] = np.empty((len(dates), len(lat), len(lon)), dtype=self.precision)
        self.loaded['data'][:] = np.nan
        # NOTE(review): np.bool is removed in modern NumPy (>=1.24).
        self.loaded['missing'] = np.ones((len(dates),), dtype=np.bool)
    if 'downloaded' in self.__dict__.keys():
        # Overwrite loaded entries with the freshly downloaded ones where
        # the two date axes intersect.
        idxsLoaded = self.ismember(self.downloaded['dates'], self.loaded['dates'])
        idxsDownloaded = self.ismember(self.loaded['dates'], self.downloaded['dates'])
        self.loaded['data'][idxsLoaded, :, :] = self.downloaded['data'][idxsDownloaded, :, :]
        self.loaded['missing'][idxsLoaded] = self.downloaded['missing'][idxsDownloaded]
def plot(self):
    '''
    Quick-look plot of the time-mean of the loaded data (blocking).

    NOTE(review): the *365*8 scaling presumably converts a 3-hourly mean
    (8 steps/day) to a yearly total — confirm against the product timestep.
    '''
    # flipud puts north at the top of the matshow image.
    mean=np.flipud(np.nanmean(self.loaded['data'], 0)*365*8)
    ax = plt.matshow(mean)
    plt.colorbar(ax)
    plt.show(block=True)
def save(self, overwriteAll=False, overwriteIncomplete=True):
    '''
    Splits the data in blocks of 1 month and stores them in NetCDF files.

    Parameters
    ----------
    overwriteAll : bool
        When True, rewrite every month even if a complete file exists.
    overwriteIncomplete : bool
        When True (default), rewrite months whose existing file is incomplete.
    '''
    tmpDates = np.array(self.loaded['dates'])
    # Per-date index of its (year, month) group.
    monthIdxs = np.array(self._splitByMonth(self.loaded['dates']))
    uniqueMonthIdxs = np.unique(monthIdxs)
    print('Saving NetCDFs:')
    tmpPeriods = len(uniqueMonthIdxs)
    for c0, i0 in enumerate(uniqueMonthIdxs):
        self._printProgress(c0, tmpPeriods, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
        tmp = np.where(monthIdxs==i0)[0]
        monthDates = tmpDates[tmp]
        if not overwriteAll:
            if monthDates[0].year in self.netCDFDict.keys() and monthDates[0].month in self.netCDFDict[ monthDates[0].year].keys():
                if self.netCDFDict[monthDates[0].year][monthDates[0].month][1]==True:
                    # prevents complete files from being overwritten
                    continue
                else:
                    # incomplete file
                    if not overwriteIncomplete:
                        # prevents overwriting
                        continue
        monthData = self.loaded['data'][tmp, :, :]
        monthMissing = self.loaded['missing'][tmp]
        # clobber=True: an existing file for this month is replaced.
        rootgrp = Dataset(os.path.join(self.dataFolder, self.filePrefix + '_%04d.%02d.nc' % (monthDates[0].year, monthDates[0].month)), 'w', format='NETCDF4', clobber=True)
        time = rootgrp.createDimension('time', None)
        lat = rootgrp.createDimension('lat', monthData.shape[1])
        lon = rootgrp.createDimension('lon', monthData.shape[2])
        times = rootgrp.createVariable('time', np.double, dimensions=('time',), zlib=True)
        lats = rootgrp.createVariable('lat', np.double, dimensions=('lat',), zlib=True)
        lons = rootgrp.createVariable('lon', np.double, dimensions=('lon',), zlib=True)
        precips = rootgrp.createVariable('precipitation', self.precision, dimensions=('time', 'lat', 'lon'), zlib=True, least_significant_digit=self.significantDigits)
        missing = rootgrp.createVariable('missing', np.int8, dimensions=('time'), zlib=True)
        rootgrp.description = 'Rainfall data (' + self.filePrefix + ')'
        rootgrp.history = 'Created the ' + str(dt.datetime.now())
        lats.units = 'degrees of the center of the pixel (WGS84)'
        lons.units = 'degrees of the center of the pixel (WGS84)'
        times.units = "hours since 0001-01-01 00:00:00.0"
        times.calendar = 'standard'
        precips.units = 'mm of rain accumulated over a 3-hour interval centered on the time reference [-1.5, +1.5]'
        # Check completeness
        # NOTE(review): the next line is a no-op (result discarded) — it looks
        # like a leftover from an earlier version of the completeness check.
        monthDates[0] + relativedelta(months=1)
        # Rebuild the full list of timesteps belonging to this month and
        # compare it against what is actually present.
        tmp = self.filePeriod(dateIni=monthDates[-1] - relativedelta(months=1), dateEnd=monthDates[0] + relativedelta(months=1))
        tmp = [dt0 for dt0 in tmp if dt0.month==monthDates[0].month and dt0.year==monthDates[0].year]
        if len(self.ismember(tmp, monthDates)) == len(tmp):
            # The month is complete
            if np.all(np.logical_not(monthMissing)):
                rootgrp.complete = 1
            else:
                rootgrp.complete = 0
        else:
            # The month is not complete
            rootgrp.complete = 0
        if rootgrp.complete==0:
            warnings.warn(' netCDF not complete (' + self.filePrefix + '_%04d.%02d.nc' % (monthDates[0].year, monthDates[0].month) + ').', UserWarning)
        lats[:] = self.loaded['lat']
        lons[:] = self.loaded['lon']
        times[:] = date2num(monthDates, units=times.units, calendar=times.calendar)
        precips[:, :, :] = monthData
        missing[:] = monthMissing
        rootgrp.close()
    self._printProgress(tmpPeriods, tmpPeriods, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
def load(self, dateIni=dt.datetime(1998, 1, 1, 0), dateEnd=dt.datetime.now()):
    '''
    Loads the data from 1-month NetCDF files into a numpy array.

    Fills self.loaded ('dates', 'lat', 'lon', 'data', 'missing') from every
    stored monthly file intersecting [dateIni, dateEnd]. Leaves self.loaded
    unset when no file matches.
    '''
    dates = self.filePeriod(dateIni=dateIni, dateEnd=dateEnd)
    # Unique (year, month) pairs covered by the requested period.
    # (The comprehension variable `dt` shadows the datetime module, but only
    # inside the comprehension's own scope.)
    yearMonth = list(set([(dt.year, dt.month) for dt in dates]))
    print('Attempting to load NetCDFs:')
    tmpPeriods = len(yearMonth)
    data = None
    for i0, t0 in enumerate(yearMonth):
        self._printProgress(i0, tmpPeriods, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
        if t0[0] in self.netCDFDict.keys() and t0[1] in self.netCDFDict[t0[0]].keys():
            tmp = self._loadNetCDF(os.path.join(self.dataFolder, self.netCDFDict[t0[0]][t0[1]][0]))
            # Refresh the cached completeness flag from the file itself.
            self.netCDFDict[t0[0]][t0[1]][1] = tmp['complete']
            if 'loaded' not in self.__dict__:
                # Allocate the output containers on the first successful load.
                self.loaded = {}
                self.loaded['dates'] = dates
                self.loaded['lat'] = tmp['lat']
                self.loaded['lon'] = tmp['lon']
                self.loaded['data'] = np.empty((len(dates), len(self.loaded['lat']), len(self.loaded['lon'])), dtype=self.precision)
                self.loaded['data'][:] = np.nan
                # NOTE(review): np.bool is removed in modern NumPy (>=1.24).
                self.loaded['missing'] = np.ones((len(dates),), dtype=np.bool)
            # Map the file's timesteps onto the requested date axis.
            idxsLoaded = np.array(self.ismember(tmp['dates'], self.loaded['dates']))
            idxsTmp = np.array(self.ismember(self.loaded['dates'], tmp['dates']))
            self.loaded['data'][idxsLoaded, :, :] = tmp['data'][idxsTmp, :, :]
            self.loaded['missing'][idxsLoaded] = tmp['missing'][idxsTmp]
    self._printProgress(tmpPeriods, tmpPeriods, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
def getGeometryInfo(self):
    '''
    Serialize self.geometryInfo to a JSON string.

    Returns '' when no geometry has been configured on this instance.
    '''
    if 'geometryInfo' not in self.__dict__.keys():
        return ''
    info = self.geometryInfo
    serializable = {
        'lat': info['lat'].tolist(),
        'lon': info['lon'].tolist(),
        'idxOriginal': (info['idxOriginal'][0].tolist(), info['idxOriginal'][1].tolist()),
        'idxReduced': (info['idxReduced'][0].tolist(), info['idxReduced'][1].tolist()),
    }
    return json.dumps(serializable)
def setGeometryInfo(self, jsonStr):
    '''
    Restore self.geometryInfo from a JSON string produced by
    getGeometryInfo(). An empty string leaves the instance untouched.
    '''
    if jsonStr == '':
        return
    parsed = json.loads(jsonStr)
    parsed['lat'] = np.array(parsed['lat'])
    parsed['lon'] = np.array(parsed['lon'])
    parsed['idxOriginal'] = (np.array(parsed['idxOriginal'][0]), np.array(parsed['idxOriginal'][1]))
    parsed['idxReduced'] = (np.array(parsed['idxReduced'][0]), np.array(parsed['idxReduced'][1]))
    self.geometryInfo = parsed
def filePeriod(self, dateIni=dt.datetime(1998, 1, 1, 0), dateEnd=dt.datetime.now()):
    '''
    Return the list of datetimes covering [dateIni, dateEnd] (inclusive)
    at the product's native timestep.
    '''
    step = dt.timedelta(**self.timestep)
    # np.arange over datetimes yields datetime64 values; cast each entry
    # back to a plain datetime with astype(object).
    grid = np.arange(dateIni, dateEnd + step, step)
    return [stamp.astype(object) for stamp in grid]
def aggregate(self, dates, geometryStr=None, missingTolerance=0.1):
    '''
    Spatially average the stored grids over the configured geometry for each
    requested date. Returns a double array aligned with `dates`; entries are
    NaN when no file covers the date or too many pixels are missing.

    missingTolerance is the fraction of geometry pixels allowed to be NaN
    before the timestep is rejected.
    '''
    # Maximum number of NaN pixels tolerated in the full (uncropped) grid:
    # total pixels minus the required share of valid geometry pixels.
    missingMax = round(len(json.loads(geometryStr)['lat'])*len(json.loads(geometryStr)['lon'])-len(json.loads(geometryStr)['idxReduced'][0])*(1-missingTolerance))
    values = np.empty_like(dates, dtype=np.double)*np.NaN
    print('Aggregating:')
    tmpPeriods = len(dates)
    # Cache the most recently opened monthly file across iterations.
    openFileName = None
    for i0, d0 in enumerate(dates):
        year = d0.year
        month = d0.month
        if np.mod(i0,1000)==0:
            # Only refresh the progress display every 1000 steps.
            self._printProgress(i0, tmpPeriods, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
            try:
                # Celery progress reporting; printed (not raised) on failure.
                current_task.update_state(state='PROGRESS', meta={'message': ('warning', 'Aggregating %02u.%04u' % (month, year)),
                                                                  'progress': i0/tmpPeriods,
                                                                  'state': 'PROGRESS'})
            except Exception as ex:
                print(str(ex))
        if year in self.netCDFDict.keys() and month in self.netCDFDict[year].keys():
            if openFileName != self.netCDFDict[year][month][0]:
                # Date falls in a different month: load its file (dates are
                # typically processed in order, so this is infrequent).
                openFileName = self.netCDFDict[year][month][0]
                openFileData = self._loadNetCDF(os.path.join(self.dataFolder, openFileName))
            idx = np.array(self.ismember((dates[i0],), openFileData['dates']))
            if np.sum(np.isnan(openFileData['data'][idx, :, :]))<=missingMax:
                values[i0] = np.nanmean(openFileData['data'][idx, :, :])
            else:
                values[i0] = np.NaN
    self._printProgress(tmpPeriods, tmpPeriods, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
    return values
def _getGeometyIdxs(self, lat, lon, filePath=None):
    '''
    Build the geometry lookup used to crop grids.

    When filePath points to a GeoJSON file, rasterize its (Multi)Polygons
    onto the lat/lon grid and return a dict with the bounding-box coordinate
    vectors ('lat', 'lon') plus paired index arrays mapping pixels of the
    original grid ('idxOriginal') to the reduced grid ('idxReduced').
    Without a file, the identity mapping over the full grid is returned.
    '''
    if filePath!=None:
        # load geometry
        with open(filePath, 'r') as myfile:
            geojsonStr=myfile.read()
        obj = geojson.loads(geojsonStr)
        # compute logical matrix of valid pixels
        # NOTE(review): np.bool is removed in modern NumPy (>=1.24).
        chosenPixels = np.zeros((len(lat), len(lon)),dtype=np.bool)
        for f0 in obj['features']:
            if f0['type'] != 'Feature':
                continue
            if f0['geometry']['type'] == 'Polygon':
                g0 = f0['geometry']['coordinates']
                # First ring is the outer boundary.
                tmp = self._intersection(lon, lat, [i0[0] for i0 in g0[0]], [i0[1] for i0 in g0[0]])
                if len(g0)>1:
                    # Remaining rings are holes: subtract them.
                    # (The comprehension variable i0 shadows the ring index
                    # only inside the comprehension scope; g0[i0] is evaluated
                    # with the loop's i0, so this works in Python 3.)
                    for i0 in range(1, len(g0)):
                        tmp = np.logical_and(tmp, np.logical_not(self._intersection(lon, lat, [i0[0] for i0 in g0[i0]], [i0[1] for i0 in g0[i0]])))
                chosenPixels = np.logical_or(chosenPixels, tmp)
            elif f0['geometry']['type'] == 'MultiPolygon':
                tmp = np.zeros((len(lat), len(lon)),dtype=np.bool)
                for g0 in f0['geometry']['coordinates']:
                    # Union of each member polygon's outer ring...
                    tmp = np.logical_or(tmp, self._intersection(lon, lat, [i0[0] for i0 in g0[0]], [i0[1] for i0 in g0[0]]))
                    if len(g0)>1:
                        # ...minus its holes.
                        for i0 in range(1, len(g0)):
                            tmp = np.logical_and(tmp, np.logical_not(self._intersection(lon, lat, [i0[0] for i0 in g0[i0]], [i0[1] for i0 in g0[i0]])))
                    chosenPixels = np.logical_or(chosenPixels, tmp)
        #=======================================================================
        # plt.imshow(np.flipud(chosenPixels), cmap='Greys', interpolation='nearest')
        #=======================================================================
        # get indexes to retrieve information
        geometryInfo = {}
        tmp = np.where(chosenPixels!=0)
        # Bounding box of the selected pixels.
        geometryInfo['lat'] = lat[np.min(tmp[0]):np.max(tmp[0])+1]
        geometryInfo['lon'] = lon[np.min(tmp[1]):np.max(tmp[1])+1]
        geometryInfo['idxOriginal'] = np.where(chosenPixels)
        geometryInfo['idxReduced'] = np.where(chosenPixels[np.min(tmp[0]):np.max(tmp[0])+1, np.min(tmp[1]):np.max(tmp[1])+1])
    else:
        # No geometry file: identity mapping over the whole grid.
        geometryInfo = {}
        geometryInfo['lat'] = lat
        geometryInfo['lon'] = lon
        tmpLat = np.repeat(np.expand_dims(range(len(lat)), 1), len(lon), axis=1)
        tmpLon = np.repeat(np.expand_dims(range(len(lon)), 0), len(lat), axis=0)
        geometryInfo['idxOriginal'] = (tmpLat.ravel(), tmpLon.ravel())
        geometryInfo['idxReduced'] = geometryInfo['idxOriginal']
    return geometryInfo
def _intersection(self, pointsX, pointsY, borderX, borderY):
    '''
    Point-in-polygon test via ray casting.

    For every grid point (pointsX x pointsY) a ray is cast towards a fixed
    far point (9999, 9999); a point is inside when the ray crosses the
    polygon border (borderX/borderY vertex lists) an odd number of times.
    Returns a boolean matrix shaped (len(pointsY), len(pointsX)).
    '''
    print('Processing geometry:')
    pixels = len(pointsX) * len(pointsY)
    segments = len(borderX)-1
    maxBorderY = max(borderY)
    maxBorderX = max(borderX)
    minBorderY = min(borderY)
    minBorderX = min(borderX)
    #=======================================================================
    # # Make sure all data is computed within the same range
    # pointsX = np.mod(pointsX, 360)
    # pointsX = np.mod(pointsX + 360, 360)
    # pointsX[pointsX>180] = pointsX[pointsX>180]-360
    #
    # pointsY = np.mod(pointsY, 180)
    # pointsY = np.mod(pointsY + 180, 180)
    # pointsY[pointsY>90] = pointsY[pointsY>90]-180
    #=======================================================================
    # Defining matrices for calculation: one row/column per grid point,
    # flattened to 1-D arrays of length `pixels`.
    pointsX = np.expand_dims(pointsX, 1)
    pointsY = np.expand_dims(pointsY, 0)
    pixelBaseX = np.repeat(pointsX, pointsY.shape[1], axis=1).ravel()
    pixelBaseY = np.repeat(pointsY, pointsX.shape[0], axis=0).ravel()
    # Only points inside the border's bounding box can possibly be inside;
    # the rest are skipped to reduce the workload.
    validBoolX = np.logical_and(pixelBaseX<=maxBorderX, pixelBaseX>=minBorderX)
    validBoolY = np.logical_and(pixelBaseY<=maxBorderY, pixelBaseY>=minBorderY)
    validBool = np.logical_and(validBoolX, validBoolY)
    # L[p, s] will be True when pixel p's ray crosses border segment s.
    # NOTE(review): np.bool is removed in modern NumPy (>=1.24).
    L = np.zeros((pixels, segments), dtype=np.bool)
    #=======================================================================
    # R = np.empty((np.sum(validBool), segments))
    #=======================================================================
    pixelRedX = pixelBaseX[validBool]
    pixelRedY = pixelBaseY[validBool]
    # Far end of the casting ray (shared by all points).
    x2 = 9999
    y2 = 9999
    #=======================================================================
    # a = (pixelBaseX*y2-pixelBaseY*x2)
    # d = (pixelBaseX-x2)
    # e = (pixelBaseY-y2)
    #=======================================================================
    # Precomputed terms of the line-line intersection formula for the ray
    # from each candidate point to (x2, y2).
    a = (pixelRedX*y2-pixelRedY*x2)
    d = (pixelRedX-x2)
    e = (pixelRedY-y2)
    for i0 in range(segments):
        # TODO: extend to cases where the border goes beyond [-180, 180]
        if np.mod(i0, 20)==0:
            # Refresh progress only every 20 segments.
            self._printProgress(i0, segments-1, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
            try:
                # Celery progress reporting; ignored outside a task.
                current_task.update_state(state='PROGRESS', meta={'message': ('warning', 'Computing geometry'),
                                                                  'progress': i0/(segments-1),
                                                                  'state': 'PROGRESS'})
            except Exception:
                pass
        # Current border segment endpoints.
        x3 = borderX[i0]
        y3 = borderY[i0]
        x4 = borderX[i0+1]
        y4 = borderY[i0+1]
        # Computing intersection coordinates (standard determinant form of
        # the intersection of two infinite lines).
        b = (x3*y4-y3*x4)
        c = d*(y3-y4)-e*(x3-x4)
        px = (a*(x3-x4)-d*b)/c
        py = (a*(y3-y4)-e*b)/c
        # Bounding intersections to the real lines
        #===================================================================
        # lx = np.logical_and(
        #     px>=pixelBaseX,
        #     np.logical_or(
        #         np.logical_and(px<=x3+1E-6, px>=x4-1E-6),
        #         np.logical_and(px<=x4+1E-6, px>=x3-1E-6)))
        # ly = np.logical_and(
        #     py>=pixelBaseY,
        #     np.logical_or(
        #         np.logical_and(py<=y3+1E-6, py>=y4-1E-6),
        #         np.logical_and(py<=y4+1E-6, py>=y3-1E-6)))
        #===================================================================
        # The crossing counts only if it lies beyond the point (towards the
        # far end) and within the segment's extent (with a small tolerance).
        lx = np.logical_and(
            px>=pixelRedX,
            np.logical_or(
                np.logical_and(px<=x3+1E-6, px>=x4-1E-6),
                np.logical_and(px<=x4+1E-6, px>=x3-1E-6)))
        ly = np.logical_and(
            py>=pixelRedY,
            np.logical_or(
                np.logical_and(py<=y3+1E-6, py>=y4-1E-6),
                np.logical_and(py<=y4+1E-6, py>=y3-1E-6)))
        #===================================================================
        # L[:,i0] = np.logical_and(lx, ly)
        #===================================================================
        L[validBool,i0] = np.logical_and(lx, ly)
    # Odd number of crossings => the point is inside the polygon.
    L = np.mod(np.sum(L, 1), 2)==1
    L = np.reshape(L, (pointsY.shape[1], pointsX.shape[0]), order='F')
    self._printProgress(segments-1, segments-1, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
    return L
def _listData(self):
# List and pre-process available netCDF files
self.netCDFDict = {}
for f0 in os.listdir(self.dataFolder):
tmp = re.match('^' + self.filePrefix + '_([\d]{4}).([\d]{2}).nc$', f0)
if tmp != None:
tmp = (tmp.group(0), int(tmp.group(1)), int(tmp.group(2)))
if tmp[1] not in self.netCDFDict.keys():
self.netCDFDict[tmp[1]] = {}
self.netCDFDict[tmp[1]][tmp[2]] = [tmp[0], True]
def _downloadFile(self, toDownload):
    '''
    Downloads the file from the url and saves it in the directory folderPath with the name fileName.

    `toDownload` is a (fileName, url) tuple. Returns the size in bytes of
    the saved file, or -1 when any error occurred (the error is reported
    as a warning, never raised, so pool.map keeps going).
    '''
    fileName, url = toDownload
    # Opens the web page and creates a file in the folder folderPAth and with the name fileName
    try:
        #===============================================================================
        # passman = request.HTTPPasswordMgrWithDefaultRealm()
        # passman.add_password(self.realm, url, self.username, self.password)
        #
        # authhandler = request.HTTPBasicAuthHandler(passman)
        # opener = request.build_opener(authhandler)
        # request.install_opener(opener)
        #===============================================================================
        u = request.urlopen(url)
        f = open(fileName, 'wb')
        # Stream the body in 8 KiB chunks to keep memory bounded.
        block_sz = 8192
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            f.write(buffer)
        # Closes the file
        f.close()
        u.close()
        return os.path.getsize(fileName)
    except Exception as ex:
        # Best-effort: report and signal failure with -1 (caller decides
        # whether to retry based on the returned size).
        warnings.warn(str(ex), UserWarning)
        return -1
def _printProgress (self, iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):
'''
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : number of decimals in percent complete (Int)
barLength - Optional : character length of bar (Int)
'''
if total>0:
filledLength = int(round(barLength * iteration / float(total)))
percents = round(100.00 * (iteration / float(total)), decimals)
bar = '#' * filledLength + '-' * (barLength - filledLength)
sys.stdout.write('%s [%s] %s%s %s\r' % (prefix, bar, percents, '%', suffix)),
sys.stdout.flush()
if iteration == total:
print("\n")
def _sumChunksMatrix(self, matrix, chunkSize, axis=-1):
'''
Sums sequences of values along a given axis.
The chunkSize defines the size of the sequence to sum.
'''
shape = matrix.shape
if axis < 0:
axis += matrix.ndim
shape = shape[:axis] + (-1, chunkSize) + shape[axis+1:]
x = matrix.reshape(shape)
return x.sum(axis=axis+1)
def ismember(self, a, b):
    '''
    For each element of `a` that also occurs in `b`, return the index of
    its first occurrence in `b`. Elements of `a` absent from `b` are
    silently dropped (MATLAB ismember-style lookup).
    '''
    firstIndex = {}
    for position, element in enumerate(b):
        # Keep only the first position seen for each element.
        firstIndex.setdefault(element, position)
    return [firstIndex[item] for item in a if item in firstIndex]
def _findLastNetCDF(self):
tmp0 = max(self.netCDFDict.keys())
tmp1 = max(self.netCDFDict[tmp0].keys())
return (tmp0, tmp1, self.netCDFDict[tmp0][tmp1])
def _notProcessed(self, dateRange):
tmpDateMonth = [(dt.year, dt.month) for dt in dateRange]
for i0 in range(len(dateRange)-1,-1,-1):
tmp = dateRange[i0]
if tmp.year in self.netCDFDict.keys():
if tmp.month in self.netCDFDict[tmp.year].keys():
if self.netCDFDict[tmp.year][tmp.month][1]:
# the file is complete
dateRange.pop(i0)
else:
# the file is not complete
if 'loaded' in self.__dict__:
if not self.loaded['missing'][self.loaded['dates'].index(dateRange[i0])]:
# this value is not missing
dateRange.pop(i0)
else:
dateRange.pop(i0)
return dateRange
def _splitByMonth(self, dateRange):
tmpDateMonth = [(dt.year, dt.month) for dt in dateRange]
uniqueMonths = list(set(tmpDateMonth))
tmpTuple = None
idxs = []
for s0 in tmpDateMonth:
if s0 != tmpTuple:
tmpIdx = uniqueMonths.index(s0)
tmpTuple = s0
idxs.append(tmpIdx)
return idxs
def _loadNetCDF(self, path, data=True):
    '''
    Read one monthly NetCDF archive.

    Returns a dict with 'dates', 'lat', 'lon', 'missing' and the file's
    completeness flag; the (time, lat, lon) 'data' cube is included only
    when `data` is True (skipping it avoids reading the largest variable).
    '''
    rootgrp = Dataset(path, 'r', format="NETCDF4")
    out = {}
    tmp = rootgrp.variables['time']
    # Convert the numeric time axis back to datetimes using the stored units.
    out['dates'] = num2date(tmp[:], tmp.units, tmp.calendar)
    out['lat'] = rootgrp.variables['lat'][:]
    out['lon']= rootgrp.variables['lon'][:]
    # 'complete' is written by save() as a 0/1 global attribute.
    out['complete'] = rootgrp.complete == 1
    out['missing'] = rootgrp.variables['missing'][:]
    if data:
        out['data'] = rootgrp.variables['precipitation'][:,:,:]
    rootgrp.close()
    return out
class TRMMSatelliteRainfall(SatelliteData):
    '''
    TRMM 3B42 (version 7) research-product rainfall.

    Data downloaded from:
    http://mirador.gsfc.nasa.gov/cgi-bin/mirador/presentNavigation.pl?tree=project&&dataGroup=Gridded&project=TRMM&dataset=3B42:%203-Hour%200.25%20x%200.25%20degree%20merged%20TRMM%20and%20other%20satellite%20estimates&version=007
    '''
    # Product configuration consumed by the SatelliteData machinery.
    filePrefix = 'trmm3B42v7'
    precision = np.single
    significantDigits = 2
    downloadFailThreshold = 50000  # downloads smaller than this (bytes) are considered truncated
    productSite = 'http://trmm.gsfc.nasa.gov/'
    downloadSite = 'http://mirador.gsfc.nasa.gov/cgi-bin/mirador/presentNavigation.pl?tree=project&&dataGroup=Gridded&project=TRMM&dataset=3B42:%203-Hour%200.25%20x%200.25%20degree%20merged%20TRMM%20and%20other%20satellite%20estimates&version=007'
    description = 'Tropical Rainfall Measuring Mission, TMPA 3B42 version 7. Accumulated rainfall over 3h intervals in mm. Grid of 0.25x0.25 deg.'
    realm = 'http://disc2.gesdisc.eosdis.nasa.gov/'
    # Native resolution: one grid every 3 hours.
    timestep = {}
    timestep['hours'] = 3
    units = 'mm/3h'
    fileExtension = '.gz'

    def downloadList(self, dateIni=dt.datetime(1998, 1, 1, 0), dateEnd=dt.datetime.now()):
        '''
        implementation for TRMM 3B42 data
        Returns a tuple with the target file paths on disk and the matching
        download URLs for every timestep in [dateIni, dateEnd] that has not
        been processed yet.
        '''
        urlFormat0="http://disc2.gesdisc.eosdis.nasa.gov/daac-bin/OTF/HTTP_services.cgi?FILENAME=%2Fs4pa%2FTRMM_L3%2FTRMM_3B42%2F{1}%2F{2}%2F3B42.{0}.7.HDF.Z&FORMAT=L2d6aXA&LABEL=3B42.{0}.7.nc.gz&SHORTNAME=TRMM_3B42&SERVICE=HDF_TO_NetCDF&VERSION=1.02&DATASET_VERSION=007"
        urlFormat1="http://disc2.gesdisc.eosdis.nasa.gov/daac-bin/OTF/HTTP_services.cgi?FILENAME=%2Fs4pa%2FTRMM_L3%2FTRMM_3B42%2F{1}%2F{2}%2F3B42.{0}.7A.HDF.Z&FORMAT=L2d6aXA&LABEL=3B42.{0}.7.nc.gz&SHORTNAME=TRMM_3B42&SERVICE=HDF_TO_NetCDF&VERSION=1.02&DATASET_VERSION=007"
        # Dates and urls to download
        dateList = self._notProcessed(self.filePeriod(dateIni=dateIni, dateEnd=dateEnd))
        dateList = [dt0.strftime('%Y%m%d.%H') for dt0 in dateList]
        urlList=[]
        for date in dateList:
            year, dayOfYear = self._fDayYear(date)
            # The archive apparently switches between the '.7' and '.7A' file
            # name variants around late 2010 — presumably a reprocessing
            # boundary on the server side (TODO confirm against the archive).
            if int(date[0:4]) < 2000 or year>2010:
                urlList.append(urlFormat0.format(date, year, dayOfYear))
            elif year==2010 and (int(dayOfYear)>273 or date=='20101001.00'):
                urlList.append(urlFormat0.format(date, year, dayOfYear))
            else:
                urlList.append(urlFormat1.format(date, year, dayOfYear))
        # File list
        fileList = [os.path.join(self.downloadFolder, '3B42.' + d0 + '.7.nc.gz') for d0 in dateList]
        return (fileList, urlList)

    def downloadedDates(self):
        '''
        Provides a list of files in folder that are have a given extension.

        Returns a tuple of parallel lists: the paths of downloaded files in
        the download folder, and the timestamp parsed from each file name
        (characters 5:16 hold '%Y%m%d.%H').
        '''
        # Reads the content of the data folder.
        # Returns the list of the files with the file type defined.
        filesFolder=os.listdir(self.downloadFolder)
        fileList=[]
        dateList=[]
        for f0 in filesFolder:
            if os.path.splitext(f0)[1] == self.fileExtension:
                fileList.append(os.path.join(self.downloadFolder, f0))
                dateList.append(dt.datetime.strptime(f0[5:16],'%Y%m%d.%H'))
        return (fileList, dateList)

    def importData(self, fileName):
        '''
        Imports the data of the files into python.

        Decompresses one downloaded .nc.gz file into a temporary NetCDF,
        reads it, and returns (data, latitudes, longitudes). Values <= -999
        in the file are replaced with NaN.
        '''
        # Defines the folder in which the temporary files are produced
        tmpFolder = self.tmpFolder
        # mkstemp safely creates a uniquely named .nc file and returns an
        # OS-level handle plus its path.
        fOutIdx, fOutPath = tempfile.mkstemp(suffix='.nc', prefix='tmp', dir=tmpFolder)
        fOut = os.fdopen(fOutIdx, 'wb+')
        # Decompress the gz payload into the temporary file. length=-1 copies
        # everything in one pass, which may be memory hungry for huge files.
        with gzip.open(fileName, 'rb') as fIn:
            shutil.copyfileobj(fIn, fOut, length=-1)
        fOut.close()
        # Dataset exposes the NetCDF dimensions/variables lazily (not the data).
        rootgrp = Dataset(fOutPath, "r")
        data = rootgrp.variables['pcp'][:, :, :]
        longitudes = rootgrp.variables['longitude'][:]
        latitudes = rootgrp.variables['latitude'][:]
        # Replace missing values with nan
        data[data<=-999]=np.nan
        rootgrp.close()
        # Delete the temporary file, retrying a few times in case the OS
        # still holds it (e.g. antivirus/indexing on Windows).
        # BUG FIX: the original loop never broke after a successful removal
        # (8 extra attempts raised and swallowed FileNotFoundError) and its
        # trailing `if ctr==10` branch was unreachable (ctr ended at 9).
        for _ in range(9):
            try:
                os.remove(fOutPath)
                break
            except Exception:
                pass
        return (data, latitudes, longitudes)

    def _fDayYear(self, url):
        '''
        This function returns the day of the year in 0-365 format and the year.

        `url` is a '%Y%m%d.%H' date string. Hour 00 belongs to the *previous*
        day in the archive's layout, so one minute is subtracted before
        computing (year, dayOfYear); dayOfYear is zero-padded to 3 digits.
        '''
        # This is to correct that the date that the hour 00 is named on day n but day of the year n-1
        # This affects the year when in the 1st of january and the day when changing between days
        # First convert string to date and then, if hour=00 decrease one minute to make it return the previous day
        tmpDate = dt.datetime.strptime(url, '%Y%m%d.%H')
        if url[-2:]=='00':
            tmpDiff = dt.timedelta(minutes=1)
            tmpDate -= tmpDiff
        return (tmpDate.year, '{dayYear:03d}'.format(dayYear=tmpDate.timetuple().tm_yday))
class TRMMSatelliteRainfallRT(SatelliteData):
    '''
    Tropical Rainfall Measuring Mission, TMPA 3B42 v7 real-time product.

    Data downloaded from:
    ftp://trmmopen.gsfc.nasa.gov/pub/merged/mergeIRMicro/
    '''
    filePrefix = 'trmm3B42v7RT'
    precision = np.single          # precision used to store the rainfall grid
    significantDigits = 2
    downloadFailThreshold = 50000  # NOTE(review): used by the base class; unit (bytes?) not visible here - confirm
    productSite = 'http://trmm.gsfc.nasa.gov/'
    downloadSite = 'ftp://trmmopen.gsfc.nasa.gov/pub/merged/mergeIRMicro/'
    description = 'Tropical Rainfall Measuring Mission, TMPA 3B42 version 7 real-time. Accumulated rainfall over 3h intervals in mm. Grid of 0.25x0.25 deg.'
    realm = 'ftp://trmmopen.gsfc.nasa.gov/'
    timestep = {}
    timestep['hours'] = 3  # one file every 3 hours
    units = 'mm/3h'
    fileExtension = '.gz'

    def downloadList(self, dateIni=dt.datetime(1998, 1, 1, 0), dateEnd=None):
        '''
        Build the download work list for TRMM 3B42RT data.

        dateIni, dateEnd: period of interest. dateEnd defaults to "now",
            evaluated at call time (the previous default dt.datetime.now()
            was evaluated once at import time, freezing the end date).
        Returns (fileList, urlList): local target paths and the matching
        remote URLs, for every date not yet processed.
        '''
        if dateEnd is None:
            dateEnd = dt.datetime.now()
        # The archive layout and file naming changed over the years:
        #   before 2012-11-07 06:00 : year/month folders, ".7R2" suffix
        #   until the end of 2013   : year/month folders, ".7" suffix
        #   from 2014 onwards       : year folders only, ".7" suffix
        urlFormat0 = "ftp://trmmopen.gsfc.nasa.gov/pub/merged/mergeIRMicro/{year}/{month:02d}/3B42RT.{datestr}.7R2.bin.gz"
        urlFormat1 = "ftp://trmmopen.gsfc.nasa.gov/pub/merged/mergeIRMicro/{year}/{month:02d}/3B42RT.{datestr}.7.bin.gz"
        urlFormat2 = "ftp://trmmopen.gsfc.nasa.gov/pub/merged/mergeIRMicro/{year}/3B42RT.{datestr}.7.bin.gz"
        # Dates still to download (skip those already processed)
        dateList = self._notProcessed(self.filePeriod(dateIni=dateIni, dateEnd=dateEnd))
        urlList = []
        for date in dateList:
            year = date.year
            datestr = date.strftime('%Y%m%d%H')
            if date < dt.datetime(2012, 11, 7, 6):
                urlList.append(urlFormat0.format(year=year, month=date.month, datestr=datestr))
            elif year < 2014:
                urlList.append(urlFormat1.format(year=year, month=date.month, datestr=datestr))
            else:
                urlList.append(urlFormat2.format(year=year, datestr=datestr))
        # Local file names follow the modern remote naming (".7" suffix)
        fileList = [os.path.join(self.downloadFolder, '3B42RT.' + d0.strftime('%Y%m%d%H') + '.7.bin.gz') for d0 in dateList]
        return (fileList, urlList)

    def downloadedDates(self):
        '''
        List the already-downloaded files and their dates.

        Returns (fileList, dateList) for every file in the download folder
        with the expected extension; dates are parsed from the fixed-width
        file name (characters 7-16, '%Y%m%d%H').
        '''
        filesFolder = os.listdir(self.downloadFolder)
        fileList = []
        dateList = []
        for f0 in filesFolder:
            if os.path.splitext(f0)[1] == self.fileExtension:
                fileList.append(os.path.join(self.downloadFolder, f0))
                dateList.append(dt.datetime.strptime(f0[7:17], '%Y%m%d%H'))
        return (fileList, dateList)

    def importData(self, fileName):
        '''
        Import one gzipped 3B42RT binary file.

        Returns (data, latitudes, longitudes): data is a (1, 480, 1440)
        array in mm per 3h with missing values as NaN.
        '''
        with gzip.open(fileName, 'rb') as fIn:
            # Skip the 2880-byte header, then read the precipitation grid:
            # 480*1440 big-endian int16 values (2 bytes each = 1382400 bytes).
            fIn.seek(2880)
            data = np.frombuffer(fIn.read(1382400), dtype=np.dtype('>i2')).astype(np.double)
        # Replace missing values with nan
        data[data == -31999] = np.nan
        # Reshape to (time, lat, lon); raw values are hundredths of mm/h, so
        # /100 converts to mm/h and *3 accumulates over the 3 h timestep.
        data = data.reshape((1, 480, 1440)) / 100 * 3
        # Pixel centres of the 0.25 deg grid; latitude rows run north -> south
        longitudes = np.linspace(0.125, 360 - 0.125, 1440)
        latitudes = -np.linspace(-60 + 0.125, 60 - 0.125, 480)
        return (data, latitudes, longitudes)

    def _readBytes(self, fIn):
        # Read a single big-endian signed 16-bit integer from the stream.
        return (struct.unpack('>h', fIn.read(2))[0])
"NetCDF"
] | 901edc44b9295976d68cfe786d1b2bb44297c07e2b09960b3d779ca6e5de7ce8 |
"""
Loadable subclass
"""
# This file is part of Munin.
# Munin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Munin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Munin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This work is Copyright (C)2006 by Andreas Jacobsen
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This file has no alliance specific stuff as far as I can tell.
# qebab, 22/06/08
import re
from munin import loadable
class cost(loadable.loadable):
    """IRC command: report the resource cost and the resulting ship value of
    buying a given number of ships, optionally adjusted for a government's
    production-cost bonus."""

    def __init__(self, cursor):
        super().__init__(cursor, 1)
        # Matches: <number> <shipname> [government]. The number accepts an
        # optional decimal part and an M/K (million/thousand) suffix.
        self.paramre = re.compile(r"^\s*(\d+(?:\.\d+)?[MmKk]?)\s+(\S+)(?:\s+(\S+))?")
        self.usage = self.__class__.__name__ + " <number> <shipname> [government]"

    def execute(self, user, access, irc_msg):
        # Handle one invocation; returns 1 on success, 0 on any user error.
        m = self.paramre.search(irc_msg.command_parameters)
        if not m:
            irc_msg.reply("Usage: %s" % (self.usage,))
            return 0

        # e.g. "1.5M" -> 1500000 (helper inherited from loadable)
        ship_number = self.human_readable_number_to_integer(m.group(1))
        ship_name = m.group(2)

        gov_name = ""
        prod_bonus = 1  # multiplier applied to the per-ship resource cost
        if m.group(3):
            lower_gov_name = m.group(3).lower()
            # NOTE(review): `in` is a substring test, so any substring of the
            # government name matches (e.g. "ism" or "ta"); presumably a
            # prefix/abbreviation match was intended - confirm.
            if lower_gov_name in "totalitarianism":
                prod_bonus = 1 - float(
                    self.config.get("Planetarion", "totalitarianism_cost_reduction")
                )
                gov_name = "Totalitarianism"
            elif lower_gov_name in "democracy":
                prod_bonus = 1 - float(
                    self.config.get("Planetarion", "democracy_cost_reduction")
                )
                gov_name = "Democracy"

        if access < self.level:
            irc_msg.reply("You do not have enough access to use this command")
            return 0

        ship = self.get_ship_from_db(ship_name, irc_msg.round)
        if not ship:
            irc_msg.reply("%s is not a ship" % (ship_name))
            return 0

        # Per-resource cost: the government discount is rounded down per ship
        # before multiplying by the quantity.
        metal = int(ship["metal"] * prod_bonus) * ship_number
        crystal = int(ship["crystal"] * prod_bonus) * ship_number
        eonium = int(ship["eonium"] * prod_bonus) * ship_number
        # NOTE(review): 150 and 100 look like game-defined conversion factors
        # (resources -> value); confirm against the game rules.
        resource_value = (metal + crystal + eonium) / 150
        ship_value = round((ship["total_cost"] * ship_number) / 100)

        reply = "Buying %s %s will cost %s metal, %s crystal and %s eonium" % (
            ship_number,
            ship["name"],
            metal,
            crystal,
            eonium,
        )
        if prod_bonus != 1:
            reply += " as %s" % (gov_name)
        reply += ". This gives %s ship value (%s increase)" % (
            ship_value,
            round(ship_value - resource_value),
        )
        irc_msg.reply(reply)
        return 1
| munin/munin | munin/mod/cost.py | Python | gpl-2.0 | 3,309 | [
"CRYSTAL"
] | 5ccd0f88a8be06aebe06312bbe487bbee14165009a3dd4d94927799fc5c4c8be |
from __future__ import absolute_import, division, print_function
from itertools import chain
from .utils_test import add, inc # noqa: F401
def ishashable(x):
    """ Is x hashable?

    Examples
    --------
    >>> ishashable(1)
    True
    >>> ishashable([1])
    False
    """
    try:
        hash(x)
    except TypeError:
        return False
    return True
def istask(x):
    """ Is x a runnable task?

    A task is a non-empty tuple with a callable first argument.
    Always returns a bool (the previous version leaked non-boolean falsy
    values, e.g. it returned ``()`` for an empty tuple).

    Examples
    --------
    >>> inc = lambda x: x + 1
    >>> istask((inc, 1))
    True
    >>> istask(1)
    False
    """
    # type(x) is tuple (not isinstance) on purpose: tuple subclasses such as
    # namedtuples are treated as data, not as tasks.
    return type(x) is tuple and bool(x) and callable(x[0])
def has_tasks(dsk, x):
    """Whether ``x`` has anything to compute.

    Returns True if:
    - ``x`` is a task
    - ``x`` is a key in ``dsk``
    - ``x`` is a list that contains any tasks or keys
    """
    if istask(x):
        return True
    try:
        if x in dsk:
            return True
    except Exception:
        # x is unhashable (or has a broken __hash__/__eq__), so it cannot be
        # a key. The previous bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit.
        pass
    if isinstance(x, list):
        return any(has_tasks(dsk, i) for i in x)
    return False
def preorder_traversal(task):
    """A generator to preorder-traverse a task.

    Nested lists are announced by yielding the ``list`` builtin itself as a
    sentinel before their contents.
    """
    for element in task:
        if istask(element):
            for sub in preorder_traversal(element):
                yield sub
        elif isinstance(element, list):
            yield list
            for sub in preorder_traversal(element):
                yield sub
        else:
            yield element
def _get_nonrecursive(d, x, maxdepth=1000):
    # Evaluate ``x`` against the graph ``d`` without Python recursion.
    # Non-recursive. DAG property is checked upon reaching maxdepth.
    _list = lambda *args: list(args)

    # We construct a nested hierarchy of tuples to mimic the execution stack
    # of frames that Python would maintain for a recursive implementation.
    # A frame is associated with a single task from a Dask.
    # A frame tuple has three elements:
    #    1) The function for the task.
    #    2) The arguments for the task (typically keys in the Dask).
    #       Arguments are stored in reverse order, and elements are popped
    #       as they are evaluated.
    #    3) The calculated results of the arguments from (2).
    stack = [(lambda x: x, [x], [])]
    while True:
        func, args, results = stack[-1]
        if not args:
            # All arguments of the current frame have been evaluated: call
            # the function and propagate the value to the parent frame.
            val = func(*results)
            if len(stack) == 1:
                return val
            stack.pop()
            stack[-1][2].append(val)
            continue
        elif maxdepth and len(stack) > maxdepth:
            # Suspiciously deep: check once whether the graph is cyclic; if
            # it is acyclic, disable the check and keep going.
            cycle = getcycle(d, x)
            if cycle:
                cycle = '->'.join(cycle)
                raise RuntimeError('Cycle detected in Dask: %s' % cycle)
            maxdepth = None

        key = args.pop()
        if isinstance(key, list):
            # A list is evaluated as (identity, reversed elements).
            stack.append((_list, list(key[::-1]), []))
            continue
        elif ishashable(key) and key in d:
            # A key is evaluated by evaluating what it maps to.
            args.append(d[key])
            continue
        elif istask(key):
            # New frame: function is key[0], arguments (reversed) are key[1:].
            stack.append((key[0], list(key[:0:-1]), []))
        else:
            # Literal value.
            results.append(key)
def _get_recursive(d, x):
    # Recursive evaluation of ``x`` against graph ``d``; no cycle detection.
    if isinstance(x, list):
        return [_get_recursive(d, item) for item in x]
    if ishashable(x) and x in d:
        # x is a key: evaluate whatever it maps to
        return _get_recursive(d, d[x])
    if istask(x):
        func = x[0]
        evaluated = [_get_recursive(d, arg) for arg in x[1:]]
        return func(*evaluated)
    # Literal value
    return x
def get(d, x, recursive=False):
    """ Get value from Dask

    Examples
    --------
    >>> inc = lambda x: x + 1
    >>> d = {'x': 1, 'y': (inc, 'x')}

    >>> get(d, 'x')
    1
    >>> get(d, 'y')
    2
    """
    if isinstance(x, list):
        # Note: nested calls use the default (non-recursive) evaluator,
        # matching the historical behavior.
        return tuple(get(d, k) for k in x)
    if x in d:
        evaluator = _get_recursive if recursive else _get_nonrecursive
        return evaluator(d, x)
    raise KeyError("{0} is not a key in the graph".format(x))
def get_dependencies(dsk, key=None, task=None, as_list=False):
    """ Get the immediate tasks on which this task depends

    Examples
    --------
    >>> dsk = {'x': 1,
    ...        'y': (inc, 'x'),
    ...        'z': (add, 'x', 'y'),
    ...        'w': (inc, 'z'),
    ...        'a': (add, (inc, 'x'), 1)}

    >>> get_dependencies(dsk, 'x')
    set([])

    >>> get_dependencies(dsk, 'y')
    set(['x'])

    >>> get_dependencies(dsk, 'z')  # doctest: +SKIP
    set(['x', 'y'])

    >>> get_dependencies(dsk, 'w')  # Only direct dependencies
    set(['z'])

    >>> get_dependencies(dsk, 'a')  # Ignore non-keys
    set(['x'])

    >>> get_dependencies(dsk, task=(inc, 'x'))  # provide tasks directly
    set(['x'])
    """
    if key is not None:
        root = dsk[key]
    elif task is not None:
        root = task
    else:
        raise ValueError("Provide either key or task")

    # Breadth-first walk over the task structure, collecting every hashable
    # element that is a key of the graph.
    found = []
    frontier = [root]
    while frontier:
        next_frontier = []
        for item in frontier:
            kind = type(item)
            if kind is tuple and item and callable(item[0]):  # inlined istask()
                next_frontier.extend(item[1:])
            elif kind is list:
                next_frontier.extend(item)
            elif kind is dict:
                next_frontier.extend(item.values())
            else:
                try:
                    if item in dsk:
                        found.append(item)
                except TypeError:  # unhashable, so cannot be a key
                    pass
        frontier = next_frontier
    return found if as_list else set(found)
def get_deps(dsk):
    """ Get dependencies and dependents from dask dask graph

    >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
    >>> dependencies, dependents = get_deps(dsk)
    >>> dependencies
    {'a': set([]), 'c': set(['b']), 'b': set(['a'])}
    >>> dependents
    {'a': set(['b']), 'c': set([]), 'b': set(['c'])}
    """
    dependencies = dict((name, get_dependencies(dsk, task=value))
                        for name, value in dsk.items())
    return dependencies, reverse_dict(dependencies)
def flatten(seq):
    """
    >>> list(flatten([1]))
    [1]

    >>> list(flatten([[1, 2], [1, 2]]))
    [1, 2, 1, 2]

    >>> list(flatten([[[1], [2]], [[1], [2]]]))
    [1, 2, 1, 2]

    >>> list(flatten(((1, 2), (1, 2))))  # Don't flatten tuples
    [(1, 2), (1, 2)]

    >>> list(flatten((1, 2, [3, 4])))  # support heterogeneous
    [1, 2, 3, 4]
    """
    # A string is yielded whole, not iterated character by character.
    if isinstance(seq, str):
        yield seq
        return
    for element in seq:
        if not isinstance(element, list):
            yield element
        else:
            for nested in flatten(element):
                yield nested
def reverse_dict(d):
    """
    >>> a, b, c = 'abc'
    >>> d = {a: [b, c], b: [c]}
    >>> reverse_dict(d)  # doctest: +SKIP
    {'a': set([]), 'b': set(['a']}, 'c': set(['a', 'b'])}
    """
    # Every key and every referenced value gets an entry, even if nothing
    # points at it.
    result = {}
    for term in chain(d.keys(), *d.values()):
        result.setdefault(term, set())
    for source, targets in d.items():
        for target in targets:
            result[target].add(source)
    return result
def subs(task, key, val):
    """ Perform a substitution on a task

    Replaces every occurrence of ``key`` in ``task`` (including nested
    tasks and lists) with ``val``.

    Examples
    --------
    >>> subs((inc, 'x'), 'x', 1)  # doctest: +SKIP
    (inc, 1)
    """
    if not istask(task):
        # The type check guards against surprising __eq__ implementations
        # (e.g. elementwise comparisons): only same-type objects are compared,
        # and any exception raised by == is treated as "not equal".
        try:
            if type(task) is type(key) and task == key:
                return val
        except Exception:
            pass
        if isinstance(task, list):
            return [subs(x, key, val) for x in task]
        return task
    newargs = []
    for arg in task[1:]:
        if istask(arg):
            arg = subs(arg, key, val)
        elif isinstance(arg, list):
            arg = [subs(x, key, val) for x in arg]
        elif type(arg) is type(key) and arg == key:
            arg = val
        newargs.append(arg)
    # Keep the callable (task[:1]) and rebuild the argument tuple.
    return task[:1] + tuple(newargs)
def _toposort(dsk, keys=None, returncycle=False, dependencies=None):
    # Stack-based depth-first search traversal.  This is based on Tarjan's
    # method for topological sorting (see wikipedia for pseudocode)
    #
    # keys: restrict the sort to (the ancestors of) these keys.
    # returncycle: if True, return the first cycle found ([] if none) instead
    #     of the topological order.
    # dependencies: optional pre-computed {key: dependencies} mapping.
    if keys is None:
        keys = dsk
    elif not isinstance(keys, list):
        keys = [keys]
    if not returncycle:
        ordered = []

    # Nodes whose descendents have been completely explored.
    # These nodes are guaranteed to not be part of a cycle.
    completed = set()

    # All nodes that have been visited in the current traversal.  Because
    # we are doing depth-first search, going "deeper" should never result
    # in visiting a node that has already been seen.  The `seen` and
    # `completed` sets are mutually exclusive; it is okay to visit a node
    # that has already been added to `completed`.
    seen = set()

    if dependencies is None:
        dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)

    for key in keys:
        if key in completed:
            continue
        nodes = [key]
        while nodes:
            # Keep current node on the stack until all descendants are visited
            cur = nodes[-1]
            if cur in completed:
                # Already fully traversed descendants of cur
                nodes.pop()
                continue
            seen.add(cur)

            # Add direct descendants of cur to nodes stack
            next_nodes = []
            for nxt in dependencies[cur]:
                if nxt not in completed:
                    if nxt in seen:
                        # Cycle detected!  Reconstruct it by unwinding the
                        # stack back to the first occurrence of nxt.
                        cycle = [nxt]
                        while nodes[-1] != nxt:
                            cycle.append(nodes.pop())
                        cycle.append(nodes.pop())
                        cycle.reverse()
                        if returncycle:
                            return cycle
                        else:
                            cycle = '->'.join(cycle)
                            raise RuntimeError('Cycle detected in Dask: %s' % cycle)
                    next_nodes.append(nxt)

            if next_nodes:
                nodes.extend(next_nodes)
            else:
                # cur has no more descendants to explore, so we're done with it
                if not returncycle:
                    ordered.append(cur)
                completed.add(cur)
                seen.remove(cur)
                nodes.pop()
    if returncycle:
        return []
    return ordered
def toposort(dsk, dependencies=None):
    """ Return a list of keys of dask sorted in topological order.

    dependencies: optional pre-computed {key: dependencies} mapping; when
    omitted it is derived from ``dsk``.
    """
    return _toposort(dsk, dependencies=dependencies)
def getcycle(d, keys):
    """ Return a list of nodes that form a cycle if Dask is not a DAG.

    Returns an empty list if no cycle is found.  The returned list starts
    and ends with the same node.

    ``keys`` may be a single key or list of keys.

    Examples
    --------
    >>> d = {'x': (inc, 'z'), 'y': (inc, 'x'), 'z': (inc, 'y')}
    >>> getcycle(d, 'x')
    ['x', 'z', 'y', 'x']

    See Also
    --------
    isdag
    """
    return _toposort(d, keys=keys, returncycle=True)
def isdag(d, keys):
    """ Does Dask form a directed acyclic graph when calculating keys?

    ``keys`` may be a single key or list of keys.

    Examples
    --------
    >>> inc = lambda x: x + 1
    >>> isdag({'x': 0, 'y': (inc, 'x')}, 'y')
    True
    >>> isdag({'x': (inc, 'y'), 'y': (inc, 'x')}, 'y')
    False

    See Also
    --------
    getcycle
    """
    # A graph is a DAG iff no cycle is reachable from keys.
    return not getcycle(d, keys)
def quote(x):
    """ Ensure that this value remains this value in a dask graph

    Some values in dask graph take on special meaning. Sometimes we want to
    ensure that our data is not interpreted but remains literal.

    >>> quote((add, 1, 2))  # doctest: +SKIP
    (tuple, [add, 1, 2])
    """
    if not istask(x):
        return x
    # Wrap the task in (tuple, [...]) so the scheduler rebuilds it as data
    # instead of executing it.
    return (tuple, [quote(element) for element in x])
| jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/dask/core.py | Python | mit | 11,612 | [
"VisIt"
] | f48b2fa1fc437cb9a9ae527bc5b2241dc41a24e93552e44277a883e1c4f7fc5a |
#
# Author: Henrique Pereira Coutada Miranda
# Run a IP calculation using yambo
#
from __future__ import print_function
import sys
from yambopy import *
from qepy import *
import argparse
from schedulerpy import *
import matplotlib.pyplot as plt
#parse options
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-dg','--doublegrid', action="store_true", help='Use double grid')
parser.add_argument('-c', '--calc', action="store_true", help='calculate the IP absorption')
parser.add_argument('-p', '--plot', action="store_true", help='plot the results')
args = parser.parse_args()

if len(sys.argv)==1:
    # No arguments given: show the usage and stop
    parser.print_help()
    sys.exit(1)

# Executable names and run folders
yambo = 'yambo'
p2y = 'p2y'
ypp = 'ypp'
folder = 'ip'
prefix = 'bn'
scheduler = Scheduler.factory

#check if the SAVE folder is present
if not os.path.isdir('database/SAVE'):
    print('preparing yambo database')
    p2y_run = scheduler()
    p2y_run.add_command('mkdir -p database')
    p2y_run.add_command('cd nscf/bn.save; %s > %s.log'%(p2y,p2y))
    p2y_run.add_command('%s > %s.log'%(yambo,yambo))
    p2y_run.add_command('mv SAVE ../../database/')
    p2y_run.run()

# Link the SAVE database into the run folder
if not os.path.islink('%s/SAVE'%folder):
    s = scheduler()
    s.add_command('mkdir -p %s'%folder)
    s.add_command('cd %s; ln -s ../database/SAVE .'%folder)
    if not args.doublegrid: s.add_command('cd .. ; rm -f database/SAVE/ndb.Double_Grid')
    s.run()

#initialize the double grid
if args.doublegrid and not os.path.isfile('database/SAVE/ndb.Double_Grid'):
    #check if the double grid nscf cycle is present
    if os.path.isdir('nscf_double/%s.save'%prefix):
        print('nscf_double calculation found!')
    else:
        print('nscf_double calculation not found!')
        exit()

    if not os.path.isdir('database_double/SAVE'):
        print('preparing yambo double database')
        shell = scheduler()
        shell.add_command('cd nscf_double/%s.save; %s; %s'%(prefix,p2y,yambo))
        shell.add_command('cd ../../')
        shell.add_command('mkdir -p database_double')
        shell.add_command('mv nscf_double/%s.save/SAVE database_double'%prefix)
        shell.run()

    #initialize the double grid
    print("creating double grid")
    # NOTE(review): the runlevel string '%s -m' is passed unformatted here,
    # unlike the '%s -o g -V all'%yambo call below - confirm this is what
    # YamboIn.from_runlevel expects when executable= is given.
    yppin = YamboIn.from_runlevel('%s -m',filename='ypp.in',executable=ypp,folder='database')
    yppin['DbGd_DB1_paths'] = ["../database_double"]
    yppin.arguments.append('SkipCheck')

    yppin.write('database/ypp.in')
    shell = scheduler()
    shell.add_command('cd database; %s'%ypp)
    shell.add_command('cd ../%s ; rm -rf yambo o-*'%folder)
    #print(shell)
    shell.run()

if args.calc:
    #create the yambo input file
    y = YamboIn.from_runlevel('%s -o g -V all'%yambo,folder=folder)
    y['FFTGvecs'] = [30,'Ry']   # plane-wave cutoff (yambo input variable)
    y['BndsRnXs'] = [1,30]      # band range of the response function
    y['QpntsRXd'] = [[1,1],'']  # q-point selection
    y['ETStpsXd'] = 500         # number of energy steps of the spectrum
    y.write('%s/yambo_run.in'%folder)
    print('running yambo')
    yambo_run = scheduler()
    yambo_run.add_command('cd %s; %s -F yambo_run.in -J yambo'%(folder,yambo))
    if args.doublegrid: yambo_run.add_command('cd ..; rm database/SAVE/ndb.Double_Grid')
    yambo_run.run()

if args.plot:
    # Plot absorption spectrum
    data=np.genfromtxt('%s/o-yambo.eps_q1_ip'%folder,usecols=(0,1))
    fig = plt.figure(figsize=(4,5))
    ax = fig.add_axes( [ 0.20, 0.20, 0.70, 0.70 ])
    plt.plot(data[:,0],data[:,1],'-',c='b',label='IP Absorption')
    plt.legend()
    plt.show()
| alexmoratalla/yambopy | tutorial/bn/ip_bn.py | Python | bsd-3-clause | 3,433 | [
"Yambo"
] | ae12322553ad114ecd87e00a79a376ef8fe420ce99af26171ade4253946d7e27 |
# -*- coding: utf-8 -*-
"""
Created on 2 Jul 2019
@author: Thera Pals & Éric Piel
Copyright © 2019-2021 Thera Pals, Éric Piel, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
from can import CanError
import can
from canopen import node
import canopen # TODO add to project requirements.
from canopen.nmt import NmtError
import logging
import numbers
from odemis import model, util
import os
import pkg_resources
import random
import time
# The main objects that could be of interest are:
# 0x6100 AI Input FV # Field Value: location of the laser spot on the linear CCD (in px)
# 0x6126 AI Scaling Factor # Conversion ratio from Input FV to Input PV (µm/px)
# 0x6130 AI Input PV # Process Value: location converted to focus position (unit: µm)
# 0x6138 AI Tare zero # Net PV = Input PV - Tare zero (unit: µm)
# 0x6139 AI Autotare # write 0x61726174 ('tara' in hex), to set Tare zero to the current value
# 0x6140 AI Net PV # Net PV = Input PV - Tare zero (unit: µm)
POS_SDO = "AI Net PV"
# Expected values, to compare to what is returned by the device
VENDOR_ID = 0xc0ffee # Not an official ID, but tasty enough for our internal use
PRODUCT_CODE = 0x0001
# The maximum acceptable duration since a position update.
# If the latest known position is "older", the getter will explicitly read the position.
MAX_POS_AGE = 0.1 # s
class FocusTrackerCO(model.HwComponent):
    """
    Driver for the in-house (Delmic) focus tracker device.
    It's connected via CANopen.
    """

    def __init__(self, name, role, channel, node_idx, datasheet=None, inverted=None, **kwargs):
        """
        channel (str): channel name of can bus (eg, "can0"). Use "fake" for a simulator.
        node_idx (int): node index of focus tracker
        datasheet (str or None): absolute or relative path to .dcf configuration file
            This can be used to set default parameters value. If None, it will use the
            default .eds file.
        inverted (set of str): pass {"z"} to invert the reported position (ie, * -1).
        """
        model.HwComponent.__init__(self, name, role, **kwargs)

        if inverted is None:
            inverted = set()
        if set(inverted) > {"z"}:
            raise ValueError("Only axis z exists, but got inverted axes: %s." %
                             (", ".join(inverted),))
        self._inverted = "z" in inverted

        # Conveniently, python-canopen accepts both an opened File and a filename (str)
        if datasheet is None:
            logging.debug("Using default focus tracker datasheet")
            datasheet = pkg_resources.resource_filename("odemis.driver", "FocusTracker.eds")
        elif not os.path.isabs(datasheet):
            # For relative path, use the current path as root
            datasheet = os.path.join(os.path.dirname(__file__), datasheet)

        self.network, self.node = self._connect(channel, node_idx, datasheet)
        # For recovery
        self._channel = channel
        self._node_idx = node_idx
        self._datasheet = datasheet

        # Do not leave canopen log to DEBUG, even if the general log level is set
        # to DEBUG, because it generates logs for every CAN packet, which is too much.
        canlog = logging.getLogger("canopen")
        canlog.setLevel(max(canlog.getEffectiveLevel(), logging.INFO))

        self._swVersion = "python-canopen v%s, python-can v%s" % (canopen.__version__, can.__version__)
        # Revision number packs major/minor as two 16-bit halves
        rev_num = self.node.sdo["Identity object"]["Revision number"].raw
        major, minor = (rev_num >> 16) & 0xffff, rev_num & 0xffff
        sn = self.node.sdo["Identity object"]["Serial number"].raw
        self._hwVersion = "Focus tracker {}.{} (s/n : {})".format(major, minor, sn)
        logging.info("Connected to %s", self._hwVersion)

        # Create SDO communication objects to communicate
        self._position_sdo = self.node.sdo[POS_SDO][1]

        # The position is updated by messages sent regularly (50Hz) from the device.
        # However, we cannot rely only on this mechanism to update the position,
        # as it wouldn't detect loss of connection, and would silently report
        # old values. So, whenever the position is explicitly read, we check it
        # was updated recently, and if not, attempt to recover, or raise an error.
        self._last_pos_update = 0
        self.position = model.VigilantAttribute({"z": 0}, unit="m", readonly=True, getter=self._get_position)
        # Note that the range of the position is undefined, even in pixels, the
        # value can go out of the actual CCD, as it could be that the gaussian
        # pick is outside. In addition, the scale factor could in theory change
        # on-the-fly (although, typically only during calibration).
        self._updatePosition(self._read_position())

        # Set callback for the position update
        self._configure_device()

        # TODO: add a heartbeat monitor to automatically attempt connection recovery

    def _connect(self, channel, node_idx, datasheet):
        """
        return network, node
        raise HwError() if the device is not connected
        raise ValueError(): if the device doesn't seem the right one
        """
        # Connect to the CANbus and the CANopen network.
        network = canopen.Network()
        bustype = 'socketcan' if channel != 'fake' else 'virtual'
        try:
            network.connect(bustype=bustype, channel=channel)
            network.check()
        except CanError:
            raise model.HwError("CAN network %s not found." % (channel,))
        except OSError as ex:
            if ex.errno == 19:  # No such device
                raise model.HwError("CAN network %s not found." % (channel,))
            raise

        # Tell CANopen what we *expect* to find
        if channel == 'fake':
            node = FakeRemoteNode(node_idx, datasheet)
        else:
            node = canopen.RemoteNode(node_idx, datasheet)

        # Note: add_node() supports a "upload_eds" flag to read the object dict from
        # the device. However the current firmware doesn't support that.
        network.add_node(node)

        # Check the device is there, and also force the state to be updated
        try:
            if channel != "fake":
                node.nmt.wait_for_heartbeat(timeout=5)
        except NmtError:
            raise model.HwError("Focus tracker not found on channel %s with ID %s" % (channel, node_idx))
        logging.debug("Device is in state %s", node.nmt.state)

        # If the device is stopped, it won't answer any SDO
        if node.nmt.state not in ("OPERATIONAL", "PRE-OPERATIONAL"):
            node.nmt.state = "PRE-OPERATIONAL"
            logging.debug("Turning on the device to state %s", node.nmt.state)

        # Check that the device has the right Vendor ID and Product code, mostly
        # in case the node index corresponds to a different device, also on the network.
        vid = node.sdo["Identity object"]["Vendor-ID"].raw
        pcode = node.sdo["Identity object"]["Product code"].raw
        if vid != VENDOR_ID or pcode != PRODUCT_CODE:
            raise ValueError("Device %d on channel %s doesn't seem to be a FocusTracker (vendor 0x%04x, product 0x%04x)" %
                             (node_idx, channel, vid, pcode))

        return network, node

    def _configure_device(self):
        """Configure the node to broadcast the position via TPDO and register the callback."""
        # Configure for automatic transmission (Transmit Process Data Object)
        # For some background info, see https://canopen.readthedocs.io/en/latest/pdo.html
        # The focus tracker typically sends the position at ~50Hz.
        self.node.nmt.state = "PRE-OPERATIONAL"
        # Read PDO configuration from node
        self.node.tpdo.read()
        # Need to reset, as it can only send one variable at a time. (TPDOs
        # apparently can send 8bytes at a time, while the values take 4 bytes,
        # so maybe it's a bug in the device?)
        self.node.tpdo[1].clear()
        self.node.tpdo[1].add_variable(POS_SDO, 1)
        self.node.tpdo[1].enabled = True
        self.node.tpdo.save()
        # Change state to operational (NMT start)
        self.node.nmt.state = "OPERATIONAL"
        self.node.tpdo[1].add_callback(self._on_tpdo)

    def terminate(self):
        """Disconnect from CAN bus."""
        if self.network:
            # Turn "off" the device (stops sending TPDOs)
            self.node.nmt.state = "STOPPED"
            self.network.sync.stop()
            self.network.disconnect()
            self.network = None
        super().terminate()

    def _try_recover(self):
        """Block until the connection to the device is re-established (retries forever)."""
        self.state._set_value(model.HwError("Connection lost, reconnecting..."), force_write=True)
        # Retry to connect to the device, infinitely
        while True:
            if self.network:
                try:
                    self.network.disconnect()
                except Exception:
                    logging.exception("Failed closing the previous network")
                self.network = None
                self.node = None
            try:
                logging.debug("Searching for the device %d on bus %s", self._node_idx, self._channel)
                self.network, self.node = self._connect(self._channel, self._node_idx, self._datasheet)
                self._position_sdo = self.node.sdo[POS_SDO][1]
                self._configure_device()
            except model.HwError as ex:
                logging.info("%s", ex)
            except Exception:
                logging.exception("Unexpected error while trying to recover device")
                raise
            else:
                # We found it back!
                break
        # it now should be accessible again
        self.state._set_value(model.ST_RUNNING, force_write=True)
        logging.info("Recovered device on bus %s", self._channel)

    def updateMetadata(self, md):
        """Handle MD_POS_COR specially: store it on the device as "Tare zero"."""
        if model.MD_POS_COR in md:
            # Set the MD_POS_COR as Tare zero, so that we don't need to do the
            # subtraction ourselves... and it's stays stored as long as the device
            # is powered up (main advantage).
            pos_cor = md[model.MD_POS_COR]
            if not isinstance(pos_cor, numbers.Real):
                raise ValueError("MD_POS_COR must be a float, but got %s" % (pos_cor,))
            self.node.sdo["AI Tare zero"][1].raw = pos_cor * 1e6  # device uses µm
            # Read back the actual value (to read the floating error caused by float32)
            md[model.MD_POS_COR] = self.node.sdo["AI Tare zero"][1].raw * 1e-6
            logging.info("Updated MD_POS_COR to %s", md[model.MD_POS_COR])
            # Force an update of the position, with the new shift
            self._updatePosition(self._read_position())
        model.HwComponent.updateMetadata(self, md)

    def _read_position(self):
        """
        return (float): The current position of the laser on the linear ccd, in m
        """
        try:
            pos = self._position_sdo.raw
        except CanError:
            logging.exception("Error reading position, will try to reconnect")
            # TODO: should this be blocking? Or maybe stop after a timeout?
            self._try_recover()  # Blocks until the device is reconnected
            pos = self._position_sdo.raw
        return pos * 1e-6  # device reports µm

    def _on_tpdo(self, pdos):
        """
        Callback when the TPDOs are received
        pdos (pdo.Map): the variables received
        """
        # This normally happens at 50Hz, so no log
        # logging.debug("received TPDO with %s = %s", pdos[0].name, pdos[0].raw)
        pos = pdos[0].raw * 1e-6
        # TODO: this is updated very often, and is blocking the reception. So it
        # might be safer to update the position in a separate thread
        self._updatePosition(pos)

    def _updatePosition(self, pos):
        """Publish a new position (in m), applying the inversion if configured."""
        if self._inverted:
            pos = -pos
        # This normally happens at 50Hz, so no log
        # logging.debug("Reporting new position at %s", pos)
        p = {"z": pos}
        self.position._set_value(p, force_write=True)
        self._last_pos_update = time.time()

    def _get_position(self):
        """
        getter of the .position VA
        """
        # If the TPDO stream went silent, read explicitly (and possibly fail
        # explicitly) rather than silently reporting a stale value.
        if self._last_pos_update < time.time() - MAX_POS_AGE:
            # Force reading the position explicitly (and possibly fail explicitly)
            logging.info("Reading position explicitly as last update was %g s ago",
                         time.time() - self._last_pos_update)
            pos = self._read_position()
            self._updatePosition(pos)
        return self.position._value
# The size of the CCD, plus a margin corresponding to where the gaussian peak
# could be when it's on the border.
INPUT_FV_RANGE = [-50, 4096 + 50] # px
class FakeRemoteNode(canopen.RemoteNode):
    """Simulated focus tracker node, used when channel == 'fake' (no hardware)."""

    # Note: in reality, idx and subidx can be either a string or a int.
    # We only support one, so pick the same as in the actual driver.
    _fake_values = [
        # idx, subidx, initial value
        ('AI Input FV', 1, 100),
        ('AI Scaling Factor', 1, 1),
        ('AI Input PV', 1, 100),
        ('AI Tare zero', 1, 0),
        ('AI Net PV', 1, 100),
        ("Identity object", "Vendor-ID", VENDOR_ID),
        ("Identity object", "Product code", PRODUCT_CODE),
        ("Identity object", "Revision number", 0x00010001),
        ("Identity object", "Serial number", 0x123fa4e),
    ]

    def __init__(self, node_idx, object_dict):
        super().__init__(node_idx, object_dict)
        self.tpdo = FakeTPDO(self)
        self.tpdo[1].map.append(self.sdo[POS_SDO][1])
        # Simulate the ~50 Hz position broadcast of the real device
        self._tpdo_updater = util.RepeatingTimer(0.08, self._updateTPDO, "TPDO updater")
        self._tpdo_updater.start()

    def add_sdo(self, rx_cobid, tx_cobid):
        # Called at init, to create the SdoClient
        client = SdoClientOverlay(rx_cobid, tx_cobid, self.object_dictionary)
        # Create fake arrays
        fake_sdos = {}
        for idx, subidx, v in self._fake_values:
            sdo_array = fake_sdos.setdefault(idx, {})
            sdo_array[subidx] = FakeSdoVariable(client[idx][subidx], self.object_dictionary[idx][subidx], init_value=v)
        # Force recomputing everything when Tare zero or Scaling Factor are set
        fake_sdos['AI Tare zero'][1].callback = self._updateTPDO
        fake_sdos['AI Scaling Factor'][1].callback = self._updateTPDO
        client.overlay.update(fake_sdos)
        self.sdo_channels.append(client)
        if self.network is not None:
            self.network.subscribe(client.tx_cobid, client.on_response)
        return client

    def _updateTPDO(self, _=None):
        # Generate a new position, randomly a little bit away from the previous position
        pos = self.sdo["AI Input FV"][1].raw
        pos = max(INPUT_FV_RANGE[0], min(pos + random.randint(-2, 2), INPUT_FV_RANGE[1]))
        # Propagate through the device's processing chain:
        # FV (px) -> PV (µm, via scaling factor) -> Net PV (µm, minus tare)
        self.sdo["AI Input FV"][1].raw = pos
        self.sdo["AI Input PV"][1].raw = pos * self.sdo["AI Scaling Factor"][1].raw
        self.sdo["AI Net PV"][1].raw = self.sdo["AI Input PV"][1].raw - self.sdo["AI Tare zero"][1].raw
        self.tpdo[1][f"{POS_SDO}.{POS_SDO} 1"].raw = self.sdo[POS_SDO][1].raw
        # Send the new pos
        self.tpdo._notify()
class FakeSdoVariable(canopen.sdo.base.Variable):
"""Simulates an SDO Variable object where the raw data can be set and read."""
def __init__(self, object_sdo, object_dict, init_value, callback=None):
super().__init__(object_sdo, object_dict)
self._raw = init_value
self.callback = callback
@property
def raw(self):
return self._raw
@raw.setter
def raw(self, value):
self._raw = value
if self.callback:
self.callback(self)
class FakeTPDO(canopen.pdo.TPDO):
def read(self):
pass
def save(self):
pass
def _notify(self):
for i, map in self.map.items():
for callback in map.callbacks:
callback(map)
class SdoClientOverlay(canopen.sdo.SdoClient):
"""Creates a dictionary that can be accessed with dots."""
def __init__(self, rx_cobid, tx_cobid, od):
super().__init__(rx_cobid, tx_cobid, od)
self.overlay = {}
def __getitem__(self, idx):
try:
return self.overlay[idx]
except KeyError:
return super().__getitem__(idx)
| delmic/odemis | src/odemis/driver/focustracker.py | Python | gpl-2.0 | 16,925 | [
"Gaussian"
] | f8f20439dfc09243b528b9f23489d49c93cd82ef02c370b867bb38e0b3ad371b |
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# L.Pritchard@scri.ac.uk
################################################################################
""" Feature module
Provides:
o Feature - class to wrap Bio.SeqFeature objects with drawing information
For drawing capabilities, this module uses reportlab to define colors:
http://www.reportlab.com
For dealing with biological information, the package uses BioPython:
http://www.biopython.org
"""
# ReportLab imports
from reportlab.lib import colors
# GenomeDiagram imports
from _Colors import ColorTranslator
import string
class Feature(object):
""" Class to wrap Bio.SeqFeature objects for GenomeDiagram
Provides:
Methods:
o __init__(self, parent=None, feature_id=None, feature=None,
color=colors.lightgreen) Called when the feature is
instantiated
o set_feature(self, feature) Wrap the passed feature
o get_feature(self) Return the unwrapped Bio.SeqFeature object
o set_color(self, color) Set the color in which the feature will
be drawn (accepts multiple formats: reportlab color.Color()
tuple and color.name, or integer representing Artemis color
o get_color(self) Returns color.Color tuple of the feature's color
o __getattr__(self, name) Catches attribute requests and passes them to
the wrapped Bio.SeqFeature object
Attributes:
o parent FeatureSet, container for the object
o id Unique id
o color color.Color, color to draw the feature
o hide Boolean for whether the feature will be drawn or not
o sigil String denoting the type of sigil to use for the feature.
Currently either "BOX" or "ARROW" are supported.
o arrowhead_length Float denoting length of the arrow head to be drawn,
relative to the bounding box height. The arrow shaft
takes up the remainder of the bounding box's length.
o arrowshaft_height Float denoting length of the representative arrow
shaft to be drawn, relative to the bounding box height.
The arrow head takes the full height of the bound box.
o name_qualifiers List of Strings, describes the qualifiers that may
contain feature names in the wrapped Bio.SeqFeature object
o label Boolean, 1 if the label should be shown
o label_font String describing the font to use for the feature label
o label_size Int describing the feature label font size
o label_color color.Color describing the feature label color
o label_angle Float describing the angle through which to rotate the
feature label in degrees (default = 45, linear only)
o label_position String, 'start', 'end' or 'middle' denoting where
to place the feature label (linear only)
o locations List of tuples of (start, end) ints describing where the
feature and any subfeatures start and end
o type String denoting the feature type
o name String denoting the feature name
o strand Int describing the strand on which the feature is found
"""
def __init__(self, parent=None, feature_id=None, feature=None,
color=colors.lightgreen, label=0, border=None, colour=None):
""" __init__(self, parent=None, feature_id=None, feature=None,
color=colors.lightgreen, label=0)
o parent FeatureSet containing the feature
o feature_id Unique id for the feature
o feature Bio.SeqFeature object to be wrapped
o color color.Color Color to draw the feature (overridden
by backwards compatible argument with UK spelling,
colour). Either argument is overridden if 'color'
is found in feature qualifiers
o border color.Color Color to draw the feature border, use
None for the same as the fill color, False for no border.
o label Boolean, 1 if the label should be shown
"""
#Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
self._colortranslator = ColorTranslator()
# Initialise attributes
self.parent = parent
self.id = feature_id
self.color = color # default color to draw the feature
self.border = border
self._feature = None # Bio.SeqFeature object to wrap
self.hide = 0 # show by default
self.sigil = 'BOX'
self.arrowhead_length = 0.5 # 50% of the box height
self.arrowshaft_height = 0.4 # 40% of the box height
self.name_qualifiers = ['gene', 'label', 'name', 'locus_tag', 'product']
self.label = label
self.label_font = 'Helvetica'
self.label_size = 6
self.label_color = colors.black
self.label_angle = 45
self.label_position = 'start'
if feature is not None:
self.set_feature(feature)
def set_feature(self, feature):
""" set_feature(self, feature)
o feature Bio.SeqFeature object to be wrapped
Defines the Bio.SeqFeature object to be wrapped
"""
self._feature = feature
self.__process_feature()
def __process_feature(self):
""" __process_feature(self)
Examine the feature to be wrapped, and set some of the Feature's
properties accordingly
"""
self.locations = []
bounds = []
if self._feature.sub_features == []:
start = self._feature.location.nofuzzy_start
end = self._feature.location.nofuzzy_end
#if start > end and self.strand == -1:
# start, end = end, start
self.locations.append((start, end))
bounds += [start, end]
else:
for subfeature in self._feature.sub_features:
start = subfeature.location.nofuzzy_start
end = subfeature.location.nofuzzy_end
#if start > end and self.strand == -1:
# start, end = end, start
self.locations.append((start, end))
bounds += [start, end]
self.type = str(self._feature.type) # Feature type
#TODO - Strand can vary with subfeatures (e.g. mixed strand tRNA)
if self._feature.strand is None:
#This is the SeqFeature default (None), but the drawing code
#only expects 0, +1 or -1.
self.strand = 0
else:
self.strand = int(self._feature.strand) # Feature strand
if 'color' in self._feature.qualifiers: # Artemis color (if present)
self.color = self._colortranslator.artemis_color( \
self._feature.qualifiers['color'][0])
self.name = self.type
for qualifier in self.name_qualifiers:
if qualifier in self._feature.qualifiers:
self.name = self._feature.qualifiers[qualifier][0]
break
#Note will be 0 to N for origin wrapping feature on genome of length N
self.start, self.end = min(bounds), max(bounds)
def get_feature(self):
""" get_feature(self) -> Bio.SeqFeature
Returns the unwrapped Bio.SeqFeature object
"""
return self._feature
def set_colour(self, colour):
"""Backwards compatible variant of set_color(self, color) using UK spelling."""
color = self._colortranslator.translate(colour)
self.color = color
def set_color(self, color):
""" set_color(self, color)
o color The color to draw the feature - either a colors.Color
object, an RGB tuple of floats, or an integer
corresponding to colors in colors.txt
Set the color in which the feature will be drawn
"""
#TODO - Make this into the set method for a color property?
color = self._colortranslator.translate(color)
self.color = color
def __getattr__(self, name):
""" __getattr__(self, name) -> various
If the Feature class doesn't have the attribute called for,
check in self._feature for it
"""
return getattr(self._feature, name) # try to get the attribute from the feature
################################################################################
# RUN AS SCRIPT
################################################################################
if __name__ == '__main__':
# Test code
gdf = Feature()
| bryback/quickseq | genescript/Bio/Graphics/GenomeDiagram/_Feature.py | Python | mit | 9,482 | [
"Biopython"
] | e4ed86e0008b30d3127938d000a165d1307f622bafd6e7480efd5895d59ff75d |
import random
from datetime import date, time, timedelta
from decimal import Decimal
from unittest import skipUnless
from django.conf import settings
from django.db.models import F, Func, Value
from django.db.models.functions import Concat
from django.test import TestCase
from django.utils import timezone
from django_bulk_update import helper
from .models import Person, Role, PersonUUID, Brand
from .fixtures import create_fixtures
class BulkUpdateTests(TestCase):
def setUp(self):
self.now = timezone.now().replace(microsecond=0) # mysql doesn't do microseconds. # NOQA
self.date = date(2015, 3, 28)
self.time = time(13, 0)
create_fixtures()
def _test_field(self, field, idx_to_value_function):
'''
Helper to do repeative simple tests on one field.
'''
# set
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
value = idx_to_value_function(idx)
setattr(person, field, value)
# update
Person.objects.bulk_update(people, update_fields=[field])
# check
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
saved_value = getattr(person, field)
expected_value = idx_to_value_function(idx)
self.assertEqual(saved_value, expected_value)
def test_simple_fields(self):
fn = lambda idx: idx + 27
for field in ('default', 'big_age', 'age', 'positive_age',
'positive_small_age', 'small_age'):
self._test_field(field, fn)
def test_boolean_field(self):
fn = lambda idx: [True, False][idx % 2]
self._test_field('certified', fn)
def test_null_boolean_field(self):
fn = lambda idx: [True, False, None][idx % 3]
self._test_field('null_certified', fn)
def test_char_field(self):
NAMES = ['Walter', 'The Dude', 'Donny', 'Jesus', 'Buddha', 'Clark']
fn = lambda idx: NAMES[idx % 5]
self._test_field('name', fn)
def test_email_field(self):
EMAILS = ['walter@mailinator.com', 'thedude@mailinator.com',
'donny@mailinator.com', 'jesus@mailinator.com',
'buddha@mailinator.com', 'clark@mailinator.com']
fn = lambda idx: EMAILS[idx % 5]
self._test_field('email', fn)
def test_file_path_field(self):
PATHS = ['/home/dummy.txt', '/Downloads/kitten.jpg',
'/Users/user/fixtures.json', 'dummy.png',
'users.json', '/home/dummy.png']
fn = lambda idx: PATHS[idx % 5]
self._test_field('file_path', fn)
def test_slug_field(self):
SLUGS = ['jesus', 'buddha', 'clark', 'the-dude', 'donny', 'walter']
fn = lambda idx: SLUGS[idx % 5]
self._test_field('slug', fn)
def test_text_field(self):
TEXTS = ['this is a dummy text', 'dummy text', 'bla bla bla bla bla',
'here is a dummy text', 'dummy', 'bla bla bla']
fn = lambda idx: TEXTS[idx % 5]
self._test_field('text', fn)
def test_url_field(self):
URLS = ['docs.djangoproject.com', 'news.ycombinator.com',
'https://docs.djangoproject.com', 'https://google.com',
'google.com', 'news.ycombinator.com']
fn = lambda idx: URLS[idx % 5]
self._test_field('url', fn)
def test_date_time_field(self):
fn = lambda idx: self.now - timedelta(days=1 + idx, hours=1 + idx)
self._test_field('date_time', fn)
def test_date_field(self):
fn = lambda idx: self.date - timedelta(days=1 + idx)
self._test_field('date', fn)
def test_time_field(self):
fn = lambda idx: time(1 + idx, idx)
self._test_field('time', fn)
def test_decimal_field(self):
fn = lambda idx: Decimal('1.%s' % (50 + idx * 7))
self._test_field('height', fn)
def test_float_field(self):
fn = lambda idx: float(idx) * 2.0
self._test_field('float_height', fn)
def test_data_field(self):
fn = lambda idx: {'x': idx}
self._test_field('data', fn)
def test_generic_ipaddress_field(self):
IPS = ['127.0.0.1', '192.0.2.30', '2a02:42fe::4', '10.0.0.1',
'8.8.8.8']
fn = lambda idx: IPS[idx % 5]
self._test_field('remote_addr', fn)
def test_image_field(self):
IMGS = ['kitten.jpg', 'dummy.png', 'user.json', 'dummy.png', 'foo.gif']
fn = lambda idx: IMGS[idx % 5]
self._test_field('image', fn)
self._test_field('my_file', fn)
def test_custom_fields(self):
values = {}
people = Person.objects.all()
people_dict = {p.name: p for p in people}
person = people_dict['Mike']
person.data = {'name': 'mikey', 'age': 99, 'ex': -99}
values[person.pk] = {'name': 'mikey', 'age': 99, 'ex': -99}
person = people_dict['Mary']
person.data = {'names': {'name': []}}
values[person.pk] = {'names': {'name': []}}
person = people_dict['Pete']
person.data = []
values[person.pk] = []
person = people_dict['Sandra']
person.data = [{'name': 'Pete'}, {'name': 'Mike'}]
values[person.pk] = [{'name': 'Pete'}, {'name': 'Mike'}]
person = people_dict['Ash']
person.data = {'text': 'bla'}
values[person.pk] = {'text': 'bla'}
person = people_dict['Crystal']
values[person.pk] = person.data
Person.objects.bulk_update(people)
people = Person.objects.all()
for person in people:
self.assertEqual(person.data, values[person.pk])
def test_update_fields(self):
"""
Only the fields in "update_fields" are updated
"""
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
Person.objects.bulk_update(people, update_fields=['age'])
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertEqual(person1.age, person2.age)
self.assertNotEqual(person1.height, person2.height)
def test_update_foreign_key_fields(self):
roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
person.role = roles[0] if idx % 2 == 0 else roles[1]
Person.objects.bulk_update(people)
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertEqual(person1.role.code, person2.role.code)
self.assertEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
def test_update_foreign_key_fields_explicit(self):
roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
person.role = roles[0] if idx % 2 == 0 else roles[1]
person.big_age += 40
Person.objects.bulk_update(people,
update_fields=['age', 'height', 'role'])
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertEqual(person1.role.code, person2.role.code)
self.assertEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
self.assertNotEqual(person1.big_age, person2.big_age)
def test_update_foreign_key_fields_explicit_with_id_suffix(self):
roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
person.role = roles[0] if idx % 2 == 0 else roles[1]
Person.objects.bulk_update(people,
update_fields=['age', 'height', 'role_id'])
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertEqual(person1.role.code, person2.role.code)
self.assertEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
def test_update_foreign_key_exclude_fields_explicit(self):
roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
person.role = roles[0] if idx % 2 == 0 else roles[1]
person.big_age += 40
Person.objects.bulk_update(people,
update_fields=['age', 'height'],
exclude_fields=['role'])
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertTrue(isinstance(person1.role, Role))
self.assertEqual(person2.role, None)
self.assertEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
self.assertNotEqual(person1.big_age, person2.big_age)
def test_update_foreign_key_exclude_fields_explicit_with_id_suffix(self):
roles = [Role.objects.create(code=1), Role.objects.create(code=2)]
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
person.role = roles[0] if idx % 2 == 0 else roles[1]
Person.objects.bulk_update(people,
update_fields=['age', 'height'],
exclude_fields=['role_id'])
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertTrue(isinstance(person1.role, Role))
self.assertEqual(person2.role, None)
self.assertEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
def test_exclude_fields(self):
"""
Only the fields not in "exclude_fields" are updated
"""
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
Person.objects.bulk_update(people, exclude_fields=['age'])
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertNotEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
def test_exclude_fields_with_tuple_exclude_fields(self):
"""
Only the fields not in "exclude_fields" are updated
"""
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
Person.objects.bulk_update(people, exclude_fields=('age',))
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertNotEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
def test_object_list(self):
"""
Pass in a list instead of a queryset for bulk updating
"""
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.big_age = idx + 27
Person.objects.bulk_update(list(people))
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
self.assertEqual(person.big_age, idx + 27)
def test_empty_list(self):
"""
Update no elements, passed as a list
"""
Person.objects.bulk_update([])
def test_empty_queryset(self):
"""
Update no elements, passed as a queryset
"""
people = Person.objects.filter(name="Aceldotanrilsteucsebces ECSbd")
Person.objects.bulk_update(people)
def test_one_sized_list(self):
"""
Update one sized list, check if have a syntax error
for some db backends.
"""
people = Person.objects.all()[:1]
Person.objects.bulk_update(list(people))
def test_one_sized_queryset(self):
"""
Update one sized list, check if have a syntax error
for some db backends.
"""
people = Person.objects.filter(name='Mike')
Person.objects.bulk_update(people)
def test_wrong_field_names(self):
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.big_age = idx + 27
self.assertRaises(TypeError, Person.objects.bulk_update,
people, update_fields=['somecolumn', 'name'])
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.big_age = idx + 27
self.assertRaises(TypeError, Person.objects.bulk_update,
people, exclude_fields=['somecolumn'])
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.big_age = idx + 27
self.assertRaises(TypeError, Person.objects.bulk_update,
people, update_fields=['somecolumn'],
exclude_fields=['someothercolumn'])
def test_batch_size(self):
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age += 1
person.height += Decimal('0.01')
updated_obj_count = Person.objects.bulk_update(people, batch_size=1)
self.assertEqual(updated_obj_count, len(people))
people2 = Person.objects.order_by('pk').all()
for person1, person2 in zip(people, people2):
self.assertEqual(person1.age, person2.age)
self.assertEqual(person1.height, person2.height)
@skipUnless(settings.DATABASES['default']['USER'] == 'postgres',
"ArrayField's are only available in PostgreSQL.")
def test_array_field(self):
"""
Test to 'bulk_update' a postgresql's ArrayField.
"""
Brand.objects.bulk_create([
Brand(name='b1', codes=['a', 'b']),
Brand(name='b2', codes=['x']),
Brand(name='b3', codes=['x', 'y', 'z']),
Brand(name='b4', codes=['1', '2']),
])
brands = Brand.objects.all()
for brand in brands:
brand.codes.append(brand.codes[0]*2)
Brand.objects.bulk_update(brands)
expected = ['aa', 'xx', 'xx', '11']
for value, brand in zip(expected, brands):
self.assertEqual(brand.codes[-1], value)
def test_uuid_pk(self):
"""
Test 'bulk_update' with a model whose pk is an uuid.
"""
# create
PersonUUID.objects.bulk_create(
[PersonUUID(age=c) for c in range(20, 30)])
# set
people = PersonUUID.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age = idx * 11
# update
PersonUUID.objects.bulk_update(people, update_fields=['age'])
# check
people = PersonUUID.objects.order_by('pk').all()
for idx, person in enumerate(people):
saved_value = person.age
expected_value = idx * 11
self.assertEqual(saved_value, expected_value)
def test_F_expresion(self):
# initialize
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age = idx*10
person.save()
# set
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
person.age = F('age') - idx
# update
Person.objects.bulk_update(people)
# check
people = Person.objects.order_by('pk').all()
for idx, person in enumerate(people):
saved_value = person.age
expected_value = idx*10 - idx
self.assertEqual(saved_value, expected_value)
def test_Func_expresion(self):
# initialize
ini_values = 'aA', 'BB', '', 'cc', '12'
people = Person.objects.order_by('pk').all()
for value, person in zip(ini_values, people):
person.name = value
person.text = value*2
person.save()
# set
people = Person.objects.order_by('pk').all()
for person in people:
person.name = Func(F('name'), function='UPPER')
person.text = Func(F('text'), function='LOWER')
# update
Person.objects.bulk_update(people)
# check
people = Person.objects.order_by('pk').all()
expected_values = 'AA', 'BB', '', 'CC', '12'
for expected_value, person in zip(expected_values, people):
saved_value = person.name
self.assertEqual(saved_value, expected_value)
expected_values = 'aaaa', 'bbbb', '', 'cccc', '1212'
for expected_value, person in zip(expected_values, people):
saved_value = person.text
self.assertEqual(saved_value, expected_value)
def test_Concat_expresion(self):
# initialize
ini_values_1 = 'a', 'b', 'c', 'd', 'e'
ini_values_2 = 'v', 'w', 'x', 'y', 'z'
people = Person.objects.order_by('pk').all()
for value1, value2, person in zip(ini_values_1, ini_values_2, people):
person.slug = value1
person.name = value2
person.save()
# set
people = Person.objects.order_by('pk').all()
for person in people:
person.text = Concat(F('slug'), Value('@'), F('name'), Value('|'))
# update
Person.objects.bulk_update(people)
# check
people = Person.objects.order_by('pk').all()
expected_values = 'a@v|', 'b@w|', 'c@x|', 'd@y|', 'e@z|'
for expected_value, person in zip(expected_values, people):
saved_value = person.text
self.assertEqual(saved_value, expected_value)
def test_different_deferred_fields(self):
# initialize
people = Person.objects.order_by('pk').all()
for person in people:
person.name = 'original name'
person.text = 'original text'
person.save()
# set
people1 = list(Person.objects.filter(age__lt=10).only('name'))
people2 = list(Person.objects.filter(age__gte=10).only('text'))
people = people1 + people2
for person in people:
if person.age < 10:
person.name = 'changed name'
else:
person.text = 'changed text'
# update
count = Person.objects.bulk_update(people)
# check
people = Person.objects.order_by('pk').all()
self.assertEquals(count, people.count())
for person in people:
if person.age < 10:
self.assertEquals(person.name, 'changed name')
self.assertEquals(person.text, 'original text')
else:
self.assertEquals(person.name, 'original name')
self.assertEquals(person.text, 'changed text')
def test_different_deferred_fields_02(self):
# initialize
people = Person.objects.order_by('pk').all()
for person in people:
person.name = 'original name'
person.text = 'original text'
person.save()
# set
people1 = list(Person.objects.filter(age__lt=10).only('name'))
people2 = list(Person.objects.filter(age__gte=10).only('text'))
people = people1 + people2
for person in people:
if person.age < 10:
person.name = 'changed name'
else:
person.text = 'changed text'
# update
count = Person.objects.bulk_update(people, exclude_fields=['name'])
# check
people = Person.objects.order_by('pk').all()
self.assertEquals(count, people.count())
for person in people:
if person.age < 10:
self.assertEquals(person.name, 'original name')
self.assertEquals(person.text, 'original text')
else:
self.assertEquals(person.name, 'original name')
self.assertEquals(person.text, 'changed text')
class NumQueriesTest(TestCase):
def setUp(self):
create_fixtures(5)
def test_num_queries(self):
"""
Queries:
- retrieve objects
- update objects
"""
people = Person.objects.order_by('pk').all()
self.assertNumQueries(2, Person.objects.bulk_update, people)
def test_already_evaluated_queryset(self):
"""
Queries:
- update objects
(objects are already retrieved, because of the previous loop)
"""
people = Person.objects.all()
for person in people:
person.age += 2
person.name = Func(F('name'), function='UPPER')
person.text = 'doc'
person.height -= Decimal(0.5)
self.assertNumQueries(1, Person.objects.bulk_update, people)
def test_explicit_fields(self):
"""
Queries:
- retrieve objects
- update objects
"""
people = Person.objects.all()
self.assertNumQueries(
2, Person.objects.bulk_update, people,
update_fields=['date', 'time', 'image', 'slug', 'height'],
exclude_fields=['date', 'url']
)
def test_deferred_fields(self):
"""
Queries:
- retrieve objects
- update objects
"""
people = Person.objects.all().only('date', 'url', 'age', 'image')
self.assertNumQueries(2, Person.objects.bulk_update, people)
def test_different_deferred_fields(self):
"""
Queries:
- retrieve objects
- update objects
"""
all_people = Person.objects
people1 = all_people.filter(age__lt=10).defer('date', 'url', 'age')
people2 = all_people.filter(age__gte=10).defer('url', 'name', 'big_age')
people = people1 | people2
self.assertNumQueries(2, Person.objects.bulk_update, people)
def test_deferred_fields_and_excluded_fields(self):
"""
Queries:
- retrieve objects
- update objects
"""
people = Person.objects.all().only('date', 'age', 'time', 'image', 'slug')
self.assertNumQueries(2, Person.objects.bulk_update, people,
exclude_fields=['date', 'url'])
def test_list_of_objects(self):
"""
Queries:
- update objects
(objects are already retrieved, because of the cast to list)
"""
people = list(Person.objects.all())
self.assertNumQueries(1, Person.objects.bulk_update, people)
def test_fields_to_update_are_deferred(self):
"""
As all fields in 'update_fields' are deferred,
a query will be done for each obj and field to retrieve its value.
"""
people = Person.objects.all().only('pk')
update_fields = ['date', 'time', 'image']
expected_queries = len(update_fields) * Person.objects.count() + 2
self.assertNumQueries(expected_queries, Person.objects.bulk_update,
people, update_fields=update_fields)
def test_no_field_to_update(self):
"""
Queries:
- retrieve objects
(as update_fields is empty, no update query will be done)
"""
people = Person.objects.all()
self.assertNumQueries(1, Person.objects.bulk_update,
people, update_fields=[])
def test_no_objects(self):
"""
Queries:
- retrieve objects
(as no objects is actually retrieved, no update query will be done)
"""
people = Person.objects.filter(name='xxx')
self.assertNumQueries(1, Person.objects.bulk_update,
people, update_fields=['age', 'height'])
def test_batch_size(self):
"""
Queries:
- retrieve objects
- update objects * 3
"""
self.assertEquals(Person.objects.count(), 5)
people = Person.objects.order_by('pk').all()
self.assertNumQueries(4, Person.objects.bulk_update,
people, batch_size=2)
class GetFieldsTests(TestCase):
total_fields = 24
def setUp(self):
create_fixtures()
def _assertEquals(self, fields, names):
self.assertEquals(
set(field.name for field in fields),
set(names),
)
def _assertIn(self, names, fields):
field_names = [field.name for field in fields]
for name in names:
self.assertIn(name, field_names)
def _assertNotIn(self, names, fields):
field_names = [field.name for field in fields]
for name in names:
self.assertNotIn(name, field_names)
def test_get_all_fields(self):
meta = Person.objects.first()._meta
update_fields = None
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta)
self.assertEquals(len(fields), self.total_fields)
def test_dont_get_primary_key(self):
meta = Person.objects.first()._meta
update_fields = None
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertIn(['id'], meta.get_fields()) # sanity check
self._assertNotIn(['id'], fields) # actual test
meta = PersonUUID.objects.create(age=3)._meta
update_fields = None
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertIn(['uuid'], meta.get_fields()) # sanity check
self._assertNotIn(['uuid'], fields) # actual test
def test_dont_get_reversed_relations(self):
meta = Person.objects.first()._meta
update_fields = None
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertIn(['companies'], meta.get_fields()) # sanity check
self._assertNotIn(['companies'], fields) # actual test
def test_dont_get_many_to_many_relations(self):
meta = Person.objects.first()._meta
update_fields = None
exclude_fields = None
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertIn(['jobs'], meta.get_fields()) # sanity check
self._assertNotIn(['jobs'], fields) # actual test
def test_update_fields(self):
meta = Person.objects.first()._meta
update_fields = ['age', 'email', 'text']
exclude_fields = []
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertEquals(fields, ['age', 'email', 'text'])
def test_update_fields_and_exclude_fields(self):
meta = Person.objects.first()._meta
update_fields = ['age', 'email', 'text']
exclude_fields = ['email', 'height']
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertEquals(fields, ['age', 'text'])
def test_empty_update_fields(self):
meta = Person.objects.first()._meta
update_fields = []
exclude_fields = ['email', 'height']
fields = helper.get_fields(update_fields, exclude_fields, meta)
self._assertEquals(fields, [])
def test_exclude_a_foreignkey(self):
meta = Person.objects.first()._meta
update_fields = None
exclude_fields = ['email', 'role']
fields = helper.get_fields(update_fields, exclude_fields, meta)
self.assertEquals(len(fields), self.total_fields - 2)
self._assertNotIn(['email', 'role'], fields)
def test_exclude_foreignkey_with_id_suffix(self):
meta = Person.objects.first()._meta
update_fields = None
exclude_fields = ['email', 'role_id']
fields = helper.get_fields(update_fields, exclude_fields, meta)
self.assertEquals(len(fields), self.total_fields - 2)
self._assertNotIn(['email', 'role'], fields)
def test_get_a_foreignkey(self):
    """A foreign key can be selected by its bare field name."""
    person_meta = Person.objects.first()._meta
    fields = helper.get_fields(['role', 'my_file'], None, person_meta)
    self._assertEquals(fields, ['role', 'my_file'])
def test_get_foreignkey_with_id_suffix(self):
    """A foreign key selected via '_id' is normalised to the field name."""
    person_meta = Person.objects.first()._meta
    fields = helper.get_fields(['role_id', 'my_file'], None, person_meta)
    self._assertEquals(fields, ['role', 'my_file'])
def test_obj_argument(self):
    """Passing the instance itself still yields all concrete fields."""
    person = Person.objects.first()
    fields = helper.get_fields(None, None, person._meta, person)
    self.assertEquals(len(fields), self.total_fields)
def test_only_get_not_deferred_fields(self):
    """Only the non-deferred fields of a .only() queryset are returned."""
    person = Person.objects.only('name', 'age', 'height').first()
    fields = helper.get_fields(None, None, person._meta, person)
    self._assertEquals(fields, ['name', 'age', 'height'])
def test_only_and_exclude_fields(self):
    """exclude_fields is applied on top of a .only() field restriction."""
    person = Person.objects.only('name', 'age', 'height').first()
    fields = helper.get_fields(None, ['age', 'date'], person._meta, person)
    self._assertEquals(fields, ['name', 'height'])
def test_only_and_exclude_fields_02(self):
    """exclude_fields is applied on top of a .defer() field restriction."""
    person = Person.objects.defer('age', 'height').first()
    fields = helper.get_fields(None, ['image', 'data'], person._meta, person)
    self.assertEquals(len(fields), self.total_fields - 4)
    self._assertNotIn(['age', 'height', 'image', 'data'], fields)
def test_update_fields_over_not_deferred_field(self):
    """An explicit update_fields list overrides the .only() restriction."""
    person = Person.objects.only('name', 'age', 'height').first()
    fields = helper.get_fields(['date', 'time', 'age'], None,
                               person._meta, person)
    self._assertEquals(fields, ['date', 'time', 'age'])
def test_update_fields_over_not_deferred_field_02(self):
    """An empty update_fields overrides .only() and yields no fields."""
    person = Person.objects.only('name', 'age', 'height').first()
    fields = helper.get_fields([], None, person._meta, person)
    self._assertEquals(fields, [])
def test_arguments_as_tuples(self):
    """update_fields/exclude_fields may be tuples as well as lists."""
    person_meta = Person.objects.first()._meta
    fields = helper.get_fields(('age', 'email', 'text'),
                               ('email', 'height'), person_meta)
    self._assertEquals(fields, ['age', 'text'])
def test_validate_fields(self):
    """get_fields raises TypeError for unknown or non-concrete field names."""
    person_meta = Person.objects.first()._meta
    # Each pair is an (update_fields, exclude_fields) combination that
    # names a non-existent field or a relation that cannot be updated.
    bad_combinations = [
        (['age', 'wrong_name', 'text'], ('email', 'height')),
        (('age', 'email', 'text'), ('email', 'bad_name')),
        (('companies', ), None),
        (None, ['jobs']),
    ]
    for update_fields, exclude_fields in bad_combinations:
        self.assertRaises(TypeError, helper.get_fields,
                          update_fields, exclude_fields, person_meta)
| aykut/django-bulk-update | tests/tests.py | Python | mit | 32,938 | [
"CRYSTAL"
] | 0236a3f180cc5ecd68687ba2fc17b0321a36ca77627c617fefe23db513d3104f |
#######################################################################
# Tests for IlluminaData.py module
#######################################################################
from bcftbx.IlluminaData import *
import bcftbx.utils
import unittest
import cStringIO
import tempfile
import shutil
class MockIlluminaData:
    """Utility class for creating mock Illumina analysis data directories

    The MockIlluminaData class allows artificial Illumina analysis data
    directories to be defined, created and populated, and then destroyed.

    These artificial directories are intended to be used for testing
    purposes.

    Basic example usage:

    >>> mockdata = MockIlluminaData('130904_PJB_XXXXX')
    >>> mockdata.add_fastq('PJB','PJB1','PJB1_GCCAAT_L001_R1_001.fastq.gz')
    >>> ...
    >>> mockdata.create()

    This will make a directory structure:

    130904_PJB_XXXXX/
        Unaligned/
            Project_PJB/
                Sample_PJB1/
                    PJB1_GCCAAT_L001_R1_001.fastq.gz
        ...

    Multiple fastqs can be more easily added using e.g.:

    >>> mockdata.add_fastq_batch('PJB','PJB2','PJB1_GCCAAT',lanes=(1,4,5))

    which creates 3 fastq entries for sample PJB2, with lane numbers 1, 4
    and 5.

    Paired-end mock data can be created using the 'paired_end' flag
    when instantiating the MockIlluminaData object.

    To delete the physical directory structure when finished:

    >>> mockdata.remove()
    """
    def __init__(self,name,unaligned_dir='Unaligned',paired_end=False,top_dir=None):
        """Create new MockIlluminaData instance

        Makes a new empty MockIlluminaData object.

        Arguments:
          name: name of the directory for the mock data
          unaligned_dir: directory holding the mock projects etc (default is
            'Unaligned')
          paired_end: specify whether mock data is paired end (True) or not
            (False) (default is False)
          top_dir: specify a parent directory for the mock data (default is
            the current working directory)
        """
        # Tracks whether create() has built the on-disk structure, so
        # remove() only deletes directories this instance made
        self.__created = False
        self.__name = name
        self.__unaligned_dir = unaligned_dir
        self.__paired_end = paired_end
        self.__undetermined_dir = 'Undetermined_indices'
        if top_dir is not None:
            self.__top_dir = os.path.abspath(top_dir)
        else:
            self.__top_dir = os.getcwd()
        # Maps project dir name -> dict of sample dir name -> list of fastqs
        self.__projects = {}
    @property
    def name(self):
        """Name of the mock data
        """
        return self.__name
    @property
    def dirn(self):
        """Full path to the mock data directory
        """
        return os.path.join(self.__top_dir,self.__name)
    @property
    def unaligned_dir(self):
        """Full path to the unaligned directory for the mock data
        """
        return os.path.join(self.dirn,self.__unaligned_dir)
    @property
    def paired_end(self):
        """Whether or not the mock data is paired ended
        """
        return self.__paired_end
    @property
    def projects(self):
        """List of project names within the mock data

        Only 'Project_*' entries count; the undetermined-indices
        pseudo-project is excluded. Names are returned sorted, with
        the 'Project_' prefix stripped.
        """
        projects = []
        for project_name in self.__projects:
            if project_name.startswith('Project_'):
                projects.append(project_name.split('_')[1])
        projects.sort()
        return projects
    @property
    def has_undetermined(self):
        """Whether or not undetermined indices are included
        """
        return (self.__undetermined_dir in self.__projects)
    def samples_in_project(self,project_name):
        """List of sample names associated with a specific project

        Arguments:
          project_name: name of a project

        Returns:
          List of sample names (sorted, 'Sample_' prefix stripped)
        """
        project = self.__projects[self.__project_dir(project_name)]
        samples = []
        for sample_name in project:
            if sample_name.startswith('Sample_'):
                samples.append(sample_name.split('_')[1])
        samples.sort()
        return samples
    def fastqs_in_sample(self,project_name,sample_name):
        """List of fastq names associated with a project/sample pair

        Arguments:
          project_name: name of a project
          sample_name: name of a sample

        Returns:
          List of fastq names.
        """
        project_dir = self.__project_dir(project_name)
        sample_dir = self.__sample_dir(sample_name)
        return self.__projects[project_dir][sample_dir]
    def __project_dir(self,project_name):
        """Internal: convert project name to internal representation

        Project names are prepended with "Project_" if not already
        present, or if it is the "undetermined_indexes" directory.

        Arguments:
          project_name: name of a project

        Returns:
          Canonical project name for internal storage.
        """
        if project_name.startswith('Project_') or \
           project_name.startswith(self.__undetermined_dir):
            return project_name
        else:
            return 'Project_' + project_name
    def __sample_dir(self,sample_name):
        """Internal: convert sample name to internal representation

        Sample names are prepended with "Sample_" if not already
        present.

        Arguments:
          sample_name: name of a sample

        Returns:
          Canonical sample name for internal storage.
        """
        if sample_name.startswith('Sample_'):
            return sample_name
        else:
            return 'Sample_' + sample_name
    def add_project(self,project_name):
        """Add a project to the MockIlluminaData instance

        Defines a project within the MockIlluminaData structure.
        Note that any leading 'Project_' is ignored i.e. the project
        name is taken to be the remainder of the name.

        No error is raised if the project already exists.

        Arguments:
          project_name: name of the new project

        Returns:
          Dictionary object corresponding to the project.
        """
        project_dir = self.__project_dir(project_name)
        if project_dir not in self.__projects:
            self.__projects[project_dir] = {}
        return self.__projects[project_dir]
    def add_sample(self,project_name,sample_name):
        """Add a sample to a project within the MockIlluminaData instance

        Defines a sample with a project in the MockIlluminaData
        structure. Note that any leading 'Sample_' is ignored i.e. the
        sample name is taken to be the remainder of the name.

        If the parent project doesn't exist yet then it will be
        added automatically; no error is raised if the sample already
        exists.

        Arguments:
          project_name: name of the parent project
          sample_name: name of the new sample

        Returns:
          List object corresponding to the sample.
        """
        project = self.add_project(project_name)
        sample_dir = self.__sample_dir(sample_name)
        if sample_dir not in project:
            project[sample_dir] = []
        return project[sample_dir]
    def add_fastq(self,project_name,sample_name,fastq):
        """Add a fastq to a sample within the MockIlluminaData instance

        Defines a fastq within a project/sample pair in the MockIlluminaData
        structure.

        NOTE: it is recommended to use add_fastq_batch, which offers more
        flexibility and automatically maintains consistency e.g. when
        mocking a paired end data structure.

        Arguments:
          project_name: parent project
          sample_name: parent sample
          fastq: name of the fastq to add
        """
        sample = self.add_sample(project_name,sample_name)
        sample.append(fastq)
        # Keep fastq list sorted so ordering is deterministic
        sample.sort()
    def add_fastq_batch(self,project_name,sample_name,fastq_base,fastq_ext='fastq.gz',
                        lanes=(1,)):
        """Add a set of fastqs within a sample

        This method adds a set of fastqs within a sample with a single
        invocation, and is intended to simulate the situation where there
        are multiple fastqs due to paired end sequencing and/or sequencing
        of the sample across multiple lanes.

        The fastq names are constructed from a base name (e.g. 'PJB-1_GCCAAT'),
        plus a list/tuple of lane numbers. One fastq will be added for each
        lane number specified, e.g.:

        >>> d.add_fastq_batch('PJB','PJB-1','PJB-1_GCCAAT',lanes=(1,4,5))

        will add PJB-1_GCCAAT_L001_R1_001, PJB-1_GCCAAT_L004_R1_001 and
        PJB-1_GCCAAT_L005_R1_001 fastqs.

        If the MockIlluminaData object was created with the paired_end flag
        set to True then matching R2 fastqs will also be added.

        Arguments:
          project_name: parent project
          sample_name: parent sample
          fastq_base: base name of the fastq name i.e. just the sample name
            and barcode sequence (e.g. 'PJB-1_GCCAAT')
          fastq_ext: file extension to use (optional, defaults to 'fastq.gz')
          lanes: list, tuple or iterable with lane numbers (optional,
            defaults to (1,))
        """
        # Paired-end data gets an R2 fastq for every R1
        if self.__paired_end:
            reads = (1,2)
        else:
            reads = (1,)
        for lane in lanes:
            for read in reads:
                fastq = "%s_L%03d_R%d_001.%s" % (fastq_base,
                                                 lane,read,
                                                 fastq_ext)
                self.add_fastq(project_name,sample_name,fastq)
    def add_undetermined(self,lanes=(1,)):
        """Add directories and files for undetermined reads

        This method adds a set of fastqs for any undetermined reads from
        demultiplexing.

        Arguments:
          lanes: list, tuple or iterable with lane numbers (optional,
            defaults to (1,))
        """
        for lane in lanes:
            sample_name = "Sample_lane%d" % lane
            fastq_base = "lane%d_Undetermined" % lane
            self.add_sample(self.__undetermined_dir,sample_name)
            self.add_fastq_batch(self.__undetermined_dir,sample_name,fastq_base,
                                 lanes=(lane,))
    def create(self):
        """Build and populate the directory structure

        Creates the directory structure on disk which has been defined
        within the MockIlluminaData object.

        Invoke the 'remove' method to delete the directory structure.

        The contents of the MockIlluminaData object can be modified
        after the directory structure has been created, but changes will
        not be reflected on disk. Instead it is necessary to first
        remove the directory structure, and then re-invoke the create
        method.

        create raises an OSError exception if any part of the directory
        structure already exists.
        """
        # Create top level directory
        if os.path.exists(self.dirn):
            # NOTE(review): Python 2-only raise syntax; this module uses
            # cStringIO elsewhere so is py2-only anyway
            raise OSError,"%s already exists" % self.dirn
        else:
            bcftbx.utils.mkdir(self.dirn)
            self.__created = True
        # "Unaligned" directory
        bcftbx.utils.mkdir(self.unaligned_dir)
        # Populate with projects, samples etc
        for project_name in self.__projects:
            project_dirn = os.path.join(self.unaligned_dir,project_name)
            bcftbx.utils.mkdir(project_dirn)
            for sample_name in self.__projects[project_name]:
                sample_dirn = os.path.join(project_dirn,sample_name)
                bcftbx.utils.mkdir(sample_dirn)
                for fastq in self.__projects[project_name][sample_name]:
                    fq = os.path.join(sample_dirn,fastq)
                    # "Touch" the file (i.e. creates an empty file)
                    open(fq,'wb+').close()
    def remove(self):
        """Delete the directory structure and contents

        This removes the directory structure from disk that has
        previously been created using the create method.
        """
        if self.__created:
            shutil.rmtree(self.dirn)
            self.__created = False
class TestIlluminaData(unittest.TestCase):
    """Collective tests for IlluminaData, IlluminaProject and IlluminaSample

    Test methods use the following pattern:

    1. Invoke makeMockIlluminaData factory method to produce a variant
       of an artificial directory structure mimicking that produced by the
       bcl to fastq conversion process
    2. Populate an IlluminaData object from the resulting directory structure
    3. Invoke the assertIlluminaData method to check that the IlluminaData
       object is correct.

    assertIlluminaData in turn invokes assertIlluminaProject and
    assertIlluminaUndetermined; assertIlluminaProject invokes
    assertIlluminaSample.
    """
    def setUp(self):
        # Placeholder for the mock Illumina directory; built on demand
        # by makeMockIlluminaData
        self.mock_illumina_data = None
    def tearDown(self):
        # Remove the test directory, if one was created
        if self.mock_illumina_data is not None:
            self.mock_illumina_data.remove()
    def makeMockIlluminaData(self,paired_end=False,
                             multiple_projects=False,
                             multiplexed_run=False):
        """Create and populate a mock data directory for a test

        Arguments:
          paired_end: if True then make R1/R2 fastq pairs
          multiple_projects: if True then add a second project ('CDE')
            alongside the default 'AB' project
          multiplexed_run: if True then the second project spans lanes
            1, 4 and 5 and undetermined fastqs are also added (only
            takes effect when multiple_projects is also True)
        """
        # Create initial mock dir
        mock_illumina_data = MockIlluminaData('test.MockIlluminaData',
                                              paired_end=paired_end)
        # Add first project with two samples
        mock_illumina_data.add_fastq_batch('AB','AB1','AB1_GCCAAT',lanes=(1,))
        mock_illumina_data.add_fastq_batch('AB','AB2','AB2_AGTCAA',lanes=(1,))
        # Additional projects?
        # BUGFIX: the outer condition previously re-tested 'multiplexed_run',
        # so 'multiple_projects' was never consulted and the CDE project was
        # never added for the multiple-projects-only test case
        if multiple_projects:
            if multiplexed_run:
                lanes=(1,4,5)
                mock_illumina_data.add_undetermined(lanes=lanes)
            else:
                lanes=(1,)
            mock_illumina_data.add_fastq_batch('CDE','CDE3','CDE3_GCCAAT',lanes=lanes)
            mock_illumina_data.add_fastq_batch('CDE','CDE4','CDE4_AGTCAA',lanes=lanes)
        # Create and finish
        self.mock_illumina_data = mock_illumina_data
        self.mock_illumina_data.create()
    def assertIlluminaData(self,illumina_data,mock_illumina_data):
        """Verify that an IlluminaData object matches a MockIlluminaData object
        """
        # Check top-level attributes
        self.assertEqual(illumina_data.analysis_dir,mock_illumina_data.dirn,
                         "Directories differ: %s != %s" %
                         (illumina_data.analysis_dir,mock_illumina_data.dirn))
        self.assertEqual(illumina_data.unaligned_dir,mock_illumina_data.unaligned_dir,
                         "Unaligned dirs differ: %s != %s" %
                         (illumina_data.unaligned_dir,mock_illumina_data.unaligned_dir))
        self.assertEqual(illumina_data.paired_end,mock_illumina_data.paired_end,
                         "Paired ended-ness differ: %s != %s" %
                         (illumina_data.paired_end,mock_illumina_data.paired_end))
        # Check projects (both sides are sorted by name, so zip pairs them up)
        for project,pname in zip(illumina_data.projects,mock_illumina_data.projects):
            self.assertIlluminaProject(project,mock_illumina_data,pname)
        # Check undetermined indices
        self.assertIlluminaUndetermined(illumina_data.undetermined,mock_illumina_data)
    def assertIlluminaProject(self,illumina_project,mock_illumina_data,project_name):
        """Verify that an IlluminaProject object matches a MockIlluminaData object
        """
        # Check top-level attributes
        self.assertEqual(illumina_project.name,project_name)
        self.assertEqual(illumina_project.paired_end,mock_illumina_data.paired_end)
        # Check samples within projects
        for sample,sname in zip(illumina_project.samples,
                                mock_illumina_data.samples_in_project(project_name)):
            self.assertIlluminaSample(sample,mock_illumina_data,project_name,sname)
    def assertIlluminaSample(self,illumina_sample,mock_illumina_data,
                             project_name,sample_name):
        """Verify that an IlluminaSample object matches a MockIlluminaData object
        """
        # Check top-level attributes
        self.assertEqual(illumina_sample.name,sample_name)
        self.assertEqual(illumina_sample.paired_end,mock_illumina_data.paired_end)
        # Check fastqs
        for fastq,fq in zip(illumina_sample.fastq,
                            mock_illumina_data.fastqs_in_sample(project_name,
                                                                sample_name)):
            self.assertEqual(fastq,fq)
        # Check fastq subsets: R1 + R2 must account for all fastqs
        r1_fastqs = illumina_sample.fastq_subset(read_number=1)
        r2_fastqs = illumina_sample.fastq_subset(read_number=2)
        self.assertEqual(len(r1_fastqs)+len(r2_fastqs),
                         len(illumina_sample.fastq))
        if not illumina_sample.paired_end:
            # For single end data all fastqs are R1 and there are no R2
            for fastq,fq in zip(illumina_sample.fastq,r1_fastqs):
                self.assertEqual(fastq,fq)
            self.assertEqual(len(r2_fastqs),0)
        else:
            # For paired end data check R1 and R2 files match up
            for fastq_r1,fastq_r2 in zip(r1_fastqs,r2_fastqs):
                fqr1 = IlluminaFastq(fastq_r1)
                fqr2 = IlluminaFastq(fastq_r2)
                self.assertEqual(fqr1.read_number,1)
                self.assertEqual(fqr2.read_number,2)
                self.assertEqual(fqr1.sample_name,fqr2.sample_name)
                self.assertEqual(fqr1.barcode_sequence,fqr2.barcode_sequence)
                self.assertEqual(fqr1.lane_number,fqr2.lane_number)
                self.assertEqual(fqr1.set_number,fqr2.set_number)
    def assertIlluminaUndetermined(self,undetermined,mock_illumina_data):
        """Verify that Undetermined_indices project matches MockIlluminaData
        """
        self.assertEqual((undetermined is not None),mock_illumina_data.has_undetermined)
        if undetermined is not None:
            # Delegate checking to assertIlluminaProject
            self.assertIlluminaProject(undetermined,
                                       mock_illumina_data,undetermined.name)
    def test_illumina_data(self):
        """Basic test with single project
        """
        self.makeMockIlluminaData()
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertIlluminaData(illumina_data,self.mock_illumina_data)
    def test_illumina_data_paired_end(self):
        """Test with single project & paired-end data
        """
        self.makeMockIlluminaData(paired_end=True)
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertIlluminaData(illumina_data,self.mock_illumina_data)
    def test_illumina_data_multiple_projects(self):
        """Test with multiple projects
        """
        self.makeMockIlluminaData(multiple_projects=True)
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertIlluminaData(illumina_data,self.mock_illumina_data)
    def test_illumina_data_multiple_projects_paired_end(self):
        """Test with multiple projects & paired-end data
        """
        self.makeMockIlluminaData(multiple_projects=True,paired_end=True)
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertIlluminaData(illumina_data,self.mock_illumina_data)
    def test_illumina_data_multiple_projects_multiplexed(self):
        """Test with multiple projects & multiplexing
        """
        self.makeMockIlluminaData(multiple_projects=True,multiplexed_run=True)
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertIlluminaData(illumina_data,self.mock_illumina_data)
    def test_illumina_data_multiple_projects_multiplexed_paired_end(self):
        """Test with multiple projects, multiplexing & paired-end data
        """
        self.makeMockIlluminaData(multiple_projects=True,multiplexed_run=True,
                                  paired_end=True)
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertIlluminaData(illumina_data,self.mock_illumina_data)
class TestCasavaSampleSheet(unittest.TestCase):
    """Tests for the CasavaSampleSheet class
    """
    def setUp(self):
        # Set up test data with duplicated names
        # Columns: FCID, Lane, SampleID, SampleRef, Index, Description,
        # Control, Recipe, Operator, SampleProject
        self.sample_sheet_data = [
            ['DADA331XX',1,'PhiX','PhiX control','','Control','','','Peter','Control'],
            ['DADA331XX',2,'884-1','PB-884-1','AGTCAA','RNA-seq','','','Peter','AR'],
            ['DADA331XX',3,'885-1','PB-885-1','AGTTCC','RNA-seq','','','Peter','AR'],
            ['DADA331XX',4,'886-1','PB-886-1','ATGTCA','RNA-seq','','','Peter','AR'],
            ['DADA331XX',5,'884-1','PB-884-1','AGTCAA','RNA-seq','','','Peter','AR'],
            ['DADA331XX',6,'885-1','PB-885-1','AGTTCC','RNA-seq','','','Peter','AR'],
            ['DADA331XX',7,'886-1','PB-886-1','ATGTCA','RNA-seq','','','Peter','AR'],
            ['DADA331XX',8,'PhiX','PhiX control','','Control','','','Peter','Control']
        ]
        # Build the equivalent CSV text (header line plus one row per entry)
        text = []
        for line in self.sample_sheet_data:
            text.append(','.join([str(x) for x in line]))
        self.sample_sheet_text = "FCID,Lane,SampleID,SampleRef,Index,Description,Control,Recipe,Operator,SampleProject\n" + '\n'.join(text)
    def test_read_sample_sheet(self):
        """Read valid sample sheet
        """
        sample_sheet = CasavaSampleSheet(fp=cStringIO.StringIO(self.sample_sheet_text))
        # Check number of lines read
        self.assertEqual(len(sample_sheet),8,"Wrong number of lines")
        # Check data items
        for i in range(0,8):
            self.assertEqual(sample_sheet[i]['FCID'],self.sample_sheet_data[i][0])
            self.assertEqual(sample_sheet[i]['Lane'],self.sample_sheet_data[i][1])
            self.assertEqual(sample_sheet[i]['SampleID'],self.sample_sheet_data[i][2])
            self.assertEqual(sample_sheet[i]['SampleRef'],self.sample_sheet_data[i][3])
            self.assertEqual(sample_sheet[i]['Index'],self.sample_sheet_data[i][4])
            self.assertEqual(sample_sheet[i]['Description'],self.sample_sheet_data[i][5])
            self.assertEqual(sample_sheet[i]['Control'],self.sample_sheet_data[i][6])
            self.assertEqual(sample_sheet[i]['Recipe'],self.sample_sheet_data[i][7])
            self.assertEqual(sample_sheet[i]['Operator'],self.sample_sheet_data[i][8])
            self.assertEqual(sample_sheet[i]['SampleProject'],self.sample_sheet_data[i][9])
    def test_duplicates(self):
        """Check and fix duplicated names
        """
        # Set up
        sample_sheet = CasavaSampleSheet(fp=cStringIO.StringIO(self.sample_sheet_text))
        # Shouldn't find any duplicates when lanes are different
        self.assertEqual(len(sample_sheet.duplicated_names),0)
        # Create 3 duplicates by resetting lane numbers
        sample_sheet[4]['Lane'] = 2
        sample_sheet[5]['Lane'] = 3
        sample_sheet[6]['Lane'] = 4
        self.assertEqual(len(sample_sheet.duplicated_names),3)
        # Fix and check again (should be none)
        sample_sheet.fix_duplicated_names()
        self.assertEqual(sample_sheet.duplicated_names,[])
    def test_illegal_names(self):
        """Check for illegal characters in names
        """
        # Set up and introduce bad names (space and '?' are illegal)
        sample_sheet = CasavaSampleSheet(fp=cStringIO.StringIO(self.sample_sheet_text))
        sample_sheet[3]['SampleID'] = '886 1'
        sample_sheet[4]['SampleProject'] = "AR?"
        # Check for illegal names
        self.assertEqual(len(sample_sheet.illegal_names),2)
        # Fix and check again
        sample_sheet.fix_illegal_names()
        self.assertEqual(sample_sheet.illegal_names,[])
        # Verify that character replacement worked correctly
        self.assertEqual(sample_sheet[3]['SampleID'],'886_1')
        self.assertEqual(sample_sheet[4]['SampleProject'],"AR")
    def test_remove_quotes(self):
        """Remove double quotes from values
        """
        # Set up
        sample_sheet = CasavaSampleSheet(fp=cStringIO.StringIO("""FCID,Lane,SampleID,SampleRef,Index,Description,Control,Recipe,Operator,SampleProject
"D190HACXX",1,"PB","PB","CGATGT","RNA-seq","N",,,"Peter Briggs"
"""))
        self.assertEqual(sample_sheet[0]['FCID'],'D190HACXX')
        self.assertEqual(sample_sheet[0]['Lane'],1)
        self.assertEqual(sample_sheet[0]['SampleID'],'PB')
        self.assertEqual(sample_sheet[0]['SampleRef'],'PB')
        self.assertEqual(sample_sheet[0]['Index'],'CGATGT')
        self.assertEqual(sample_sheet[0]['Description'],'RNA-seq')
        self.assertEqual(sample_sheet[0]['Control'],'N')
        self.assertEqual(sample_sheet[0]['Recipe'],'')
        self.assertEqual(sample_sheet[0]['Operator'],'')
        self.assertEqual(sample_sheet[0]['SampleProject'],'Peter Briggs')
    def test_remove_quotes_and_comments(self):
        """Remove double quotes from values along with comment lines
        """
        # Set up: second line starts with '#' (after quote removal) so
        # should be treated as a comment and dropped
        sample_sheet = CasavaSampleSheet(fp=cStringIO.StringIO("""FCID,Lane,SampleID,SampleRef,Index,Description,Control,Recipe,Operator,SampleProject
"D190HACXX",1,"PB","PB","CGATGT","RNA-seq","N",,,"Peter Briggs"
"#D190HACXX",2,"PB","PB","ACTGAT","RNA-seq","N",,,"Peter Briggs"
"""))
        self.assertEqual(len(sample_sheet),1)
    def test_numeric_names(self):
        """Check that purely numerical names can be handled
        """
        # Set up and introduce numeric names
        sample_sheet = CasavaSampleSheet(fp=cStringIO.StringIO(self.sample_sheet_text))
        sample_sheet[3]['SampleID'] = 8861
        sample_sheet[4]['SampleProject'] = 123
        # Check for illegal names
        self.assertEqual(len(sample_sheet.illegal_names),0)
        # Check for empty names
        self.assertEqual(len(sample_sheet.empty_names),0)
        # Check for duplicated names
        self.assertEqual(len(sample_sheet.duplicated_names),0)
class TestIlluminaFastq(unittest.TestCase):
    """Tests for parsing fastq file names with the IlluminaFastq class
    """
    def _check_components(self,fastq_name,sample_name,barcode,
                          lane,read,set_number):
        """Parse 'fastq_name' and verify each extracted component."""
        fq = IlluminaFastq(fastq_name)
        self.assertEqual(fq.fastq,fastq_name)
        self.assertEqual(fq.sample_name,sample_name)
        self.assertEqual(fq.barcode_sequence,barcode)
        self.assertEqual(fq.lane_number,lane)
        self.assertEqual(fq.read_number,read)
        self.assertEqual(fq.set_number,set_number)
    def test_illumina_fastq(self):
        """Check extraction of fastq name components
        """
        self._check_components('NA10831_ATCACG_L002_R1_001',
                               'NA10831','ATCACG',2,1,1)
    def test_illumina_fastq_with_path_and_extension(self):
        """Check extraction of name components with leading path and extension
        """
        self._check_components('/home/galaxy/NA10831_ATCACG_L002_R1_001.fastq.gz',
                               'NA10831','ATCACG',2,1,1)
    def test_illumina_fastq_r2(self):
        """Check extraction of fastq name components for R2 read
        """
        self._check_components('NA10831_ATCACG_L002_R2_001',
                               'NA10831','ATCACG',2,2,1)
    def test_illumina_fastq_no_index(self):
        """Check extraction of fastq name components without a barcode
        """
        # 'NoIndex' maps to a barcode sequence of None
        self._check_components('NA10831_NoIndex_L002_R1_001',
                               'NA10831',None,2,1,1)
    def test_illumina_fastq_dual_index(self):
        """Check extraction of fastq name components with dual index
        """
        self._check_components('NA10831_ATCACG-GCACTA_L002_R1_001',
                               'NA10831','ATCACG-GCACTA',2,1,1)
class TestIEMSampleSheet(unittest.TestCase):
def setUp(self):
self.hiseq_sample_sheet_content = """[Header],,,,,,,,,,
IEMFileVersion,4,,,,,,,,,
Date,06/03/2014,,,,,,,,,
Workflow,GenerateFASTQ,,,,,,,,,
Application,HiSeq FASTQ Only,,,,,,,,,
Assay,Nextera,,,,,,,,,
Description,,,,,,,,,,
Chemistry,Amplicon,,,,,,,,,
,,,,,,,,,,
[Reads],,,,,,,,,,
101,,,,,,,,,,
101,,,,,,,,,,
,,,,,,,,,,
[Settings],,,,,,,,,,
ReverseComplement,0,,,,,,,,,
Adapter,CTGTCTCTTATACACATCT,,,,,,,,,
,,,,,,,,,,
[Data],,,,,,,,,,
Lane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description
1,PJB1-1579,PJB1-1579,,,N701,CGATGTAT ,N501,TCTTTCCC,PeterBriggs,
1,PJB2-1580,PJB2-1580,,,N702,TGACCAAT ,N502,TCTTTCCC,PeterBriggs,
"""
self.miseq_sample_sheet_content = """[Header]
IEMFileVersion,4
Date,4/11/2014
Workflow,Metagenomics
Application,Metagenomics 16S rRNA
Assay,Nextera XT
Description,
Chemistry,Amplicon
[Reads]
150
150
[Settings]
Adapter,CTGTCTCTTATACACATCT
[Data]
Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description
A8,A8,,,N701,TAAGGCGA,S501,TAGATCGC,PJB,
B8,B8,,,N702,CGTACTAG,S501,TAGATCGC,PJB,
"""
def test_load_hiseq_sample_sheet(self):
"""IEMSampleSheet: load a HiSEQ sample sheet
"""
iem = IEMSampleSheet(fp=cStringIO.StringIO(self.hiseq_sample_sheet_content))
# Check header
self.assertEqual(iem.header_items,['IEMFileVersion',
'Date',
'Workflow',
'Application',
'Assay',
'Description',
'Chemistry'])
self.assertEqual(iem.header['IEMFileVersion'],'4')
self.assertEqual(iem.header['Date'],'06/03/2014')
self.assertEqual(iem.header['Workflow'],'GenerateFASTQ')
self.assertEqual(iem.header['Application'],'HiSeq FASTQ Only')
self.assertEqual(iem.header['Assay'],'Nextera')
self.assertEqual(iem.header['Description'],'')
self.assertEqual(iem.header['Chemistry'],'Amplicon')
# Check reads
self.assertEqual(iem.reads,['101','101'])
# Check settings
self.assertEqual(iem.settings_items,['ReverseComplement',
'Adapter'])
self.assertEqual(iem.settings['ReverseComplement'],'0')
self.assertEqual(iem.settings['Adapter'],'CTGTCTCTTATACACATCT')
# Check data
self.assertEqual(iem.data.header(),['Lane','Sample_ID','Sample_Name',
'Sample_Plate','Sample_Well',
'I7_Index_ID','index',
'I5_Index_ID','index2',
'Sample_Project','Description'])
self.assertEqual(len(iem.data),2)
self.assertEqual(iem.data[0]['Lane'],1)
self.assertEqual(iem.data[0]['Sample_ID'],'PJB1-1579')
self.assertEqual(iem.data[0]['Sample_Name'],'PJB1-1579')
self.assertEqual(iem.data[0]['Sample_Plate'],'')
self.assertEqual(iem.data[0]['Sample_Well'],'')
self.assertEqual(iem.data[0]['I7_Index_ID'],'N701')
self.assertEqual(iem.data[0]['index'],'CGATGTAT')
self.assertEqual(iem.data[0]['I5_Index_ID'],'N501')
self.assertEqual(iem.data[0]['index2'],'TCTTTCCC')
self.assertEqual(iem.data[0]['Sample_Project'],'PeterBriggs')
self.assertEqual(iem.data[0]['Description'],'')
def test_show_hiseq_sample_sheet(self):
"""IEMSampleSheet: reconstruct a HiSEQ sample sheet
"""
iem = IEMSampleSheet(fp=cStringIO.StringIO(self.hiseq_sample_sheet_content))
expected = """[Header]
IEMFileVersion,4
Date,06/03/2014
Workflow,GenerateFASTQ
Application,HiSeq FASTQ Only
Assay,Nextera
Description,
Chemistry,Amplicon
[Reads]
101
101
[Settings]
ReverseComplement,0
Adapter,CTGTCTCTTATACACATCT
[Data]
Lane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description
1,PJB1-1579,PJB1-1579,,,N701,CGATGTAT,N501,TCTTTCCC,PeterBriggs,
1,PJB2-1580,PJB2-1580,,,N702,TGACCAAT,N502,TCTTTCCC,PeterBriggs,
"""
for l1,l2 in zip(iem.show().split(),expected.split()):
self.assertEqual(l1,l2)
def test_convert_hiseq_sample_sheet_to_casava(self):
"""IEMSampleSheet: convert HiSEQ sample sheet to CASAVA format
"""
iem = IEMSampleSheet(fp=cStringIO.StringIO(self.hiseq_sample_sheet_content))
casava = iem.casava_sample_sheet()
self.assertEqual(casava.header(),['FCID','Lane','SampleID','SampleRef',
'Index','Description','Control',
'Recipe','Operator','SampleProject'])
self.assertEqual(len(casava),2)
self.assertEqual(casava[0]['FCID'],'FC1')
self.assertEqual(casava[0]['Lane'],1)
self.assertEqual(casava[0]['SampleID'],'PJB1-1579')
self.assertEqual(casava[0]['SampleRef'],'')
self.assertEqual(casava[0]['Index'],'CGATGTAT-TCTTTCCC')
self.assertEqual(casava[0]['Description'],'')
self.assertEqual(casava[0]['Control'],'')
self.assertEqual(casava[0]['Recipe'],'')
self.assertEqual(casava[0]['Operator'],'')
self.assertEqual(casava[0]['SampleProject'],'PeterBriggs')
def test_load_miseq_sample_sheet(self):
"""IEMSampleSheet: load a MiSEQ sample sheet
"""
iem = IEMSampleSheet(fp=cStringIO.StringIO(self.miseq_sample_sheet_content))
# Check header
self.assertEqual(iem.header_items,['IEMFileVersion',
'Date',
'Workflow',
'Application',
'Assay',
'Description',
'Chemistry'])
self.assertEqual(iem.header['IEMFileVersion'],'4')
self.assertEqual(iem.header['Date'],'4/11/2014')
self.assertEqual(iem.header['Workflow'],'Metagenomics')
self.assertEqual(iem.header['Application'],'Metagenomics 16S rRNA')
self.assertEqual(iem.header['Assay'],'Nextera XT')
self.assertEqual(iem.header['Description'],'')
self.assertEqual(iem.header['Chemistry'],'Amplicon')
# Check reads
self.assertEqual(iem.reads,['150','150'])
# Check settings
self.assertEqual(iem.settings_items,['Adapter'])
self.assertEqual(iem.settings['Adapter'],'CTGTCTCTTATACACATCT')
# Check data
self.assertEqual(iem.data.header(),['Sample_ID','Sample_Name',
'Sample_Plate','Sample_Well',
'I7_Index_ID','index',
'I5_Index_ID','index2',
'Sample_Project','Description'])
self.assertEqual(len(iem.data),2)
self.assertEqual(iem.data[0]['Sample_ID'],'A8')
self.assertEqual(iem.data[0]['Sample_Name'],'A8')
self.assertEqual(iem.data[0]['Sample_Plate'],'')
self.assertEqual(iem.data[0]['Sample_Well'],'')
self.assertEqual(iem.data[0]['I7_Index_ID'],'N701')
self.assertEqual(iem.data[0]['index'],'TAAGGCGA')
self.assertEqual(iem.data[0]['I5_Index_ID'],'S501')
self.assertEqual(iem.data[0]['index2'],'TAGATCGC')
self.assertEqual(iem.data[0]['Sample_Project'],'PJB')
self.assertEqual(iem.data[0]['Description'],'')
def test_show_miseq_sample_sheet(self):
"""IEMSampleSheet: reconstruct a MiSEQ sample sheet
"""
iem = IEMSampleSheet(fp=cStringIO.StringIO(self.miseq_sample_sheet_content))
expected = self.miseq_sample_sheet_content
for l1,l2 in zip(iem.show().split(),expected.split()):
self.assertEqual(l1,l2)
    def test_convert_miseq_sample_sheet_to_casava(self):
        """IEMSampleSheet: convert MiSEQ sample sheet to CASAVA format
        """
        iem = IEMSampleSheet(fp=cStringIO.StringIO(self.miseq_sample_sheet_content))
        casava = iem.casava_sample_sheet()
        # Converted sheet must expose the fixed CASAVA column set
        self.assertEqual(casava.header(),['FCID','Lane','SampleID','SampleRef',
                                          'Index','Description','Control',
                                          'Recipe','Operator','SampleProject'])
        self.assertEqual(len(casava),2)
        # First line: the dual indexes (index/index2) are collapsed into a
        # single hyphen-separated 'Index' value
        self.assertEqual(casava[0]['FCID'],'FC1')
        self.assertEqual(casava[0]['Lane'],1)
        self.assertEqual(casava[0]['SampleID'],'A8')
        self.assertEqual(casava[0]['SampleRef'],'')
        self.assertEqual(casava[0]['Index'],'TAAGGCGA-TAGATCGC')
        self.assertEqual(casava[0]['Description'],'')
        self.assertEqual(casava[0]['Control'],'')
        self.assertEqual(casava[0]['Recipe'],'')
        self.assertEqual(casava[0]['Operator'],'')
        self.assertEqual(casava[0]['SampleProject'],'PJB')
    def test_bad_input_unrecognised_section(self):
        """IEMSampleSheet: raises exception for input with unrecognised section
        """
        # '[Footer]' is not a section defined by the IEM format
        fp = cStringIO.StringIO("""[Header]
IEMFileVersion,4
Date,06/03/2014
[Footer]
This,isTheEnd
""")
        self.assertRaises(IlluminaDataError,IEMSampleSheet,fp=fp)
    def test_bad_input_not_IEM_sample_sheet(self):
        """IEMSampleSheet: raises exception for non-IEM formatted input
        """
        # Input must start with a recognised section marker such as '[Header]'
        fp = cStringIO.StringIO("""Something random
IEMFileVersion,4
Date,06/03/2014
[Footer]
This,isTheEnd
""")
        self.assertRaises(IlluminaDataError,IEMSampleSheet,fp=fp)
class TestMiseqToCasavaConversion(unittest.TestCase):
    """Tests for converting MiSEQ sample sheets to CASAVA format."""
    def setUp(self):
        # Common IEM-style header shared by all the example sheets below
        self.miseq_header = """[Header]
IEMFileVersion,4
Investigator Name,
Project Name,
Experiment Name,
Date,1/18/2013
Workflow,GenerateFASTQ
Application,FASTQ Only
Assay,TruSeq LT
Description,
Chemistry,Default
[Reads]
50
[Settings]
[Data]"""
        # Example of single index data
        self.miseq_data = self.miseq_header + """
Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,Sample_Project,Description
PB1,,PB,A01,A001,ATCACG,PB,
PB2,,PB,A02,A002,CGATGT,PB,
PB3,,PB,A03,A006,GCCAAT,PB,
PB4,,PB,A04,A008,ACTTGA,PB,
ID3,,PB,A05,A012,CTTGTA,ID,
ID4,,PB,A06,A019,GTGAAA,ID,"""
        # Expected per-line values after converting the single-index data
        self.miseq_sample_ids = ['PB1','PB2','PB3','PB4','ID3','ID4']
        self.miseq_sample_projects = ['PB','PB','PB','PB','ID','ID']
        self.miseq_index_ids = ['ATCACG','CGATGT','GCCAAT','ACTTGA','CTTGTA','GTGAAA']
        # Example of dual-indexed data
        self.miseq_data_dual_indexed = self.miseq_header + """
Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description,GenomeFolder
PB1,,PB,A01,N701,TAAGGCGA,N501,TAGATCGC,,,
ID2,,PB,A02,N702,CGTACTAG,N502,CTCTCTAT,,,"""
        # Expected per-line values after converting the dual-indexed data
        self.miseq_dual_indexed_sample_ids = ['PB1','ID2']
        self.miseq_dual_indexed_sample_projects = ['PB','ID']
        self.miseq_dual_indexed_index_ids = ['TAAGGCGA-TAGATCGC','CGTACTAG-CTCTCTAT']
        # Example of no-index data
        self.miseq_data_no_index = self.miseq_header + """
Sample_ID,Sample_Name,Sample_Plate,Sample_Well,Sample_Project,Description
PB2,PB2,,,PB,"""
        # Expected per-line values after converting the no-index data
        self.miseq_no_index_sample_ids = ['PB2']
        self.miseq_no_index_sample_projects = ['PB']
        self.miseq_no_index_index_ids = ['']
    def test_convert_miseq_to_casava(self):
        """Convert MiSeq SampleSheet to CASAVA SampleSheet
        """
        # Make sample sheet from MiSEQ data
        sample_sheet = convert_miseq_samplesheet_to_casava(
            fp=cStringIO.StringIO(self.miseq_data))
        # Check contents
        self.assertEqual(len(sample_sheet),6)
        for i in range(0,6):
            # MiSEQ runs have a single lane, so every line gets lane 1
            self.assertEqual(sample_sheet[i]['Lane'],1)
            self.assertEqual(sample_sheet[i]['SampleID'],self.miseq_sample_ids[i])
            self.assertEqual(sample_sheet[i]['SampleProject'],self.miseq_sample_projects[i])
            self.assertEqual(sample_sheet[i]['Index'],self.miseq_index_ids[i])
    def test_convert_miseq_to_casava_dual_indexed(self):
        """Convert MiSeq SampleSheet to CASAVA SampleSheet (dual indexed)
        """
        # Make sample sheet from MiSEQ data
        sample_sheet = convert_miseq_samplesheet_to_casava(
            fp=cStringIO.StringIO(self.miseq_data_dual_indexed))
        # Check contents
        self.assertEqual(len(sample_sheet),2)
        for i in range(0,2):
            self.assertEqual(sample_sheet[i]['Lane'],1)
            self.assertEqual(sample_sheet[i]['SampleID'],self.miseq_dual_indexed_sample_ids[i])
            self.assertEqual(sample_sheet[i]['SampleProject'],
                             self.miseq_dual_indexed_sample_projects[i])
            self.assertEqual(sample_sheet[i]['Index'],
                             self.miseq_dual_indexed_index_ids[i])
    def test_convert_miseq_to_casava_no_index(self):
        """Convert MiSeq SampleSheet to CASAVA SampleSheet (no index)
        """
        # Make sample sheet from MiSEQ data
        sample_sheet = convert_miseq_samplesheet_to_casava(
            fp=cStringIO.StringIO(self.miseq_data_no_index))
        self.assertEqual(len(sample_sheet),1)
        for i in range(0,1):
            self.assertEqual(sample_sheet[i]['Lane'],1)
            self.assertEqual(sample_sheet[i]['SampleID'],self.miseq_no_index_sample_ids[i])
            self.assertEqual(sample_sheet[i]['SampleProject'],
                             self.miseq_no_index_sample_projects[i])
            self.assertEqual(sample_sheet[i]['Index'],
                             self.miseq_no_index_index_ids[i])
class TestHiseqToCasavaConversion(unittest.TestCase):
    """Tests for converting Experimental Manager HiSeq sample sheets."""
    def setUp(self):
        # HiSEQ-style IEM header: note the trailing comma padding on every
        # line (as produced when the sheet is edited in a spreadsheet)
        self.hiseq_header = """[Header],,,,,,,,
IEMFileVersion,4,,,,,,,
Experiment Name,HiSeq2,,,,,,,
Date,08/01/2013,,,,,,,
Workflow,GenerateFASTQ,,,,,,,
Application,HiSeq FASTQ Only,,,,,,,
Assay,TruSeq LT,,,,,,,
Description,,,,,,,,
Chemistry,Default,,,,,,,
,,,,,,,,
[Reads],,,,,,,,
101,,,,,,,,
101,,,,,,,,
,,,,,,,,
[Settings],,,,,,,,
ReverseComplement,0,,,,,,,
Adapter,AGATCGGAAGAGCACACGTCTGAACTCCAGTCA,,,,,,,
AdapterRead2,AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT,,,,,,,
,,,,,,,,
[Data],,,,,,,,"""
        # Example of single index data
        self.hiseq_data = self.hiseq_header + """
Lane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,Sample_Project,Description
1,PJB3,PJB3,,,A006,GCCAAT,,
1,PJB4,PJB4,,,A007,CAGATC,,
2,PB1-input,PB1-input,,,A002,CGATGT,,
2,PB2,PB2,,,A004,TGACCA,,
3,PB1-input,PB1-input,,,A002,CGATGT,,
4,PJB3,PJB3,,,A006,GCCAAT,,
4,PJB4,PJB4,,,A007,CAGATC,,
5,PJB5,PJB5,,,A012,CTTGTA,,
5,PJB6,PJB6,,,A013,AGTCAA,,
6,PJB4,PJB4,,,A007,CAGATC,,
7,PJB5,PJB5,,,A012,CTTGTA,,
8,PJB6,PJB6,,,A013,AGTCAA,,"""
        # Expected per-line values after conversion
        self.hiseq_lanes = [1,1,2,2,3,4,4,5,5,6,7,8]
        self.hiseq_sample_ids = ['PJB3','PJB4','PB1-input','PB2','PB1-input','PJB3',
                                 'PJB4','PJB5','PJB6','PJB4','PJB5','PJB6']
        self.hiseq_sample_projects = ['PJB','PJB','PB','PB','PB','PJB',
                                      'PJB','PJB','PJB','PJB','PJB','PJB']
        self.hiseq_index_ids = ['GCCAAT','CAGATC','CGATGT','TGACCA',
                                'CGATGT','GCCAAT','CAGATC','CTTGTA',
                                'AGTCAA','CAGATC','CTTGTA','AGTCAA']
    def test_convert_hiseq_to_casava(self):
        """Convert Experimental Manager HiSeq SampleSheet to CASAVA SampleSheet
        """
        # Make sample sheet from HiSEQ data
        sample_sheet = get_casava_sample_sheet(fp=cStringIO.StringIO(self.hiseq_data))
        # Check contents
        self.assertEqual(len(sample_sheet),12)
        for i in range(0,12):
            self.assertEqual(sample_sheet[i]['Lane'],self.hiseq_lanes[i])
            self.assertEqual(sample_sheet[i]['SampleID'],self.hiseq_sample_ids[i])
            self.assertEqual(sample_sheet[i]['SampleProject'],self.hiseq_sample_projects[i])
            self.assertEqual(sample_sheet[i]['Index'],self.hiseq_index_ids[i])
    def test_hiseq_to_casava_handle_space_in_index_sequence(self):
        """Handle trailing space when converting Experimental Manager sample sheet
        """
        # NB index sequence 'CTTGTAAT ' below deliberately carries a
        # trailing space which conversion must strip
        self.hiseq_data = self.hiseq_header + """
Lane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description
1,PJB1,PJB1,,,N703,CTTGTAAT ,N502,TCTTTCCC,PeterBriggs,"""
        # Make sample sheet from HiSEQ data
        sample_sheet = get_casava_sample_sheet(fp=cStringIO.StringIO(self.hiseq_data))
        # Check contents
        self.assertEqual(len(sample_sheet),1)
        line = sample_sheet[0]
        self.assertEqual(line['Lane'],1)
        self.assertEqual(line['SampleID'],'PJB1')
        self.assertEqual(line['SampleProject'],'PeterBriggs')
        self.assertEqual(line['Index'],'CTTGTAAT-TCTTTCCC')
class TestVerifyRunAgainstSampleSheet(unittest.TestCase):
    """Tests for verifying bcl-to-fastq outputs against a sample sheet."""
    def setUp(self):
        # Create a mock Illumina directory
        self.top_dir = tempfile.mkdtemp()
        self.mock_illumina_data = MockIlluminaData('test.MockIlluminaData',
                                                   paired_end=True,
                                                   top_dir=self.top_dir)
        self.mock_illumina_data.add_fastq_batch('AB','AB1','AB1_GCCAAT',lanes=(1,))
        self.mock_illumina_data.add_fastq_batch('AB','AB2','AB2_AGTCAA',lanes=(1,))
        self.mock_illumina_data.add_fastq_batch('CDE','CDE3','CDE3_GCCAAT',lanes=(2,3))
        self.mock_illumina_data.add_fastq_batch('CDE','CDE4','CDE4_AGTCAA',lanes=(2,3))
        self.mock_illumina_data.add_undetermined(lanes=(1,2,3))
        self.mock_illumina_data.create()
        # Sample sheet (CASAVA format) matching the mock directory above
        fno,self.sample_sheet = tempfile.mkstemp()
        fp = os.fdopen(fno,'w')
        fp.write("""FCID,Lane,SampleID,SampleRef,Index,Description,Control,Recipe,Operator,SampleProject
FC1,1,AB1,,GCCAAT,,,,,AB
FC1,1,AB2,,AGTCAA,,,,,AB
FC1,2,CDE3,,GCCAAT,,,,,CDE
FC1,2,CDE4,,AGTCAA,,,,,CDE
FC1,3,CDE3,,GCCAAT,,,,,CDE
FC1,3,CDE4,,AGTCAA,,,,,CDE""")
        fp.close()
    def tearDown(self):
        # Remove the test directory
        if self.mock_illumina_data is not None:
            self.mock_illumina_data.remove()
        os.rmdir(self.top_dir)
        os.remove(self.sample_sheet)
    def test_verify_run_against_sample_sheet(self):
        """Verify sample sheet against a matching run
        """
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertTrue(verify_run_against_sample_sheet(illumina_data,
                                                        self.sample_sheet))
    def test_verify_run_against_sample_sheet_with_missing_project(self):
        """Verify sample sheet against a run with a missing project
        """
        # Delete an entire project directory from the mock outputs
        shutil.rmtree(os.path.join(self.mock_illumina_data.dirn,
                                   self.mock_illumina_data.unaligned_dir,
                                   "Project_AB"))
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertFalse(verify_run_against_sample_sheet(illumina_data,
                                                         self.sample_sheet))
    def test_verify_run_against_sample_sheet_with_missing_sample(self):
        """Verify sample sheet against a run with a missing sample
        """
        # Delete a single sample directory from the mock outputs
        shutil.rmtree(os.path.join(self.mock_illumina_data.dirn,
                                   self.mock_illumina_data.unaligned_dir,
                                   "Project_AB","Sample_AB1"))
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertFalse(verify_run_against_sample_sheet(illumina_data,
                                                         self.sample_sheet))
    def test_verify_run_against_sample_sheet_with_missing_fastq(self):
        """Verify sample sheet against a run with a missing fastq file
        """
        # Delete one R2 fastq from the mock outputs
        os.remove(os.path.join(self.mock_illumina_data.dirn,
                               self.mock_illumina_data.unaligned_dir,
                               "Project_CDE","Sample_CDE4",
                               "CDE4_AGTCAA_L002_R2_001.fastq.gz"))
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertFalse(verify_run_against_sample_sheet(illumina_data,
                                                         self.sample_sheet))
class TestSummariseProjects(unittest.TestCase):
    """Tests for the summarise_projects function."""
    def setUp(self):
        # Create a mock Illumina directory with two paired-end projects
        self.top_dir = tempfile.mkdtemp()
        self.mock_illumina_data = MockIlluminaData('test.MockIlluminaData',
                                                   paired_end=True,
                                                   top_dir=self.top_dir)
        self.mock_illumina_data.add_fastq_batch('AB','AB1','AB1_GCCAAT',lanes=(1,))
        self.mock_illumina_data.add_fastq_batch('AB','AB2','AB2_AGTCAA',lanes=(1,))
        self.mock_illumina_data.add_fastq_batch('CDE','CDE3','CDE3_GCCAAT',lanes=(2,3))
        self.mock_illumina_data.add_fastq_batch('CDE','CDE4','CDE4_AGTCAA',lanes=(2,3))
        self.mock_illumina_data.add_undetermined(lanes=(1,2,3))
        self.mock_illumina_data.create()
    def tearDown(self):
        # Remove the test directory
        if self.mock_illumina_data is not None:
            self.mock_illumina_data.remove()
        os.rmdir(self.top_dir)
    def test_summarise_projects_paired_end_run(self):
        """Summarise projects for paired end run
        """
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertEqual(summarise_projects(illumina_data),
                         "Paired end: AB (2 samples); CDE (2 samples)")
class TestDescribeProject(unittest.TestCase):
    """Tests for the describe_project function."""
    def setUp(self):
        # Create a mock Illumina directory with two paired-end projects;
        # the 'CDE' samples appear in two lanes so they have multiple
        # fastqs per sample
        self.top_dir = tempfile.mkdtemp()
        self.mock_illumina_data = MockIlluminaData('test.MockIlluminaData',
                                                   paired_end=True,
                                                   top_dir=self.top_dir)
        self.mock_illumina_data.add_fastq_batch('AB','AB1','AB1_GCCAAT',lanes=(1,))
        self.mock_illumina_data.add_fastq_batch('AB','AB2','AB2_AGTCAA',lanes=(1,))
        self.mock_illumina_data.add_fastq_batch('CDE','CDE3','CDE3_GCCAAT',lanes=(2,3))
        self.mock_illumina_data.add_fastq_batch('CDE','CDE4','CDE4_AGTCAA',lanes=(2,3))
        self.mock_illumina_data.add_undetermined(lanes=(1,2,3))
        self.mock_illumina_data.create()
    def tearDown(self):
        # Remove the test directory
        if self.mock_illumina_data is not None:
            self.mock_illumina_data.remove()
        os.rmdir(self.top_dir)
    def test_describe_project_paired_end_run(self):
        """Generate descriptions for projects in a paired end run
        """
        illumina_data = IlluminaData(self.mock_illumina_data.dirn)
        self.assertEqual(describe_project(illumina_data.projects[0]),
                         "AB: AB1-2 (2 paired end samples)")
        self.assertEqual(describe_project(illumina_data.projects[1]),
                         "CDE: CDE3-4 (2 paired end samples, multiple fastqs per sample)")
class TestUniqueFastqNames(unittest.TestCase):
    """Tests for generating the shortest unique names for fastq files."""
    def test_unique_names_single_fastq(self):
        """Check name for a single fastq
        """
        # With no ambiguity the name reduces to just the sample name
        fastqs = ['PJB-E_GCCAAT_L001_R1_001.fastq.gz']
        mapping = get_unique_fastq_names(fastqs)
        self.assertEqual(mapping['PJB-E_GCCAAT_L001_R1_001.fastq.gz'],
                         'PJB-E.fastq.gz')
    def test_unique_names_single_sample_paired_end(self):
        """Check names for paired end fastqs from single sample
        """
        # Read number must be kept to distinguish R1 from R2
        fastqs = ['PJB-E_GCCAAT_L001_R1_001.fastq.gz',
                  'PJB-E_GCCAAT_L001_R2_001.fastq.gz']
        mapping = get_unique_fastq_names(fastqs)
        self.assertEqual(mapping['PJB-E_GCCAAT_L001_R1_001.fastq.gz'],
                         'PJB-E_R1.fastq.gz')
        self.assertEqual(mapping['PJB-E_GCCAAT_L001_R2_001.fastq.gz'],
                         'PJB-E_R2.fastq.gz')
    def test_unique_names_single_sample_multiple_lanes(self):
        """Check names for multiple fastqs from single sample
        """
        # Lane number must be kept to distinguish L001 from L002
        fastqs = ['PJB-E_GCCAAT_L001_R1_001.fastq.gz',
                  'PJB-E_GCCAAT_L002_R1_001.fastq.gz']
        mapping = get_unique_fastq_names(fastqs)
        self.assertEqual(mapping['PJB-E_GCCAAT_L001_R1_001.fastq.gz'],
                         'PJB-E_L001.fastq.gz')
        self.assertEqual(mapping['PJB-E_GCCAAT_L002_R1_001.fastq.gz'],
                         'PJB-E_L002.fastq.gz')
    def test_unique_names_single_sample_multiple_lanes_paired_end(self):
        """Check names for multiple fastqs from single paired-end sample
        """
        # Both lane and read number must be kept
        fastqs = ['PJB-E_GCCAAT_L001_R1_001.fastq.gz',
                  'PJB-E_GCCAAT_L001_R2_001.fastq.gz',
                  'PJB-E_GCCAAT_L002_R1_001.fastq.gz',
                  'PJB-E_GCCAAT_L002_R2_001.fastq.gz']
        mapping = get_unique_fastq_names(fastqs)
        self.assertEqual(mapping['PJB-E_GCCAAT_L001_R1_001.fastq.gz'],
                         'PJB-E_L001_R1.fastq.gz')
        self.assertEqual(mapping['PJB-E_GCCAAT_L001_R2_001.fastq.gz'],
                         'PJB-E_L001_R2.fastq.gz')
        self.assertEqual(mapping['PJB-E_GCCAAT_L002_R1_001.fastq.gz'],
                         'PJB-E_L002_R1.fastq.gz')
        self.assertEqual(mapping['PJB-E_GCCAAT_L002_R2_001.fastq.gz'],
                         'PJB-E_L002_R2.fastq.gz')
    def test_unique_names_multiple_samples_single_fastq(self):
        """Check names for multiple samples each with single fastq
        """
        # Different sample names are already unique on their own
        fastqs = ['PJB-E_GCCAAT_L001_R1_001.fastq.gz',
                  'PJB-A_AGTCAA_L001_R1_001.fastq.gz']
        mapping = get_unique_fastq_names(fastqs)
        self.assertEqual(mapping['PJB-E_GCCAAT_L001_R1_001.fastq.gz'],
                         'PJB-E.fastq.gz')
        self.assertEqual(mapping['PJB-A_AGTCAA_L001_R1_001.fastq.gz'],
                         'PJB-A.fastq.gz')
class TestFixBasesMask(unittest.TestCase):
    """Tests for adjusting bases masks to match actual index sequences."""
    def test_fix_bases_mask_single_index(self):
        """Check fix_bases_mask for single index data
        """
        # 6-base index with I6 mask: unchanged; with I7 mask the
        # surplus cycle becomes 'n'
        self.assertEqual(fix_bases_mask('y50,I6','ACAGTG'),'y50,I6')
        self.assertEqual(fix_bases_mask('y101,I7,y101','CGATGT'),'y101,I6n,y101')
    def test_fix_bases_mask_dual_index(self):
        """Check fix_bases_mask for dual index data
        """
        # Hyphen-separated index pair maps onto the two index reads
        self.assertEqual(fix_bases_mask('y250,I8,I8,y250','TAAGGCGA-TAGATCGC'),
                         'y250,I8,I8,y250')
        self.assertEqual(fix_bases_mask('y250,I8,I8,y250','TAAGGC-GATCGC'),
                         'y250,I6nn,I6nn,y250')
    def test_fix_bases_mask_dual_index_to_single(self):
        """Check fix_bases_mask for dual index converted to single index
        """
        # Second index read is entirely masked out with 'n's
        self.assertEqual(fix_bases_mask('y250,I8,I8,y250','TAAGGCGA'),
                         'y250,I8,nnnnnnnn,y250')
        self.assertEqual(fix_bases_mask('y250,I8,I8,y250','TAAGGC'),
                         'y250,I6nn,nnnnnnnn,y250')
class TestSplitRunName(unittest.TestCase):
    """Tests for the split_run_name function.

    Previously the leading-path case lived in the generic test while the
    method named 'with_leading_path' actually exercised bad names; the
    cases are now distributed to correctly named tests.
    """
    def test_split_run_name(self):
        """Check split_run_name for a valid run name
        """
        self.assertEqual(split_run_name('140210_M00879_0031_000000000-A69NA'),
                         ('140210','M00879','0031'))
    def test_split_run_name_with_leading_path(self):
        """Check split_run_name handles a leading directory path
        """
        self.assertEqual(split_run_name('/mnt/data/140210_M00879_0031_000000000-A69NA'),
                         ('140210','M00879','0031'))
    def test_split_run_name_with_bad_names(self):
        """Check split_run_name returns (None,None,None) for 'bad' names
        """
        # Malformed names should yield a tuple of Nones rather than raising
        self.assertEqual(split_run_name('this_is_nonesense'),(None,None,None))
        self.assertEqual(split_run_name('140210'),(None,None,None))
        self.assertEqual(split_run_name('14021_M00879_0031_000000000-A69NA'),
                         (None,None,None))
        self.assertEqual(split_run_name('140210_M00879'),
                         (None,None,None))
        self.assertEqual(split_run_name('140210_M00879_0031'),
                         (None,None,None))
        self.assertEqual(split_run_name('1402100_M00879_XYZ'),
                         (None,None,None))
#######################################################################
# Main program
#######################################################################
if __name__ == "__main__":
    # Turn off most logging output for tests (only CRITICAL messages
    # are emitted while the suite runs)
    logging.getLogger().setLevel(logging.CRITICAL)
    # Run tests
    unittest.main()
| fw1121/genomics | bcftbx/test/test_IlluminaData.py | Python | artistic-2.0 | 57,652 | [
"Galaxy"
] | cf4533ec0e52c44836e61b0832650abbfcf1d21c804fc78bf8454b20758e8e1e |
"""Sample aospy object library using the included example data."""
from datetime import datetime
import os
import aospy
from aospy import Model, Proj, Region, Run, Var
from aospy.data_loader import DictDataLoader
from aospy.internal_names import LAND_MASK_STR, LON_STR
# Location of the example netCDF data bundled with the aospy package.
rootdir = os.path.join(aospy.__path__[0], 'test', 'data', 'netcdf')
# Map each time-frequency label to a file glob within the example data.
_file_map = {'monthly': os.path.join(rootdir,
                                     '000[4-6]0101.precip_monthly.nc')}
# A single simulation covering (model) years 4-6.
example_run = Run(
    name='example_run',
    description=(
        'Control simulation of the idealized moist model'
    ),
    default_start_date=datetime(4, 1, 1),
    default_end_date=datetime(6, 12, 31),
    data_loader=DictDataLoader(_file_map)
)
# Model wrapping the run above; grid_attrs maps aospy's internal grid
# attribute names onto the non-standard names used in these files.
example_model = Model(
    name='example_model',
    grid_file_paths=(os.path.join(rootdir, '00040101.precip_monthly.nc'),
                     os.path.join(rootdir, 'im.landmask.nc')),
    runs=[example_run],
    grid_attrs={LAND_MASK_STR: 'custom_land_mask', LON_STR: 'custom_lon'}
)
def total_precip(precip_largescale, precip_convective):
    """Sum of convective and large-scale precipitation.

    Parameters
    ----------
    precip_largescale, precip_convective : xarray.DataArrays
        Precipitation from grid-scale condensation and from convective
        parameterization, respectively.

    Returns
    -------
    xarray.DataArray
        Element-wise sum of the two inputs.
    """
    combined = precip_largescale + precip_convective
    return combined
def conv_precip_frac(precip_largescale, precip_convective):
    """Fraction of total precip that is from convection parameterization.

    Parameters
    ----------
    precip_largescale, precip_convective : xarray.DataArrays
        Precipitation from grid-scale condensation and from convective
        parameterization, respectively.

    Returns
    -------
    xarray.DataArray
    """
    denom = total_precip(precip_largescale, precip_convective)
    # `where(denom)` masks zero-total points (replacing them with NaN),
    # which prevents a divide-by-zero in the ratio below.
    return precip_convective / denom.where(denom)
# Variables natively present in the example data; alt_names lists the
# names under which they appear in the netCDF files themselves.
precip_largescale = Var(
    name='precip_largescale',
    alt_names=('condensation_rain',),
    def_time=True,
    description='Precipitation generated via grid-scale condensation',
)
precip_convective = Var(
    name='precip_convective',
    alt_names=('convection_rain',),
    def_time=True,
    description='Precipitation generated by convective parameterization',
)
# Derived variables computed from the native ones via the `func`
# callables defined above.
precip_total = Var(
    name='precip_total',
    def_time=True,
    func=total_precip,
    variables=(precip_largescale, precip_convective),
)
precip_conv_frac = Var(
    name='precip_conv_frac',
    def_time=True,
    func=conv_precip_frac,
    variables=(precip_largescale, precip_convective),
)
# Geographic regions over which aggregated results can be computed.
globe = Region(
    name='globe',
    description='Entire globe',
    west_bound=0,
    east_bound=360,
    south_bound=-90,
    north_bound=90,
    do_land_mask=False
)
tropics = Region(
    name='tropics',
    description='Tropics, defined as 30S-30N',
    west_bound=0,
    east_bound=360,
    south_bound=-30,
    north_bound=30,
    do_land_mask=False
)
# Top-level project object tying the model and regions together.
example_proj = Proj(
    'example_proj',
    direc_out='example-output',
    tar_direc_out='example-tar-output',
    models=[example_model],
    regions=(globe, tropics)
)
if __name__ == '__main__':
    pass
| spencerkclark/aospy | aospy/examples/example_obj_lib.py | Python | apache-2.0 | 3,244 | [
"NetCDF"
] | 7656c8f878d5097268814224a9b9c159b94cd2eea523278b41649ed08e99c0a3 |
import os
import subprocess

# Input sulci volume files (manual labels) and FreeSurfer surface data.
voldir = "/projects/mindboggle/data/sulci_volumes/"
surfdir = "/Applications/freesurfer/subjects/Brainvisa62/"

# Run label_vol2surf.py for every subject and hemisphere found under
# the FreeSurfer subjects directory.
for subject in os.listdir(surfdir):
    for hemi in ['lh', 'rh']:
        # Volume files use 'L'/'R' prefixes; surface files use 'lh'/'rh'
        if hemi == 'lh':
            hemicap = 'L'
        else:
            hemicap = 'R'
        vol = os.path.join(voldir,
                           hemicap + 'Bottom_' + subject + '_base2008_manual.ima.nii.gz')
        surf = os.path.join(surfdir, subject, 'surf', hemi + '.pial')
        curv = os.path.join(surfdir, subject, 'surf', hemi + '.curv')
        outpt = subject + '_manual.' + hemi + '.vtk'
        # Pass the arguments as a list via subprocess rather than a
        # concatenated os.system() shell string: avoids shell
        # word-splitting/injection problems with paths containing
        # spaces or special characters.
        cmd = ['python', 'label_vol2surf.py', vol, surf, curv, outpt]
        print(' '.join(cmd))
        subprocess.call(cmd)
| binarybottle/mindboggle_sidelined | run_label_vol2surf.py | Python | apache-2.0 | 607 | [
"VTK"
] | 18a79da77e88df6412669565bf57d8b63d2400aceb8765344b203c8a4406f213 |
"""
Tests related to the cohorting feature.
"""
from uuid import uuid4
from .helpers import BaseDiscussionMixin
from .helpers import CohortTestMixin
from ..helpers import UniqueCourseTest
from ...pages.lms.auto_auth import AutoAuthPage
from ...fixtures.course import (CourseFixture, XBlockFixtureDesc)
from ...pages.lms.discussion import (DiscussionTabSingleThreadPage, InlineDiscussionThreadPage, InlineDiscussionPage)
from ...pages.lms.courseware import CoursewarePage
from nose.plugins.attrib import attr
class NonCohortedDiscussionTestMixin(BaseDiscussionMixin):
    """
    Mixin for tests of discussion in non-cohorted courses.
    """
    def setup_cohorts(self):
        """
        No cohorts are desired for this mixin.
        """
        pass
    def test_non_cohort_visibility_label(self):
        """Without cohorts, every post shows as visible to everyone."""
        self.setup_thread(1)
        self.assertEquals(self.thread_page.get_group_visibility_label(), "This post is visible to everyone.")
class CohortedDiscussionTestMixin(BaseDiscussionMixin, CohortTestMixin):
    """
    Mixin for tests of discussion in cohorted courses.
    """
    def setup_cohorts(self):
        """
        Sets up the course to use cohorting with a single defined cohort group.
        """
        self.setup_cohort_config(self.course_fixture)
        self.cohort_1_name = "Cohort Group 1"
        self.cohort_1_id = self.add_manual_cohort(self.course_fixture, self.cohort_1_name)
    def test_cohort_visibility_label(self):
        """Check the visibility label on a cohorted post, before and after
        cohorting is disabled for the course."""
        # Must be moderator to view content in a cohort other than your own
        AutoAuthPage(self.browser, course_id=self.course_id, roles="Moderator").visit()
        self.thread_id = self.setup_thread(1, group_id=self.cohort_1_id)
        self.assertEquals(
            self.thread_page.get_group_visibility_label(),
            "This post is visible only to {}.".format(self.cohort_1_name)
        )
        # Disable cohorts and verify that the post now shows as visible to everyone.
        self.disable_cohorting(self.course_fixture)
        self.refresh_thread_page(self.thread_id)
        self.assertEquals(self.thread_page.get_group_visibility_label(), "This post is visible to everyone.")
class DiscussionTabSingleThreadTest(UniqueCourseTest):
    """
    Tests for the discussion page displaying a single thread.
    """
    def setUp(self):
        super(DiscussionTabSingleThreadTest, self).setUp()
        self.discussion_id = "test_discussion_{}".format(uuid4().hex)
        # Create a course to register for
        self.course_fixture = CourseFixture(**self.course_info).install()
        self.setup_cohorts()
        AutoAuthPage(self.browser, course_id=self.course_id).visit()
    def setup_thread_page(self, thread_id):
        # Navigate directly to the single-thread view on the discussion tab
        self.thread_page = DiscussionTabSingleThreadPage(self.browser, self.course_id, thread_id)  # pylint:disable=W0201
        self.thread_page.visit()
    # pylint:disable=W0613
    def refresh_thread_page(self, thread_id):
        # thread_id is unused here: a browser refresh re-renders the same URL
        self.browser.refresh()
        self.thread_page.wait_for_page()
@attr('shard_1')
class CohortedDiscussionTabSingleThreadTest(DiscussionTabSingleThreadTest, CohortedDiscussionTestMixin):
    """
    Tests for the discussion page displaying a single cohorted thread.
    """
    # Actual test method(s) defined in CohortedDiscussionTestMixin.
    pass
@attr('shard_1')
class NonCohortedDiscussionTabSingleThreadTest(DiscussionTabSingleThreadTest, NonCohortedDiscussionTestMixin):
    """
    Tests for the discussion page displaying a single non-cohorted thread.
    """
    # Actual test method(s) defined in NonCohortedDiscussionTestMixin.
    pass
class InlineDiscussionTest(UniqueCourseTest):
    """
    Tests for inline discussions
    """
    def setUp(self):
        super(InlineDiscussionTest, self).setUp()
        self.discussion_id = "test_discussion_{}".format(uuid4().hex)
        # Course containing a single discussion component inside one unit
        self.course_fixture = CourseFixture(**self.course_info).add_children(
            XBlockFixtureDesc("chapter", "Test Section").add_children(
                XBlockFixtureDesc("sequential", "Test Subsection").add_children(
                    XBlockFixtureDesc("vertical", "Test Unit").add_children(
                        XBlockFixtureDesc(
                            "discussion",
                            "Test Discussion",
                            metadata={"discussion_id": self.discussion_id}
                        )
                    )
                )
            )
        ).install()
        self.setup_cohorts()
        self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
    def setup_thread_page(self, thread_id):
        # Inline threads are reached via the courseware, not the discussion tab
        CoursewarePage(self.browser, self.course_id).visit()
        self.show_thread(thread_id)
    def show_thread(self, thread_id):
        """Expand the inline discussion and open the given thread."""
        discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
        discussion_page.expand_discussion()
        self.assertEqual(discussion_page.get_num_displayed_threads(), 1)
        self.thread_page = InlineDiscussionThreadPage(self.browser, thread_id)  # pylint:disable=W0201
        self.thread_page.expand()
    def refresh_thread_page(self, thread_id):
        # After a refresh the discussion collapses, so re-open the thread
        self.browser.refresh()
        self.show_thread(thread_id)
@attr('shard_1')
class CohortedInlineDiscussionTest(InlineDiscussionTest, CohortedDiscussionTestMixin):
    """
    Tests for cohorted inline discussions.
    """
    # Actual test method(s) defined in CohortedDiscussionTestMixin.
    pass
@attr('shard_1')
class NonCohortedInlineDiscussionTest(InlineDiscussionTest, NonCohortedDiscussionTestMixin):
    """
    Tests for non-cohorted inline discussions.
    """
    # Actual test method(s) defined in NonCohortedDiscussionTestMixin.
    pass
| jruiperezv/ANALYSE | common/test/acceptance/tests/discussion/test_cohorts.py | Python | agpl-3.0 | 5,708 | [
"VisIt"
] | 577914c3d51f46b0c2c99ae2519d73d75678f78213a8d5eda7ecbd4e136fceda |
# $Id: nodes.py 6351 2010-07-03 14:19:09Z gbrandl $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Docutils document tree element class library.
Classes in CamelCase are abstract base classes or auxiliary classes. The one
exception is `Text`, for a text (PCDATA) node; uppercase is used to
differentiate from element classes. Classes in lower_case_with_underscores
are element classes, matching the XML element generic identifiers in the DTD_.
The position of each node (the level at which it can occur) is significant and
is represented by abstract base classes (`Root`, `Structural`, `Body`,
`Inline`, etc.). Certain transformations will be easier because we can use
``isinstance(node, base_class)`` to determine the position of the node in the
hierarchy.
.. _DTD: http://docutils.sourceforge.net/docs/ref/docutils.dtd
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import warnings
import types
import unicodedata
# ==============================
# Functional Node Base Classes
# ==============================
class Node(object):
    """Abstract base class of nodes in a document tree."""
    # The class-level values below serve as per-instance defaults until
    # assigned (e.g. by `setup_child()`).
    parent = None
    """Back-reference to the Node immediately containing this Node."""
    document = None
    """The `document` node at the root of the tree containing this Node."""
    source = None
    """Path or description of the input source which generated this Node."""
    line = None
    """The line number (1-based) of the beginning of this Node in `source`."""
    def __nonzero__(self):
        """
        Node instances are always true, even if they're empty. A node is more
        than a simple container. Its boolean "truth" does not depend on
        having one or more subnodes in the doctree.

        Use `len()` to check node length. Use `None` to represent a boolean
        false value.
        """
        return True
    if sys.version_info < (3,):
        # on 2.x, str(node) will be a byte string with Unicode
        # characters > 255 escaped; on 3.x this is no longer necessary
        # (str is natively Unicode, so __str__ is only defined here)
        def __str__(self):
            return unicode(self).encode('raw_unicode_escape')
    def asdom(self, dom=None):
        """Return a DOM **fragment** representation of this Node."""
        if dom is None:
            # default to the stdlib minidom implementation
            import xml.dom.minidom as dom
        domroot = dom.Document()
        # _dom_node() is implemented by concrete element/text subclasses
        return self._dom_node(domroot)
    # The three methods below are abstract: every concrete subclass
    # (element or text node) must provide its own implementation.
    def pformat(self, indent='    ', level=0):
        """
        Return an indented pseudo-XML representation, for test purposes.

        Override in subclasses.
        """
        raise NotImplementedError
    def copy(self):
        """Return a copy of self."""
        raise NotImplementedError
    def deepcopy(self):
        """Return a deep copy of self (also copying children)."""
        raise NotImplementedError
def setup_child(self, child):
child.parent = self
if self.document:
child.document = self.document
if child.source is None:
child.source = self.document.current_source
if child.line is None:
child.line = self.document.current_line
    def walk(self, visitor):
        """
        Traverse a tree of `Node` objects, calling the
        `dispatch_visit()` method of `visitor` when entering each
        node.  (The `walkabout()` method is similar, except it also
        calls the `dispatch_departure()` method before exiting each
        node.)

        This tree traversal supports limited in-place tree
        modifications.  Replacing one node with one or more nodes is
        OK, as is removing an element.  However, if the node removed
        or replaced occurs after the current node, the old node will
        still be traversed, and any new nodes will not.

        Within ``visit`` methods (and ``depart`` methods for
        `walkabout()`), `TreePruningException` subclasses may be raised
        (`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`).

        Parameter `visitor`: A `NodeVisitor` object, containing a
        ``visit`` implementation for each `Node` subclass encountered.

        Return true if we should stop the traversal.
        """
        stop = 0
        visitor.document.reporter.debug(
            'docutils.nodes.Node.walk calling dispatch_visit for %s'
            % self.__class__.__name__)
        try:
            try:
                visitor.dispatch_visit(self)
            except (SkipChildren, SkipNode):
                # prune this subtree entirely
                return stop
            except SkipDeparture:           # not applicable; ignore
                pass
            children = self.children
            try:
                # iterate over a copy so in-place removal/replacement of
                # earlier siblings does not disturb the traversal
                for child in children[:]:
                    if child.walk(visitor):
                        stop = 1
                        break
            except SkipSiblings:
                pass
        except StopTraversal:
            # abort the whole traversal, propagating the stop flag upward
            stop = 1
        return stop
    def walkabout(self, visitor):
        """
        Perform a tree traversal similarly to `Node.walk()` (which
        see), except also call the `dispatch_departure()` method
        before exiting each node.

        Parameter `visitor`: A `NodeVisitor` object, containing a
        ``visit`` and ``depart`` implementation for each `Node`
        subclass encountered.

        Return true if we should stop the traversal.
        """
        call_depart = 1
        stop = 0
        visitor.document.reporter.debug(
            'docutils.nodes.Node.walkabout calling dispatch_visit for %s'
            % self.__class__.__name__)
        try:
            try:
                visitor.dispatch_visit(self)
            except SkipNode:
                # prune subtree and skip the departure call as well
                return stop
            except SkipDeparture:
                # visit succeeded but the departure call is suppressed
                call_depart = 0
            children = self.children
            try:
                # iterate over a copy so in-place removal/replacement of
                # earlier siblings does not disturb the traversal
                for child in children[:]:
                    if child.walkabout(visitor):
                        stop = 1
                        break
            except SkipSiblings:
                pass
        except SkipChildren:
            pass
        except StopTraversal:
            # abort the whole traversal, propagating the stop flag upward
            stop = 1
        if call_depart:
            visitor.document.reporter.debug(
                'docutils.nodes.Node.walkabout calling dispatch_departure '
                'for %s' % self.__class__.__name__)
            visitor.dispatch_departure(self)
        return stop
def _fast_traverse(self, cls):
"""Specialized traverse() that only supports instance checks."""
result = []
if isinstance(self, cls):
result.append(self)
for child in self.children:
result.extend(child._fast_traverse(cls))
return result
def _all_traverse(self):
"""Specialized traverse() that doesn't check for a condition."""
result = []
result.append(self)
for child in self.children:
result.extend(child._all_traverse())
return result
def traverse(self, condition=None,
             include_self=1, descend=1, siblings=0, ascend=0):
    """
    Return an iterable containing

    * self (if include_self is true)
    * all descendants in tree traversal order (if descend is true)
    * all siblings (if siblings is true) and their descendants (if
      also descend is true)
    * the siblings of the parent (if ascend is true) and their
      descendants (if also descend is true), and so on

    If `condition` is not None, the iterable contains only nodes
    for which ``condition(node)`` is true.  If `condition` is a
    node class ``cls``, it is equivalent to a function consisting
    of ``return isinstance(node, cls)``.

    If ascend is true, assume siblings to be true as well.

    For example, given the following tree::

        <paragraph>
            <emphasis>      <--- emphasis.traverse() and
                <strong>    <--- strong.traverse() are called.
                    Foo
                Bar
            <reference name="Baz" refid="baz">
                Baz

    Then list(emphasis.traverse()) equals ::

        [<emphasis>, <strong>, <#text: Foo>, <#text: Bar>]

    and list(strong.traverse(ascend=1)) equals ::

        [<strong>, <#text: Foo>, <#text: Bar>, <reference>, <#text: Baz>]
    """
    if ascend:
        siblings=1
    # Check for special argument combinations that allow using an
    # optimized version of traverse()
    if include_self and descend and not siblings:
        if condition is None:
            return self._all_traverse()
        elif isinstance(condition, (types.ClassType, type)):
            return self._fast_traverse(condition)
    # Check if `condition` is a class (check for TypeType for Python
    # implementations that use only new-style classes, like PyPy).
    if isinstance(condition, (types.ClassType, type)):
        # Wrap the class in a predicate; the default argument binds the
        # class now so the closure is self-contained.
        node_class = condition
        def condition(node, node_class=node_class):
            return isinstance(node, node_class)
    r = []
    if include_self and (condition is None or condition(self)):
        r.append(self)
    if descend and len(self.children):
        for child in self:
            r.extend(child.traverse(
                include_self=1, descend=1, siblings=0, ascend=0,
                condition=condition))
    if siblings or ascend:
        # Walk following siblings; with ascend, repeat at each ancestor.
        node = self
        while node.parent:
            index = node.parent.index(node)
            for sibling in node.parent[index+1:]:
                r.extend(sibling.traverse(include_self=1, descend=descend,
                                          siblings=0, ascend=0,
                                          condition=condition))
            if not ascend:
                break
            else:
                node = node.parent
    return r
def next_node(self, condition=None,
              include_self=0, descend=1, siblings=0, ascend=0):
    """
    Return the first node in the iterable returned by traverse(),
    or None if the iterable is empty.

    Parameter list is the same as of traverse.  Note that
    include_self defaults to 0, though.
    """
    matches = self.traverse(condition=condition,
                            include_self=include_self, descend=descend,
                            siblings=siblings, ascend=ascend)
    # traverse() returns a list; an empty result means no match.
    if matches:
        return matches[0]
    return None
# Python 2/3 compatibility shim: under Python 2, ``repr(u'x')`` yields
# ``"u'x'"``; this subclass drops that leading "u" so reprs look the same
# on both major versions.  On Python 3 (after 2to3, where ``unicode`` is
# ``str``) the plain repr already has no prefix.
if sys.version_info < (3,):
    class reprunicode(unicode):
        """
        A class that removes the initial u from unicode's repr.
        """
        def __repr__(self):
            return unicode.__repr__(self)[1:]
else:
    reprunicode = unicode
class Text(Node, reprunicode):

    """
    Instances are terminal nodes (leaves) containing text only; no child
    nodes or attributes.  Initialize by passing a string to the
    constructor.  Access the text itself with the `astext` method.
    """

    tagname = '#text'

    children = ()
    """Text nodes have no children, and cannot have children."""

    if sys.version_info > (3,):
        def __new__(cls, data, rawsource=None):
            """Prevent the rawsource argument from propagating to str."""
            if isinstance(data, bytes):
                raise TypeError('expecting str data, not bytes')
            return reprunicode.__new__(cls, data)
    else:
        def __new__(cls, data, rawsource=None):
            """Prevent the rawsource argument from propagating to str."""
            return reprunicode.__new__(cls, data)

    def __init__(self, data, rawsource=''):

        self.rawsource = rawsource
        """The raw text from which this element was constructed."""

    def shortrepr(self, maxlen=18):
        # Truncate long text so container reprs stay readable.
        data = self
        if len(data) > maxlen:
            data = data[:maxlen-4] + ' ...'
        return '<%s: %s>' % (self.tagname, repr(reprunicode(data)))

    def __repr__(self):
        return self.shortrepr(maxlen=68)

    def _dom_node(self, domroot):
        return domroot.createTextNode(unicode(self))

    def astext(self):
        return reprunicode(self)

    # Note about __unicode__: The implementation of __unicode__ here,
    # and the one raising NotImplemented in the superclass Node had
    # to be removed when changing Text to a subclass of unicode instead
    # of UserString, since there is no way to delegate the __unicode__
    # call to the superclass unicode:
    # unicode itself does not have __unicode__ method to delegate to
    # and calling unicode(self) or unicode.__new__ directly creates
    # an infinite loop

    def copy(self):
        return self.__class__(reprunicode(self), rawsource=self.rawsource)

    def deepcopy(self):
        # Text is immutable string data, so a shallow copy suffices.
        return self.copy()

    def pformat(self, indent='    ', level=0):
        # One indented output line per physical line of text.
        result = []
        indent = indent * level
        for line in self.splitlines():
            result.append(indent + line + '\n')
        return ''.join(result)

    # rstrip and lstrip are used by substitution definitions where
    # they are expected to return a Text instance, this was formerly
    # taken care of by UserString.  Note that then and now the
    # rawsource member is lost.

    def rstrip(self, chars=None):
        return self.__class__(reprunicode.rstrip(self, chars))

    def lstrip(self, chars=None):
        return self.__class__(reprunicode.lstrip(self, chars))
class Element(Node):

    """
    `Element` is the superclass to all specific elements.

    Elements contain attributes and child nodes.  Elements emulate
    dictionaries for attributes, indexing by attribute name (a string).  To
    set the attribute 'att' to 'value', do::

        element['att'] = 'value'

    There are two special attributes: 'ids' and 'names'.  Both are
    lists of unique identifiers, and names serve as human interfaces
    to IDs.  Names are case- and whitespace-normalized (see the
    fully_normalize_name() function), and IDs conform to the regular
    expression ``[a-z](-?[a-z0-9]+)*`` (see the make_id() function).

    Elements also emulate lists for child nodes (element nodes and/or text
    nodes), indexing by integer.  To get the first child node, use::

        element[0]

    Elements may be constructed using the ``+=`` operator.  To add one new
    child node to element, do::

        element += node

    This is equivalent to ``element.append(node)``.

    To add a list of multiple child nodes at once, use the same ``+=``
    operator::

        element += [node1, node2]

    This is equivalent to ``element.extend([node1, node2])``.
    """

    list_attributes = ('ids', 'classes', 'names', 'dupnames', 'backrefs')
    """List attributes, automatically initialized to empty lists for
    all nodes."""

    tagname = None
    """The element generic identifier. If None, it is set as an instance
    attribute to the name of the class."""

    child_text_separator = '\n\n'
    """Separator for child nodes, used by `astext()` method."""

    def __init__(self, rawsource='', *children, **attributes):
        self.rawsource = rawsource
        """The raw text from which this element was constructed."""

        self.children = []
        """List of child nodes (elements and/or `Text`)."""

        self.extend(children)           # maintain parent info

        self.attributes = {}
        """Dictionary of attribute {name: value}."""

        # Initialize list attributes.
        for att in self.list_attributes:
            self.attributes[att] = []

        for att, value in attributes.items():
            att = att.lower()
            if att in self.list_attributes:
                # mutable list; make a copy for this node
                self.attributes[att] = value[:]
            else:
                self.attributes[att] = value

        if self.tagname is None:
            self.tagname = self.__class__.__name__

    def _dom_node(self, domroot):
        # Build a DOM element; list-valued attributes are serialized as
        # space-separated, escaped strings.
        element = domroot.createElement(self.tagname)
        for attribute, value in self.attlist():
            if isinstance(value, list):
                value = ' '.join([serial_escape('%s' % v) for v in value])
            element.setAttribute(attribute, '%s' % value)
        for child in self.children:
            element.appendChild(child._dom_node(domroot))
        return element

    def __repr__(self):
        # Concatenate child shortreprs, truncated to keep output short.
        data = ''
        for c in self.children:
            data += c.shortrepr()
            if len(data) > 60:
                data = data[:56] + ' ...'
                break
        if self['names']:
            return '<%s "%s": %s>' % (self.__class__.__name__,
                                      '; '.join(self['names']), data)
        else:
            return '<%s: %s>' % (self.__class__.__name__, data)

    def shortrepr(self):
        if self['names']:
            return '<%s "%s"...>' % (self.__class__.__name__,
                                     '; '.join(self['names']))
        else:
            return '<%s...>' % self.tagname

    def __unicode__(self):
        # Pseudo-XML rendering: start tag, children, end tag -- or a
        # self-closing tag when there are no children.
        if self.children:
            return u'%s%s%s' % (self.starttag(),
                                ''.join([unicode(c) for c in self.children]),
                                self.endtag())
        else:
            return self.emptytag()

    if sys.version_info > (3,):
        # 2to3 doesn't convert __unicode__ to __str__
        __str__ = __unicode__

    def starttag(self):
        parts = [self.tagname]
        for name, value in self.attlist():
            if value is None:           # boolean attribute
                parts.append(name)
            elif isinstance(value, list):
                values = [serial_escape('%s' % v) for v in value]
                parts.append('%s="%s"' % (name, ' '.join(values)))
            else:
                parts.append('%s="%s"' % (name, value))
        return '<%s>' % ' '.join(parts)

    def endtag(self):
        return '</%s>' % self.tagname

    def emptytag(self):
        return u'<%s/>' % ' '.join([self.tagname] +
                                   ['%s="%s"' % (n, v)
                                    for n, v in self.attlist()])

    def __len__(self):
        return len(self.children)

    def __contains__(self, key):
        # support both membership test for children and attributes
        # (has_key is translated to "in" by 2to3)
        if isinstance(key, basestring):
            return key in self.attributes
        return key in self.children

    def __getitem__(self, key):
        # String key -> attribute access; int/slice -> child access.
        if isinstance(key, basestring):
            return self.attributes[key]
        elif isinstance(key, int):
            return self.children[key]
        elif isinstance(key, types.SliceType):
            assert key.step in (None, 1), 'cannot handle slice with stride'
            return self.children[key.start:key.stop]
        else:
            raise TypeError, ('element index must be an integer, a slice, or '
                              'an attribute name string')

    def __setitem__(self, key, item):
        if isinstance(key, basestring):
            self.attributes[str(key)] = item
        elif isinstance(key, int):
            self.setup_child(item)
            self.children[key] = item
        elif isinstance(key, types.SliceType):
            assert key.step in (None, 1), 'cannot handle slice with stride'
            for node in item:
                self.setup_child(node)
            self.children[key.start:key.stop] = item
        else:
            raise TypeError, ('element index must be an integer, a slice, or '
                              'an attribute name string')

    def __delitem__(self, key):
        if isinstance(key, basestring):
            del self.attributes[key]
        elif isinstance(key, int):
            del self.children[key]
        elif isinstance(key, types.SliceType):
            assert key.step in (None, 1), 'cannot handle slice with stride'
            del self.children[key.start:key.stop]
        else:
            raise TypeError, ('element index must be an integer, a simple '
                              'slice, or an attribute name string')

    def __add__(self, other):
        return self.children + other

    def __radd__(self, other):
        return other + self.children

    def __iadd__(self, other):
        """Append a node or a list of nodes to `self.children`."""
        if isinstance(other, Node):
            self.append(other)
        elif other is not None:
            self.extend(other)
        return self

    def astext(self):
        return self.child_text_separator.join(
              [child.astext() for child in self.children])

    def non_default_attributes(self):
        atts = {}
        for key, value in self.attributes.items():
            if self.is_not_default(key):
                atts[key] = value
        return atts

    def attlist(self):
        # Sorted (name, value) pairs for deterministic serialization.
        attlist = self.non_default_attributes().items()
        attlist.sort()
        return attlist

    def get(self, key, failobj=None):
        return self.attributes.get(key, failobj)

    def hasattr(self, attr):
        return attr in self.attributes

    def delattr(self, attr):
        if attr in self.attributes:
            del self.attributes[attr]

    def setdefault(self, key, failobj=None):
        return self.attributes.setdefault(key, failobj)

    has_key = hasattr

    # support operator in
    # NOTE(review): this rebinding shadows the full ``__contains__``
    # defined above, so ``x in element`` only tests attribute names and
    # never child membership -- confirm whether this is intended.
    __contains__ = hasattr

    def append(self, item):
        self.setup_child(item)
        self.children.append(item)

    def extend(self, item):
        for node in item:
            self.append(node)

    def insert(self, index, item):
        if isinstance(item, Node):
            self.setup_child(item)
            self.children.insert(index, item)
        elif item is not None:
            # `item` is a sequence: splice it in at `index`.
            self[index:index] = item

    def pop(self, i=-1):
        return self.children.pop(i)

    def remove(self, item):
        self.children.remove(item)

    def index(self, item):
        return self.children.index(item)

    def is_not_default(self, key):
        # Only list attributes have a known default (the empty list).
        if self[key] == [] and key in self.list_attributes:
            return 0
        else:
            return 1

    def update_basic_atts(self, dict):
        """
        Update basic attributes ('ids', 'names', 'classes',
        'dupnames', but not 'source') from node or dictionary `dict`.
        """
        if isinstance(dict, Node):
            dict = dict.attributes
        for att in ('ids', 'classes', 'names', 'dupnames'):
            for value in dict.get(att, []):
                if not value in self[att]:
                    self[att].append(value)

    def clear(self):
        self.children = []

    def replace(self, old, new):
        """Replace one child `Node` with another child or children."""
        index = self.index(old)
        if isinstance(new, Node):
            self.setup_child(new)
            self[index] = new
        elif new is not None:
            self[index:index+1] = new

    def replace_self(self, new):
        """
        Replace `self` node with `new`, where `new` is a node or a
        list of nodes.
        """
        update = new
        if not isinstance(new, Node):
            # `new` is a list; update first child.
            try:
                update = new[0]
            except IndexError:
                update = None
        if isinstance(update, Element):
            update.update_basic_atts(self)
        else:
            # `update` is a Text node or `new` is an empty list.
            # Assert that we aren't losing any attributes.
            for att in ('ids', 'names', 'classes', 'dupnames'):
                assert not self[att], \
                       'Losing "%s" attribute: %s' % (att, self[att])
        self.parent.replace(self, new)

    def first_child_matching_class(self, childclass, start=0, end=sys.maxint):
        """
        Return the index of the first child whose class exactly matches.

        Parameters:

        - `childclass`: A `Node` subclass to search for, or a tuple of `Node`
          classes. If a tuple, any of the classes may match.
        - `start`: Initial index to check.
        - `end`: Initial index to *not* check.
        """
        if not isinstance(childclass, tuple):
            childclass = (childclass,)
        for index in range(start, min(len(self), end)):
            for c in childclass:
                if isinstance(self[index], c):
                    return index
        return None

    def first_child_not_matching_class(self, childclass, start=0,
                                       end=sys.maxint):
        """
        Return the index of the first child whose class does *not* match.

        Parameters:

        - `childclass`: A `Node` subclass to skip, or a tuple of `Node`
          classes. If a tuple, none of the classes may match.
        - `start`: Initial index to check.
        - `end`: Initial index to *not* check.
        """
        if not isinstance(childclass, tuple):
            childclass = (childclass,)
        for index in range(start, min(len(self), end)):
            for c in childclass:
                if isinstance(self.children[index], c):
                    break
            else:
                # No class matched this child: found it.
                return index
        return None

    def pformat(self, indent='    ', level=0):
        return ''.join(['%s%s\n' % (indent * level, self.starttag())] +
                       [child.pformat(indent, level+1)
                        for child in self.children])

    def copy(self):
        # Shallow copy: attributes are duplicated, children are not.
        return self.__class__(rawsource=self.rawsource, **self.attributes)

    def deepcopy(self):
        copy = self.copy()
        copy.extend([child.deepcopy() for child in self.children])
        return copy

    def set_class(self, name):
        """Add a new class to the "classes" attribute."""
        warnings.warn('docutils.nodes.Element.set_class deprecated; '
                      "append to Element['classes'] list attribute directly",
                      DeprecationWarning, stacklevel=2)
        assert ' ' not in name
        self['classes'].append(name.lower())

    def note_referenced_by(self, name=None, id=None):
        """Note that this Element has been referenced by its name
        `name` or id `id`."""
        self.referenced = 1
        # Element.expect_referenced_by_* dictionaries map names or ids
        # to nodes whose ``referenced`` attribute is set to true as
        # soon as this node is referenced by the given name or id.
        # Needed for target propagation.
        by_name = getattr(self, 'expect_referenced_by_name', {}).get(name)
        by_id = getattr(self, 'expect_referenced_by_id', {}).get(id)
        if by_name:
            assert name is not None
            by_name.referenced = 1
        if by_id:
            assert id is not None
            by_id.referenced = 1
class TextElement(Element):

    """
    An element which directly contains text.

    Its children are all `Text` or `Inline` subclass nodes.  You can
    check whether an element's context is inline simply by checking whether
    its immediate parent is a `TextElement` instance (including subclasses).
    This is handy for nodes like `image` that can appear both inline and as
    standalone body elements.

    If passing children to `__init__()`, make sure to set `text` to
    ``''`` or some other suitable value.
    """

    # astext() joins children with no separator at all.
    child_text_separator = ''

    def __init__(self, rawsource='', text='', *children, **attributes):
        # A non-empty `text` becomes the first child node, ahead of any
        # explicitly supplied children.
        if text != '':
            children = (Text(text),) + children
        Element.__init__(self, rawsource, *children, **attributes)
class FixedTextElement(TextElement):

    """An element which directly contains preformatted text."""

    def __init__(self, rawsource='', text='', *children, **attributes):
        TextElement.__init__(self, rawsource, text, *children, **attributes)
        # Mark the element so writers keep internal whitespace and line
        # breaks (mirrors XML's ``xml:space="preserve"``).
        self.attributes['xml:space'] = 'preserve'
# ========
#  Mixins
# ========

class Resolvable:

    # Set to 1 once a reference has been resolved to its target.
    resolved = 0


class BackLinkable:

    # Mixin for elements that record backlinks ('backrefs') to the
    # nodes that refer to them.
    def add_backref(self, refid):
        self['backrefs'].append(refid)


# ====================
#  Element Categories
# ====================
#
# Empty classes used purely as markers for isinstance() dispatch.

class Root: pass

class Titular: pass

class PreBibliographic:
    """Category of Node which may occur before Bibliographic Nodes."""

class Bibliographic: pass

class Decorative(PreBibliographic): pass

class Structural: pass

class Body: pass

class General(Body): pass

class Sequential(Body):
    """List-like elements."""

class Admonition(Body): pass

class Special(Body):
    """Special internal body elements."""

class Invisible(PreBibliographic):
    """Internal elements that don't appear in output."""

class Part: pass

class Inline: pass

class Referential(Resolvable): pass


class Targetable(Resolvable):

    referenced = 0

    indirect_reference_name = None
    """Holds the whitespace_normalized_name (contains mixed case) of a target.
    Required for MoinMoin/reST compatibility."""


class Labeled:
    """Contains a `label` as its first element."""
class document(Root, Structural, Element):

    """
    The document root element.

    Do not instantiate this class directly; use
    `docutils.utils.new_document()` instead.
    """

    def __init__(self, settings, reporter, *args, **kwargs):
        Element.__init__(self, *args, **kwargs)

        self.current_source = None
        """Path to or description of the input source being processed."""

        self.current_line = None
        """Line number (1-based) of `current_source`."""

        self.settings = settings
        """Runtime settings data record."""

        self.reporter = reporter
        """System message generator."""

        self.indirect_targets = []
        """List of indirect target nodes."""

        self.substitution_defs = {}
        """Mapping of substitution names to substitution_definition nodes."""

        self.substitution_names = {}
        """Mapping of case-normalized substitution names to case-sensitive
        names."""

        self.refnames = {}
        """Mapping of names to lists of referencing nodes."""

        self.refids = {}
        """Mapping of ids to lists of referencing nodes."""

        self.nameids = {}
        """Mapping of names to unique id's."""

        self.nametypes = {}
        """Mapping of names to hyperlink type (boolean: True => explicit,
        False => implicit."""

        self.ids = {}
        """Mapping of ids to nodes."""

        self.footnote_refs = {}
        """Mapping of footnote labels to lists of footnote_reference nodes."""

        self.citation_refs = {}
        """Mapping of citation labels to lists of citation_reference nodes."""

        self.autofootnotes = []
        """List of auto-numbered footnote nodes."""

        self.autofootnote_refs = []
        """List of auto-numbered footnote_reference nodes."""

        self.symbol_footnotes = []
        """List of symbol footnote nodes."""

        self.symbol_footnote_refs = []
        """List of symbol footnote_reference nodes."""

        self.footnotes = []
        """List of manually-numbered footnote nodes."""

        self.citations = []
        """List of citation nodes."""

        self.autofootnote_start = 1
        """Initial auto-numbered footnote number."""

        self.symbol_footnote_start = 0
        """Initial symbol footnote symbol index."""

        self.id_start = 1
        """Initial ID number."""

        self.parse_messages = []
        """System messages generated while parsing."""

        self.transform_messages = []
        """System messages generated while applying transforms."""

        # Imported here to avoid a circular import at module load time.
        import docutils.transforms
        self.transformer = docutils.transforms.Transformer(self)
        """Storage for transforms to be applied to this document."""

        self.decoration = None
        """Document's `decoration` node."""

        self.document = self

    def __getstate__(self):
        """
        Return dict with unpicklable references removed.
        """
        state = self.__dict__.copy()
        state['reporter'] = None
        state['transformer'] = None
        return state

    def asdom(self, dom=None):
        """Return a DOM representation of this document."""
        if dom is None:
            import xml.dom.minidom as dom
        domroot = dom.Document()
        domroot.appendChild(self._dom_node(domroot))
        return domroot

    def set_id(self, node, msgnode=None):
        # Report any pre-assigned ids that collide with existing ones.
        for id in node['ids']:
            if id in self.ids and self.ids[id] is not node:
                msg = self.reporter.severe('Duplicate ID: "%s".' % id)
                if msgnode != None:
                    msgnode += msg
        if not node['ids']:
            # No id yet: derive one from a name if possible, otherwise
            # generate an auto-numbered id.
            for name in node['names']:
                id = self.settings.id_prefix + make_id(name)
                if id and id not in self.ids:
                    break
            else:
                id = ''
                while not id or id in self.ids:
                    id = (self.settings.id_prefix +
                          self.settings.auto_id_prefix + str(self.id_start))
                    self.id_start += 1
            node['ids'].append(id)
        self.ids[id] = node
        return id

    def set_name_id_map(self, node, id, msgnode=None, explicit=None):
        """
        `self.nameids` maps names to IDs, while `self.nametypes` maps names to
        booleans representing hyperlink type (True==explicit,
        False==implicit).  This method updates the mappings.

        The following state transition table shows how `self.nameids` ("ids")
        and `self.nametypes` ("types") change with new input (a call to this
        method), and what actions are performed ("implicit"-type system
        messages are INFO/1, and "explicit"-type system messages are ERROR/3):

        ====  =====  ========  ========  =======  ====  =====  =====
         Old State    Input          Action        New State    Notes
        -----------  --------  -----------------  -----------  -----
        ids   types  new type  sys.msg.  dupname  ids   types
        ====  =====  ========  ========  =======  ====  =====  =====
        -     -      explicit  -         -        new   True
        -     -      implicit  -         -        new   False
        None  False  explicit  -         -        new   True
        old   False  explicit  implicit  old      new   True
        None  True   explicit  explicit  new      None  True
        old   True   explicit  explicit  new,old  None  True   [#]_
        None  False  implicit  implicit  new      None  False
        old   False  implicit  implicit  new,old  None  False
        None  True   implicit  implicit  new      None  True
        old   True   implicit  implicit  new      old   True
        ====  =====  ========  ========  =======  ====  =====  =====

        .. [#] Do not clear the name-to-id map or invalidate the old target if
           both old and new targets are external and refer to identical URIs.
           The new target is invalidated regardless.
        """
        for name in node['names']:
            if name in self.nameids:
                self.set_duplicate_name_id(node, id, name, msgnode, explicit)
            else:
                self.nameids[name] = id
                self.nametypes[name] = explicit

    def set_duplicate_name_id(self, node, id, name, msgnode, explicit):
        # Implements the state-transition table documented in
        # set_name_id_map() for the case where `name` is already known.
        old_id = self.nameids[name]
        old_explicit = self.nametypes[name]
        self.nametypes[name] = old_explicit or explicit
        if explicit:
            if old_explicit:
                level = 2
                if old_id is not None:
                    old_node = self.ids[old_id]
                    if 'refuri' in node:
                        refuri = node['refuri']
                        if old_node['names'] \
                               and 'refuri' in old_node \
                               and old_node['refuri'] == refuri:
                            level = 1   # just inform if refuri's identical
                    if level > 1:
                        dupname(old_node, name)
                        self.nameids[name] = None
                msg = self.reporter.system_message(
                    level, 'Duplicate explicit target name: "%s".' % name,
                    backrefs=[id], base_node=node)
                if msgnode != None:
                    msgnode += msg
                dupname(node, name)
            else:
                # Explicit target overrides an earlier implicit one.
                self.nameids[name] = id
                if old_id is not None:
                    old_node = self.ids[old_id]
                    dupname(old_node, name)
        else:
            if old_id is not None and not old_explicit:
                # Two implicit targets: both become ambiguous.
                self.nameids[name] = None
                old_node = self.ids[old_id]
                dupname(old_node, name)
            dupname(node, name)
        if not explicit or (not old_explicit and old_id is not None):
            msg = self.reporter.info(
                'Duplicate implicit target name: "%s".' % name,
                backrefs=[id], base_node=node)
            if msgnode != None:
                msgnode += msg

    def has_name(self, name):
        return name in self.nameids

    # "note" here is an imperative verb: "take note of".
    def note_implicit_target(self, target, msgnode=None):
        id = self.set_id(target, msgnode)
        self.set_name_id_map(target, id, msgnode, explicit=None)

    def note_explicit_target(self, target, msgnode=None):
        id = self.set_id(target, msgnode)
        self.set_name_id_map(target, id, msgnode, explicit=1)

    def note_refname(self, node):
        self.refnames.setdefault(node['refname'], []).append(node)

    def note_refid(self, node):
        self.refids.setdefault(node['refid'], []).append(node)

    def note_indirect_target(self, target):
        self.indirect_targets.append(target)
        if target['names']:
            self.note_refname(target)

    def note_anonymous_target(self, target):
        self.set_id(target)

    def note_autofootnote(self, footnote):
        self.set_id(footnote)
        self.autofootnotes.append(footnote)

    def note_autofootnote_ref(self, ref):
        self.set_id(ref)
        self.autofootnote_refs.append(ref)

    def note_symbol_footnote(self, footnote):
        self.set_id(footnote)
        self.symbol_footnotes.append(footnote)

    def note_symbol_footnote_ref(self, ref):
        self.set_id(ref)
        self.symbol_footnote_refs.append(ref)

    def note_footnote(self, footnote):
        self.set_id(footnote)
        self.footnotes.append(footnote)

    def note_footnote_ref(self, ref):
        self.set_id(ref)
        self.footnote_refs.setdefault(ref['refname'], []).append(ref)
        self.note_refname(ref)

    def note_citation(self, citation):
        self.citations.append(citation)

    def note_citation_ref(self, ref):
        self.set_id(ref)
        self.citation_refs.setdefault(ref['refname'], []).append(ref)
        self.note_refname(ref)

    def note_substitution_def(self, subdef, def_name, msgnode=None):
        name = whitespace_normalize_name(def_name)
        if name in self.substitution_defs:
            msg = self.reporter.error(
                  'Duplicate substitution definition name: "%s".' % name,
                  base_node=subdef)
            if msgnode != None:
                msgnode += msg
            oldnode = self.substitution_defs[name]
            dupname(oldnode, name)
        # keep only the last definition:
        self.substitution_defs[name] = subdef
        # case-insensitive mapping:
        self.substitution_names[fully_normalize_name(name)] = name

    def note_substitution_ref(self, subref, refname):
        subref['refname'] = whitespace_normalize_name(refname)

    def note_pending(self, pending, priority=None):
        self.transformer.add_pending(pending, priority)

    def note_parse_message(self, message):
        self.parse_messages.append(message)

    def note_transform_message(self, message):
        self.transform_messages.append(message)

    def note_source(self, source, offset):
        # `offset` is a 0-based line offset or None; `current_line` is
        # stored 1-based.
        self.current_source = source
        if offset is None:
            self.current_line = offset
        else:
            self.current_line = offset + 1

    def copy(self):
        return self.__class__(self.settings, self.reporter,
                              **self.attributes)

    def get_decoration(self):
        # Create the decoration node on demand, inserted after any
        # titular elements at the top of the document.
        if not self.decoration:
            self.decoration = decoration()
            index = self.first_child_not_matching_class(Titular)
            if index is None:
                self.append(self.decoration)
            else:
                self.insert(index, self.decoration)
        return self.decoration
# ================
#  Title Elements
# ================

class title(Titular, PreBibliographic, TextElement): pass
class subtitle(Titular, PreBibliographic, TextElement): pass
class rubric(Titular, TextElement): pass


# ========================
#  Bibliographic Elements
# ========================

# Concrete one-line node classes: all behavior comes from the category
# mixins and the Element/TextElement/FixedTextElement base classes.
class docinfo(Bibliographic, Element): pass
class author(Bibliographic, TextElement): pass
class authors(Bibliographic, Element): pass
class organization(Bibliographic, TextElement): pass
class address(Bibliographic, FixedTextElement): pass
class contact(Bibliographic, TextElement): pass
class version(Bibliographic, TextElement): pass
class revision(Bibliographic, TextElement): pass
class status(Bibliographic, TextElement): pass
class date(Bibliographic, TextElement): pass
class copyright(Bibliographic, TextElement): pass
# =====================
#  Decorative Elements
# =====================

class decoration(Decorative, Element):

    # Container for the document's page header and footer.

    def get_header(self):
        # Create an empty header on demand; it is always the first child.
        if not len(self.children) or not isinstance(self.children[0], header):
            self.insert(0, header())
        return self.children[0]

    def get_footer(self):
        # Create an empty footer on demand; it is always the last child.
        if not len(self.children) or not isinstance(self.children[-1], footer):
            self.append(footer())
        return self.children[-1]


class header(Decorative, Element): pass
class footer(Decorative, Element): pass
# =====================
#  Structural Elements
# =====================

class section(Structural, Element): pass


class topic(Structural, Element):

    """
    Topics are terminal, "leaf" mini-sections, like block quotes with titles,
    or textual figures.  A topic is just like a section, except that it has no
    subsections, and it doesn't have to conform to section placement rules.

    Topics are allowed wherever body elements (list, table, etc.) are allowed,
    but only at the top level of a section or document.  Topics cannot nest
    inside topics, sidebars, or body elements; you can't have a topic inside a
    table, list, block quote, etc.
    """


class sidebar(Structural, Element):

    """
    Sidebars are like miniature, parallel documents that occur inside other
    documents, providing related or reference material.  A sidebar is
    typically offset by a border and "floats" to the side of the page; the
    document's main text may flow around it.  Sidebars can also be likened to
    super-footnotes; their content is outside of the flow of the document's
    main text.

    Sidebars are allowed wherever body elements (list, table, etc.) are
    allowed, but only at the top level of a section or document.  Sidebars
    cannot nest inside sidebars, topics, or body elements; you can't have a
    sidebar inside a table, list, block quote, etc.
    """


class transition(Structural, Element): pass
# ===============
#  Body Elements
# ===============

class paragraph(General, TextElement): pass
class compound(General, Element): pass
class container(General, Element): pass
class bullet_list(Sequential, Element): pass
class enumerated_list(Sequential, Element): pass
class list_item(Part, Element): pass
class definition_list(Sequential, Element): pass
class definition_list_item(Part, Element): pass
class term(Part, TextElement): pass
class classifier(Part, TextElement): pass
class definition(Part, Element): pass
class field_list(Sequential, Element): pass
class field(Part, Element): pass
class field_name(Part, TextElement): pass
class field_body(Part, Element): pass


class option(Part, Element):

    # astext() joins the option name and argument with no separator.
    child_text_separator = ''


class option_argument(Part, TextElement):

    def astext(self):
        # Prefix the argument text with its 'delimiter' attribute
        # (defaults to a single space).
        return self.get('delimiter', ' ') + TextElement.astext(self)


class option_group(Part, Element):

    # astext() joins option synonyms with a comma.
    child_text_separator = ', '


class option_list(Sequential, Element): pass


class option_list_item(Part, Element):

    child_text_separator = '  '


class option_string(Part, TextElement): pass
class description(Part, Element): pass
class literal_block(General, FixedTextElement): pass
class doctest_block(General, FixedTextElement): pass
class line_block(General, Element): pass


class line(Part, TextElement):

    # presumably the nesting level of this line within line_blocks,
    # assigned during parsing -- TODO confirm against the parser.
    indent = None


class block_quote(General, Element): pass
class attribution(Part, TextElement): pass
class attention(Admonition, Element): pass
class caution(Admonition, Element): pass
class danger(Admonition, Element): pass
class error(Admonition, Element): pass
class important(Admonition, Element): pass
class note(Admonition, Element): pass
class tip(Admonition, Element): pass
class hint(Admonition, Element): pass
class warning(Admonition, Element): pass
class admonition(Admonition, Element): pass
class comment(Special, Invisible, FixedTextElement): pass
class substitution_definition(Special, Invisible, TextElement): pass
class target(Special, Invisible, Inline, TextElement, Targetable): pass
class footnote(General, BackLinkable, Element, Labeled, Targetable): pass
class citation(General, BackLinkable, Element, Labeled, Targetable): pass
class label(Part, TextElement): pass
class figure(General, Element): pass
class caption(Part, TextElement): pass
class legend(Part, Element): pass
class table(General, Element): pass
class tgroup(Part, Element): pass
class colspec(Part, Element): pass
class thead(Part, Element): pass
class tbody(Part, Element): pass
class row(Part, Element): pass
class entry(Part, Element): pass
class system_message(Special, BackLinkable, PreBibliographic, Element):

    """
    System message element.

    Do not instantiate this class directly; use
    ``document.reporter.info/warning/error/severe()`` instead.
    """

    def __init__(self, message=None, *children, **attributes):
        # An initial `message` string becomes the first child paragraph.
        if message:
            p = paragraph('', message)
            children = (p,) + children
        try:
            Element.__init__(self, '', *children, **attributes)
        except:
            # Debugging aid only: print the offending children, then
            # re-raise the original exception unchanged.
            print 'system_message: children=%r' % (children,)
            raise

    def astext(self):
        # Format: "source:line: (type/level) message text".
        line = self.get('line', '')
        return u'%s:%s: (%s/%s) %s' % (self['source'], line, self['type'],
                                       self['level'], Element.astext(self))
class pending(Special, Invisible, Element):

    """
    The "pending" element is used to encapsulate a pending operation: the
    operation (transform), the point at which to apply it, and any data it
    requires.  Only the pending operation's location within the document is
    stored in the public document tree (by the "pending" object itself); the
    operation and its data are stored in the "pending" object's internal
    instance attributes.

    For example, say you want a table of contents in your reStructuredText
    document.  The easiest way to specify where to put it is from within the
    document, with a directive::

        .. contents::

    But the "contents" directive can't do its work until the entire document
    has been parsed and possibly transformed to some extent.  So the directive
    code leaves a placeholder behind that will trigger the second phase of its
    processing, something like this::

        <pending ...public attributes...> + internal attributes

    Use `document.note_pending()` so that the
    `docutils.transforms.Transformer` stage of processing can run all pending
    transforms.
    """

    def __init__(self, transform, details=None,
                 rawsource='', *children, **attributes):
        Element.__init__(self, rawsource, *children, **attributes)

        self.transform = transform
        """The `docutils.transforms.Transform` class implementing the pending
        operation."""

        self.details = details or {}
        """Detail data (dictionary) required by the pending operation."""

    def pformat(self, indent='    ', level=0):
        # Render the public tree first, then append the internal attributes
        # (transform and details) as an indented pseudo-comment block.
        internals = [
              '.. internal attributes:',
              '     .transform: %s.%s' % (self.transform.__module__,
                                          self.transform.__name__),
              '     .details:']
        # Fixed: ``self.details.items()`` followed by ``.sort()`` only works
        # on Python 2 (``items()`` returns a view, not a list, on Python 3);
        # ``sorted()`` behaves identically on both versions.
        details = sorted(self.details.items())
        for key, value in details:
            if isinstance(value, Node):
                internals.append('%7s%s:' % ('', key))
                internals.extend(['%9s%s' % ('', line)
                                  for line in value.pformat().splitlines()])
            elif value and isinstance(value, list) \
                  and isinstance(value[0], Node):
                internals.append('%7s%s:' % ('', key))
                for v in value:
                    internals.extend(['%9s%s' % ('', line)
                                      for line in v.pformat().splitlines()])
            else:
                internals.append('%7s%s: %r' % ('', key, value))
        return (Element.pformat(self, indent, level)
                + ''.join([('    %s%s\n' % (indent * level, line))
                           for line in internals]))

    def copy(self):
        # Children are deliberately not carried over: only the transform,
        # details, rawsource and attributes identify the pending operation.
        return self.__class__(self.transform, self.details, self.rawsource,
                              **self.attributes)
class raw(Special, Inline, PreBibliographic, FixedTextElement):

    """
    Raw data that is to be passed untouched to the Writer.
    """

    pass

# =================
#  Inline Elements
# =================
# Inline elements carry text plus optional styling/reference semantics;
# as above, behaviour lives in the mixin base classes.
class emphasis(Inline, TextElement): pass
class strong(Inline, TextElement): pass
class literal(Inline, TextElement): pass
class reference(General, Inline, Referential, TextElement): pass
class footnote_reference(Inline, Referential, TextElement): pass
class citation_reference(Inline, Referential, TextElement): pass
class substitution_reference(Inline, TextElement): pass
class title_reference(Inline, TextElement): pass
class abbreviation(Inline, TextElement): pass
class acronym(Inline, TextElement): pass
class superscript(Inline, TextElement): pass
class subscript(Inline, TextElement): pass
class image(General, Inline, Element):
    def astext(self):
        # An image has no text content of its own; fall back to its
        # 'alt' attribute (empty string when absent).
        return self.get('alt', '')
class inline(Inline, TextElement): pass
class problematic(Inline, TextElement): pass
class generated(Inline, TextElement): pass

# ========================================
#  Auxiliary Classes, Functions, and Data
# ========================================
node_class_names = """
Text
abbreviation acronym address admonition attention attribution author
authors
block_quote bullet_list
caption caution citation citation_reference classifier colspec comment
compound contact container copyright
danger date decoration definition definition_list definition_list_item
description docinfo doctest_block document
emphasis entry enumerated_list error
field field_body field_list field_name figure footer
footnote footnote_reference
generated
header hint
image important inline
label legend line line_block list_item literal literal_block
note
option option_argument option_group option_list option_list_item
option_string organization
paragraph pending problematic
raw reference revision row rubric
section sidebar status strong subscript substitution_definition
substitution_reference subtitle superscript system_message
table target tbody term tgroup thead tip title title_reference topic
transition
version
warning""".split()
"""A list of names of all concrete Node subclasses."""
class NodeVisitor:

    """
    "Visitor" pattern base class for docutils document tree traversals.

    For every concrete node class there is a corresponding ``visit_<name>``
    and ``depart_<name>`` method.  `Node.walk()` calls `dispatch_visit()`
    upon entering a node; `Node.walkabout()` additionally calls
    `dispatch_departure()` before leaving it.

    Subclass `NodeVisitor` directly when ``visit_...`` & ``depart_...``
    methods must exist for *all* node types (such as for
    `docutils.writers.Writer` subclasses); unimplemented node types then
    raise `NotImplementedError`.  For sparse traversals subclass
    `SparseNodeVisitor`; for uniform processing subclass
    `GenericNodeVisitor`.

    .. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
       Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
       1995.
    """

    optional = ()
    """
    Node class names (as strings) whose missing ``visit_``/``depart_``
    methods are silently ignored instead of raising an exception.
    Used to ensure transitional compatibility with existing 3rd-party
    writers.
    """

    def __init__(self, document):
        self.document = document

    def dispatch_visit(self, node):
        """
        Dispatch to self."``visit_`` + node class name", falling back to
        `self.unknown_visit` when no such method is defined.
        """
        name = node.__class__.__name__
        handler = getattr(self, 'visit_' + name, self.unknown_visit)
        self.document.reporter.debug(
            'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s'
            % (handler.__name__, name))
        return handler(node)

    def dispatch_departure(self, node):
        """
        Dispatch to self."``depart_`` + node class name", falling back to
        `self.unknown_departure` when no such method is defined.
        """
        name = node.__class__.__name__
        handler = getattr(self, 'depart_' + name, self.unknown_departure)
        self.document.reporter.debug(
            'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s'
            % (handler.__name__, name))
        return handler(node)

    def unknown_visit(self, node):
        """
        Fallback for node types without a ``visit_...`` method.

        Raises an exception unless the node type is listed in ``optional``
        and strict visiting is disabled.
        """
        if (not self.document.settings.strict_visitor
                and node.__class__.__name__ in self.optional):
            return
        raise NotImplementedError(
            '%s visiting unknown node type: %s'
            % (self.__class__, node.__class__.__name__))

    def unknown_departure(self, node):
        """
        Fallback for node types without a ``depart_...`` method.

        Raises an exception unless the node type is listed in ``optional``
        and strict visiting is disabled.
        """
        if (not self.document.settings.strict_visitor
                and node.__class__.__name__ in self.optional):
            return
        raise NotImplementedError(
            '%s departing unknown node type: %s'
            % (self.__class__, node.__class__.__name__))
class SparseNodeVisitor(NodeVisitor):

    """
    Base class for sparse traversals, where only certain node types are of
    interest.  When ``visit_...`` & ``depart_...`` methods should be
    implemented for *all* node types (such as for `docutils.writers.Writer`
    subclasses), subclass `NodeVisitor` instead.
    """
    # No-op ``visit_``/``depart_`` methods for every concrete node class are
    # installed on this class by ``_add_node_class_names`` below, so
    # subclasses only need to override the handlers they care about.
class GenericNodeVisitor(NodeVisitor):

    """
    Generic "Visitor" abstract superclass, for simple traversals.

    Unless overridden, each ``visit_...`` method calls `default_visit()`, and
    each ``depart_...`` method (when using `Node.walkabout()`) calls
    `default_departure()`. `default_visit()` (and `default_departure()`) must
    be overridden in subclasses.

    Define fully generic visitors by overriding `default_visit()` (and
    `default_departure()`) only. Define semi-generic visitors by overriding
    individual ``visit_...()`` (and ``depart_...()``) methods also.

    `NodeVisitor.unknown_visit()` (`NodeVisitor.unknown_departure()`) should
    be overridden for default behavior.
    """
    # The per-node-class ``visit_``/``depart_`` methods that delegate to
    # ``default_visit``/``default_departure`` are installed dynamically by
    # ``_add_node_class_names`` below.

    def default_visit(self, node):
        """Override for generic, uniform traversals."""
        raise NotImplementedError

    def default_departure(self, node):
        """Override for generic, uniform traversals."""
        raise NotImplementedError
# The following module-level functions are installed as unbound methods on
# the visitor classes by ``_add_node_class_names`` (hence the ``self``
# parameter on plain functions).
def _call_default_visit(self, node):
    # Installed on GenericNodeVisitor as every ``visit_<name>`` method.
    self.default_visit(node)

def _call_default_departure(self, node):
    # Installed on GenericNodeVisitor as every ``depart_<name>`` method.
    self.default_departure(node)

def _nop(self, node):
    # Installed on SparseNodeVisitor: silently ignore unhandled node types.
    pass

def _add_node_class_names(names):
    """Save typing with dynamic assignments:"""
    for _name in names:
        setattr(GenericNodeVisitor, "visit_" + _name, _call_default_visit)
        setattr(GenericNodeVisitor, "depart_" + _name, _call_default_departure)
        setattr(SparseNodeVisitor, 'visit_' + _name, _nop)
        setattr(SparseNodeVisitor, 'depart_' + _name, _nop)

# Install the generated methods once, at import time.
_add_node_class_names(node_class_names)
class TreeCopyVisitor(GenericNodeVisitor):

    """
    Make a complete copy of a tree or branch, including element attributes.
    """

    def __init__(self, document):
        GenericNodeVisitor.__init__(self, document)
        self.parent_stack = []  # saved acting parents, innermost last
        # Starts out as a plain list; after the first visit it is the node
        # currently being filled with copied children (both support append).
        self.parent = []

    def get_tree_copy(self):
        # After a full walkabout the root copy is the single element that was
        # appended to the initial list.
        return self.parent[0]

    def default_visit(self, node):
        """Copy the current node, and make it the new acting parent."""
        newnode = node.copy()
        self.parent.append(newnode)
        self.parent_stack.append(self.parent)
        self.parent = newnode

    def default_departure(self, node):
        """Restore the previous acting parent."""
        self.parent = self.parent_stack.pop()
# Control-flow exceptions for pruning tree traversals.  They are raised from
# handler methods and caught by the traversal code (except where noted).
class TreePruningException(Exception):

    """
    Base class for `NodeVisitor`-related tree pruning exceptions.

    Raise subclasses from within ``visit_...`` or ``depart_...`` methods
    called from `Node.walk()` and `Node.walkabout()` tree traversals to prune
    the tree traversed.
    """

    pass

class SkipChildren(TreePruningException):

    """
    Do not visit any children of the current node.  The current node's
    siblings and ``depart_...`` method are not affected.
    """

    pass

class SkipSiblings(TreePruningException):

    """
    Do not visit any more siblings (to the right) of the current node.  The
    current node's children and its ``depart_...`` method are not affected.
    """

    pass

class SkipNode(TreePruningException):

    """
    Do not visit the current node's children, and do not call the current
    node's ``depart_...`` method.
    """

    pass

class SkipDeparture(TreePruningException):

    """
    Do not call the current node's ``depart_...`` method.  The current node's
    children and siblings are not affected.
    """

    pass

class NodeFound(TreePruningException):

    """
    Raise to indicate that the target of a search has been found.  This
    exception must be caught by the client; it is not caught by the traversal
    code.
    """

    pass

class StopTraversal(TreePruningException):

    """
    Stop the traversal alltogether.  The current node's ``depart_...`` method
    is not affected.  The parent nodes ``depart_...`` methods are also called
    as usual.  No other nodes are visited.  This is an alternative to
    NodeFound that does not cause exception handling to trickle up to the
    caller.
    """

    pass
try:
    # Python 2/3 compatibility shim: ``unicode`` only exists on Python 2.
    _text_type = unicode
except NameError:
    _text_type = str

def make_id(string):
    """
    Convert `string` into an identifier and return it.

    Docutils identifiers will conform to the regular expression
    ``[a-z](-?[a-z0-9]+)*``.  For CSS compatibility, identifiers (the "class"
    and "id" attributes) should have no underscores, colons, or periods.
    Hyphens may be used.

    - The `HTML 4.01 spec`_ defines identifiers based on SGML tokens:

          ID and NAME tokens must begin with a letter ([A-Za-z]) and may be
          followed by any number of letters, digits ([0-9]), hyphens ("-"),
          underscores ("_"), colons (":"), and periods (".").

    - However the `CSS1 spec`_ defines identifiers based on the "name" token,
      a tighter interpretation ("flex" tokenizer notation; "latin1" and
      "escape" 8-bit characters have been replaced with entities)::

          unicode     \\[0-9a-f]{1,4}
          latin1      [¡-ÿ]
          escape      {unicode}|\\[ -~¡-ÿ]
          nmchar      [-a-z0-9]|{latin1}|{escape}
          name        {nmchar}+

    The CSS1 "nmchar" rule does not include underscores ("_"), colons (":"),
    or periods ("."), therefore "class" and "id" attributes should not contain
    these characters.  They should be replaced with hyphens ("-").  Combined
    with HTML's requirements (the first character must be a letter; no
    "unicode", "latin1", or "escape" characters), this results in the
    ``[a-z](-?[a-z0-9]+)*`` pattern.

    .. _HTML 4.01 spec: http://www.w3.org/TR/html401
    .. _CSS1 spec: http://www.w3.org/TR/REC-CSS1
    """
    # Renamed local from ``id`` (shadowed the builtin) to ``id_str``.
    id_str = string.lower()
    if not isinstance(id_str, _text_type):
        # Fixed: referencing ``unicode`` directly raised NameError on
        # Python 3; the module-level ``_text_type`` shim works on both.
        id_str = id_str.decode()
    id_str = id_str.translate(_non_id_translate_digraphs)
    id_str = id_str.translate(_non_id_translate)
    # get rid of non-ascii characters.
    # 'ascii' lowercase to prevent problems with turkish locale.
    id_str = unicodedata.normalize('NFKD', id_str).\
        encode('ascii', 'ignore').decode('ascii')
    # shrink runs of whitespace and replace by hyphen
    id_str = _non_id_chars.sub('-', ' '.join(id_str.split()))
    # strip leading digits/hyphens and trailing hyphens (identifiers must
    # begin with a letter and must not end with a hyphen)
    id_str = _non_id_at_ends.sub('', id_str)
    return str(id_str)

_non_id_chars = re.compile('[^a-z0-9]+')
_non_id_at_ends = re.compile('^[-0-9]+|-+$')
# Single-character transliterations applied before the ASCII squeeze above;
# keys are Unicode ordinals, values are replacement text (see str.translate).
_non_id_translate = {
    0x00f8: u'o',       # o with stroke
    0x0111: u'd',       # d with stroke
    0x0127: u'h',       # h with stroke
    0x0131: u'i',       # dotless i
    0x0142: u'l',       # l with stroke
    0x0167: u't',       # t with stroke
    0x0180: u'b',       # b with stroke
    0x0183: u'b',       # b with topbar
    0x0188: u'c',       # c with hook
    0x018c: u'd',       # d with topbar
    0x0192: u'f',       # f with hook
    0x0199: u'k',       # k with hook
    0x019a: u'l',       # l with bar
    0x019e: u'n',       # n with long right leg
    0x01a5: u'p',       # p with hook
    0x01ab: u't',       # t with palatal hook
    0x01ad: u't',       # t with hook
    0x01b4: u'y',       # y with hook
    0x01b6: u'z',       # z with stroke
    0x01e5: u'g',       # g with stroke
    0x0225: u'z',       # z with hook
    0x0234: u'l',       # l with curl
    0x0235: u'n',       # n with curl
    0x0236: u't',       # t with curl
    0x0237: u'j',       # dotless j
    0x023c: u'c',       # c with stroke
    0x023f: u's',       # s with swash tail
    0x0240: u'z',       # z with swash tail
    0x0247: u'e',       # e with stroke
    0x0249: u'j',       # j with stroke
    0x024b: u'q',       # q with hook tail
    0x024d: u'r',       # r with stroke
    0x024f: u'y',       # y with stroke
    }
# Two-character transliterations (must run before the single-char table so
# digraphs are not lost to the ASCII 'ignore' step).
_non_id_translate_digraphs = {
    0x00df: u'sz',      # ligature sz
    0x00e6: u'ae',      # ae
    0x0153: u'oe',      # ligature oe
    0x0238: u'db',      # db digraph
    0x0239: u'qp',      # qp digraph
    }
def dupname(node, name):
    """Mark `name` as a duplicate on `node`.

    The name is moved from the node's unique ``names`` list into its
    ``dupnames`` list, and the node is flagged as referenced.
    """
    dupnames, names = node['dupnames'], node['names']
    dupnames.append(name)
    names.remove(name)
    # Pretend the node is referenced even though it isn't, so that no
    # unnecessary system_messages are emitted for it.
    node.referenced = 1
def fully_normalize_name(name):
    """Return `name` lowercased with all whitespace runs collapsed to
    single spaces (case- and whitespace-normalization)."""
    words = name.lower().split()
    return ' '.join(words)
def whitespace_normalize_name(name):
    """Return `name` with all whitespace runs collapsed to single spaces
    (case is preserved)."""
    words = name.split()
    return ' '.join(words)
def serial_escape(value):
    """Escape string values that are elements of a list, for serialization.

    Backslashes are doubled and spaces are backslash-escaped, so the
    result can be safely joined with spaces.
    """
    escaped = value.replace('\\', '\\\\')
    return escaped.replace(' ', '\\ ')
#
#
# Local Variables:
# indent-tabs-mode: nil
# sentence-end-double-space: t
# fill-column: 78
# End:
| akiokio/centralfitestoque | src/.pycharm_helpers/docutils/nodes.py | Python | bsd-2-clause | 64,876 | [
"VisIt"
] | dad2253736a10d4bda25d80e251b6fc99802b1c6f2a66d32250a36d377d37a8a |
from django.test import TestCase
from restclients.upass import get_upass_url, get_upass_status
from restclients.models.upass import UPassStatus
from restclients.exceptions import DataFailureException
from restclients.test import fdao_upass_override
@fdao_upass_override
class UPassTest(TestCase):
    """Tests for the U-PASS restclient, using the fdao mock-data override."""

    def test_javerage(self):
        """Status lookups for student, employee, and error cases."""
        status = get_upass_status("javerage")
        self.assertTrue(status.is_current)
        self.assertTrue(status.is_student)
        self.assertFalse(status.is_employee)

        status_json = status.json_data()
        self.assertIsNotNone(status_json['status_message'])
        self.assertTrue(status_json['is_current'])
        self.assertFalse(status_json['is_employee'])
        self.assertIsNotNone(str(status))

        status = get_upass_status("javeragefac")
        self.assertTrue(status.is_current)
        self.assertTrue(status.is_employee)

        status = get_upass_status("phil")
        self.assertFalse(status.is_current)
        self.assertFalse(status.is_student)

        self.assertRaises(DataFailureException,
                          get_upass_status,
                          "none")
        self.assertRaises(Exception,
                          get_upass_status,
                          "jerror")

    def test_get_url(self):
        """URL construction for a given netid."""
        # Fixed: assertEquals is a deprecated alias (removed in Python
        # 3.12); assertEqual is the supported spelling.
        self.assertEqual(get_upass_url("javerage"),
                         "/MyUWUpass/MyUWUpass.aspx?id=javerage")

    def test_message_parsing(self):
        """UPassStatus.create derives the status flags from raw HTML."""
        fac_message = ("<p><span class='highlight'>Your Faculty/Staff U-PASS"
                       " Membership is current.</span></p><p>It can take 24 to"
                       " 48 hours after purchase or Husky Card replacement"
                       " for your U-PASS to be transmitted to ORCA readers."
                       " You must tap your card on an ORCA reader within 60"
                       " days from purchase or receiving a replacement Husky"
                       " Card. This updates your smart chip and finalizes"
                       " activation of your U-PASS.</p><p><a"
                       " href='http://www.washington.edu/u-pass'>Learn more"
                       "</a> about U-PASS program member benefits, finalizing"
                       " activation, and the U-PASS terms of use.</p>")

        stu_message = ("<p><span class='highlight'>Your Student U-PASS "
                       "Membership is current.</span></p><p>It can take 24 "
                       "to 48 hours after issuance or Husky Card replacement "
                       "for your U-PASS to be transmitted to ORCA readers. "
                       "You must tap your card on an ORCA reader within 60 "
                       "days from U-PASS issuance or receiving a replacement "
                       "Husky Card. This updates your smart chip and "
                       "finalizes activation of your U-PASS.</p><p><a "
                       "href='http://www.washington.edu/u-pass'>Learn more</a>"
                       " about U-PASS program member benefits and finalizing "
                       "activation.</p>")

        not_current = ("<p><span class='highlight'>Your U-PASS is not current."
                       "</span></p><p>"
                       "<a href='http://www.washington.edu/u-pass'>Learn more"
                       "</a> about U-PASS program member benefits.</p>")

        nc_status = UPassStatus.create(not_current)
        self.assertFalse(nc_status.is_current)
        self.assertFalse(nc_status.is_employee)
        self.assertFalse(nc_status.is_student)

        stu_status = UPassStatus.create(stu_message)
        self.assertTrue(stu_status.is_current)
        self.assertFalse(stu_status.is_employee)
        self.assertTrue(stu_status.is_student)

        fac_status = UPassStatus.create(fac_message)
        self.assertTrue(fac_status.is_current)
        self.assertTrue(fac_status.is_employee)
        self.assertFalse(fac_status.is_student)
| uw-it-aca/uw-restclients | restclients/test/upass.py | Python | apache-2.0 | 3,953 | [
"ORCA"
] | 0821fe0b06179379df018a760144bc77a8671315fa430403e6ac54df90285077 |
# Zeobuilder is an extensible GUI-toolkit for molecular model construction.
# Copyright (C) 2007 - 2009 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of Zeobuilder.
#
# Zeobuilder is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "ZEOBUILDER: a GUI toolkit for the construction of complex molecules on the
# nanoscale with building blocks", Toon Verstraelen, Veronique Van Speybroeck
# and Michel Waroquier, Journal of Chemical Information and Modeling, Vol. 48
# (7), 1530-1541, 2008
# DOI:10.1021/ci8000748
#
# Zeobuilder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import numpy
from zeobuilder import context
from zeobuilder.filters import LoadFilter, DumpFilter, FilterError
from zeobuilder.nodes.glcontainermixin import GLContainerMixin
import zeobuilder.authors as authors
from molmod.data.periodic import periodic
from molmod.units import angstrom, deg
class LoadG03ZMAT(LoadFilter):
    # File-load filter that parses a Gaussian Z-matrix file, converts the
    # internal coordinates to cartesian positions, and builds Zeobuilder
    # Atom/Point nodes inside a new Universe.
    authors = [authors.toon_verstraelen]

    def __init__(self):
        LoadFilter.__init__(self, "The Gaussian Z-Matrix format (*.g03zmat)")

    def __call__(self, f):
        """Parse the open file `f`; return the new [universe, folder] pair."""
        Universe = context.application.plugins.get_node("Universe")
        universe = Universe()
        Folder = context.application.plugins.get_node("Folder")
        folder = Folder()
        Atom = context.application.plugins.get_node("Atom")
        Point = context.application.plugins.get_node("Point")
        # read the z-matrix
        # Rows 0..2 are special: atom 0 has no references, atom 1 one
        # (distance), atom 2 two (distance + angle); all later rows have a
        # full distance/angle/dihedral triple, i.e. 7 fields.
        symbols = []
        labels = []
        indices = []
        for line in f:
            words = line.split()
            if len(words) == 0:
                break
            if len(symbols) < 3 and len(words) != 2*len(symbols)+1:
                raise FilterError("The number of fields in the first three lines is incorrect.")
            if len(symbols) >= 3 and len(words) != 7:
                raise FilterError("Each line in the z-matrix must contain 7 fields, except for the first three lines.")
            symbols.append(words[0])
            try:
                # words[1::2] are the reference atom numbers: 1-based in the
                # file, stored 0-based here.
                indices.append(tuple(int(word)-1 for word in words[1::2]))
            except ValueError:
                raise FilterError("Indices in the z-matrix must be integers")
            # words[2::2] are the distance/angle/dihedral labels (or literal
            # numeric values).
            labels.append(tuple(words[2::2]))
        # read the label-value map
        mapping = {}
        for line in f:
            words = line.split()
            if len(words) == 0:
                break
            if len(words) != 2:
                raise FilterError("The label-value mapping below the z-matrix must have two fields on each line.")
            try:
                mapping[words[0]] = float(words[1])
            except ValueError:
                raise FilterError("The second field in the label-value mapping below the z-matrix must be a floating point value.")
        # convert labels to values
        values = []
        for row in labels:
            tmp = []
            for label in row:
                value = mapping.get(label)
                if value is None:
                    # Not a known label: the field must be a literal number.
                    try:
                        value = float(label)
                    except ValueError:
                        raise FilterError("Could not look up the label '%s' and could not convert it to a floating point value." % label)
                if len(tmp) == 0:
                    value *= angstrom # convert distances to atomic units
                else:
                    value *= deg # convert angles to radians
                tmp.append(value)
            values.append(tmp)
        # now turn all this into cartesian coordinates.
        coordinates = numpy.zeros((len(symbols),3),float)
        # special cases for the first coordinates
        # Atom 0 sits at the origin, atom 1 on the z-axis, atom 2 in the
        # y-z plane relative to its first reference atom.
        coordinates[1,2] = values[1][0]
        delta_z = numpy.sign(coordinates[indices[2][1],2] - coordinates[indices[2][0],2])
        coordinates[2,2] = values[2][0]*delta_z*numpy.cos(values[2][1])
        coordinates[2,1] = values[2][0]*delta_z*numpy.sin(values[2][1])
        coordinates[2] += coordinates[indices[2][0]]
        for i, irow, vrow in zip(xrange(3,len(indices)), indices[3:], values[3:]):
            # Build a local orthonormal frame (tmp_x, tmp_y, tmp_z) from the
            # three reference atoms, then place atom i at spherical
            # coordinates (distance, angle, dihedral) within that frame.
            tmp_z = coordinates[irow[1]] - coordinates[irow[0]]
            tmp_z /= numpy.linalg.norm(tmp_z)
            tmp_x = coordinates[irow[2]] - coordinates[irow[1]]
            tmp_x -= tmp_z*numpy.dot(tmp_z, tmp_x)
            tmp_x /= numpy.linalg.norm(tmp_x)
            tmp_y = numpy.cross(tmp_z, tmp_x)
            x = vrow[0]*numpy.cos(vrow[2])*numpy.sin(vrow[1])
            y = vrow[0]*numpy.sin(vrow[2])*numpy.sin(vrow[1])
            z = vrow[0]*numpy.cos(vrow[1])
            coordinates[i] = x*tmp_x + y*tmp_y + z*tmp_z + coordinates[irow[0]]
        for i, symbol, coordinate in zip(xrange(len(symbols)), symbols, coordinates):
            extra = {"index": i}
            atom_record = periodic[symbol]
            if atom_record is None:
                # Unknown element symbol: fall back to a dummy Point node.
                atom = Point(name=symbol, extra=extra)
            else:
                atom = Atom(name=symbol, number=atom_record.number, extra=extra)
            atom.transformation.t[:] = coordinate
            universe.add(atom)
        return [universe, folder]
# File-extension -> load filter registry; presumably discovered by the
# Zeobuilder plugin machinery when this module is imported -- TODO confirm.
load_filters = {
    "g03zmat": LoadG03ZMAT(),
}
| woutersmet/Zeosummer | share/plugins/molecular/g03zmat.py | Python | gpl-3.0 | 5,998 | [
"Gaussian"
] | f66e49628301dec9136dfc4f36775c253d8d5e7483f640f82ed9e172adf72f19 |
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Andrew Dykstra <andrew.r.dykstra@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
# Daniel McCloy <dan.mccloy@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import sys
from collections import OrderedDict
from copy import deepcopy
from functools import partial
import numpy as np
from scipy import sparse
from ..defaults import HEAD_SIZE_DEFAULT, _handle_default
from ..transforms import _frame_to_str
from ..utils import (verbose, logger, warn,
_check_preload, _validate_type, fill_doc, _check_option)
from ..io.compensator import get_current_comp
from ..io.constants import FIFF
from ..io.meas_info import anonymize_info, Info, MontageMixin, create_info
from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type,
_check_excludes_includes, _contains_ch_type,
channel_indices_by_type, pick_channels, _picks_to_idx,
_get_channel_types, get_channel_type_constants)
from ..io.write import DATE_NONE
from ..io._digitization import _get_data_as_dict_from_dig
def _get_meg_system(info):
    """Educated guess for the helmet type based on channels.

    Parameters
    ----------
    info : instance of Info
        The measurement info; only ``info['chs']`` is inspected.

    Returns
    -------
    system : str
        The guessed system name ('306m', 'CTF_275', ..., or 'unknown').
    have_helmet : bool
        Whether a usable helmet surface exists for this system.
    """
    have_helmet = True
    for ch in info['chs']:
        if ch['kind'] == FIFF.FIFFV_MEG_CH:
            # Only take first 16 bits, as higher bits store CTF grad comp order
            coil_type = ch['coil_type'] & 0xFFFF
            if coil_type == FIFF.FIFFV_COIL_NM_122:
                system = '122m'
                break
            elif coil_type // 1000 == 3:  # All Vectorview coils are 30xx
                system = '306m'
                break
            elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
                    coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
                # The MEG channel count is only needed to tell the two Magnes
                # variants apart, so compute it here instead of on every
                # loop iteration as before.
                nmag = np.sum(
                    [c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']])
                system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
                break
            elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
                system = 'CTF_275'
                break
            elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
                system = 'KIT'
                # Our helmet does not match very well, so let's just create it
                have_helmet = False
                break
            elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
                system = 'BabySQUID'
                break
            elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD:
                system = 'ARTEMIS123'
                have_helmet = False
                break
            else:
                system = 'unknown'
                have_helmet = False
                break
    else:
        # Bug fix: when info contains no MEG channels at all, the loop never
        # assigned ``system`` and the return raised UnboundLocalError.
        system = 'unknown'
        have_helmet = False
    return system, have_helmet
def _get_ch_type(inst, ch_type, allow_ref_meg=False):
    """Choose a single channel type (usually for plotting).

    When `ch_type` is None, return the first type from a fixed priority
    list (mags, then grads, then EEG, ...) that is present in `inst`;
    otherwise `ch_type` is returned unchanged.
    """
    if ch_type is not None:
        return ch_type
    candidates = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd',
                  'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude',
                  'fnirs_fd_phase', 'fnirs_od', 'hbo', 'hbr',
                  'ecog', 'seeg']
    if allow_ref_meg:
        candidates.append('ref_meg')
    # An Info object needs an explicit helper; other instances support
    # the ``in`` operator directly (ContainsMixin).
    if isinstance(inst, Info):
        def _present(name):
            return _contains_ch_type(inst, name)
    else:
        def _present(name):
            return name in inst
    for candidate in candidates:
        if _present(candidate):
            return candidate
    raise RuntimeError('No plottable channel types found')
@verbose
def equalize_channels(instances, copy=True, verbose=None):
    """Equalize channel picks and ordering across multiple MNE-Python objects.

    First, all channels that are not common to each object are dropped. Then,
    using the first object in the list as a template, the channels of each
    object are re-ordered to match the template. The end result is that all
    given objects define the same channels, in the same order.

    Parameters
    ----------
    instances : list
        A list of MNE-Python objects to equalize the channels for. Objects can
        be of type Raw, Epochs, Evoked, AverageTFR, Forward, Covariance,
        CrossSpectralDensity or Info.
    copy : bool
        When dropping and/or re-ordering channels, an object will be copied
        when this parameter is set to ``True`` (the default). When set to
        ``False`` the dropping and re-ordering of channels happens in-place.

        .. versionadded:: 0.20.0
    %(verbose)s

    Returns
    -------
    equalized_instances : list
        A list of MNE-Python objects that have the same channels defined in the
        same order.

    Notes
    -----
    This function operates in place when ``copy=False``.
    """
    from ..cov import Covariance
    from ..io.base import BaseRaw
    from ..io.meas_info import Info
    from ..epochs import BaseEpochs
    from ..evoked import Evoked
    from ..forward import Forward
    from ..time_frequency import _BaseTFR, CrossSpectralDensity

    # Instances need to have a `ch_names` attribute and a `pick_channels`
    # method that supports `ordered=True`.
    allowed_types = (BaseRaw, BaseEpochs, Evoked, _BaseTFR, Forward,
                     Covariance, CrossSpectralDensity, Info)
    allowed_types_str = ("Raw, Epochs, Evoked, TFR, Forward, Covariance, "
                         "CrossSpectralDensity or Info")
    for inst in instances:
        _validate_type(inst, allowed_types, "Instances to be modified",
                       allowed_types_str)

    chan_template = instances[0].ch_names
    logger.info('Identifying common channels ...')
    channels = [set(inst.ch_names) for inst in instances]
    common_channels = set(chan_template).intersection(*channels)
    all_channels = set(chan_template).union(*channels)
    dropped = list(set(all_channels - common_channels))

    # Preserve the order of chan_template (set operations destroy ordering)
    order = np.argsort([chan_template.index(ch) for ch in common_channels])
    common_channels = np.array(list(common_channels))[order].tolist()

    # Update all instances to match the common_channels list
    reordered = False
    equalized_instances = []
    for inst in instances:
        # Only perform picking when needed
        if inst.ch_names != common_channels:
            if copy:
                inst = inst.copy()
            inst.pick_channels(common_channels, ordered=True)
            # Same channel count after picking means nothing was dropped
            # from this instance -- its channels were merely re-ordered.
            if len(inst.ch_names) == len(common_channels):
                reordered = True
        equalized_instances.append(inst)

    if dropped:
        logger.info('Dropped the following channels:\n%s' % dropped)
    elif reordered:
        logger.info('Channels have been re-ordered.')

    return equalized_instances
class ContainsMixin(object):
    """Mixin class for Raw, Evoked, Epochs."""

    def __contains__(self, ch_type):
        """Check channel type membership.

        Parameters
        ----------
        ch_type : str
            Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc.

        Returns
        -------
        in : bool
            Whether or not the instance contains the given channel type.

        Examples
        --------
        Channel type membership can be tested as::

            >>> 'meg' in inst  # doctest: +SKIP
            True
            >>> 'seeg' in inst  # doctest: +SKIP
            False
        """
        # 'meg' is a composite type: present whenever either magnetometers
        # or gradiometers are present.
        if ch_type == 'meg':
            has_ch_type = (_contains_ch_type(self.info, 'mag') or
                           _contains_ch_type(self.info, 'grad'))
        else:
            has_ch_type = _contains_ch_type(self.info, ch_type)
        return has_ch_type

    @property
    def compensation_grade(self):
        """The current gradient compensation grade."""
        return get_current_comp(self.info)

    @fill_doc
    def get_channel_types(self, picks=None, unique=False, only_data_chs=False):
        """Get a list of channel type for each channel.

        Parameters
        ----------
        %(picks_all)s
        unique : bool
            Whether to return only unique channel types. Default is ``False``.
        only_data_chs : bool
            Whether to ignore non-data channels. Default is ``False``.

        Returns
        -------
        channel_types : list
            The channel types.
        """
        return _get_channel_types(self.info, picks=picks, unique=unique,
                                  only_data_chs=only_data_chs)

    @fill_doc
    def get_montage(self):
        """Get a DigMontage from instance.

        Returns
        -------
        %(montage)s
        """
        from ..channels.montage import make_dig_montage
        # Without digitization points no montage can be constructed.
        if self.info['dig'] is None:
            return None
        # obtain coord_frame, and landmark coords
        # (nasion, lpa, rpa, hsp, hpi) from DigPoints
        montage_bunch = _get_data_as_dict_from_dig(self.info['dig'])
        coord_frame = _frame_to_str.get(montage_bunch.coord_frame)

        # get the channel names and chs data structure
        ch_names, chs = self.info['ch_names'], self.info['chs']
        # Only electrode-bearing channel types contribute positions.
        picks = pick_types(self.info, meg=False, eeg=True,
                           seeg=True, ecog=True)

        # channel positions from dig do not match ch_names one to one,
        # so use loc[:3] instead
        ch_pos = {ch_names[ii]: chs[ii]['loc'][:3] for ii in picks}

        # create montage
        montage = make_dig_montage(
            ch_pos=ch_pos,
            coord_frame=coord_frame,
            nasion=montage_bunch.nasion,
            lpa=montage_bunch.lpa,
            rpa=montage_bunch.rpa,
            hsp=montage_bunch.hsp,
            hpi=montage_bunch.hpi,
        )
        return montage
channel_type_constants = get_channel_type_constants()
# Map human-readable channel type name -> FIFF channel "kind" constant.
# NOTE(review): the fallback for a missing 'kind' is FIFFV_COIL_NONE, which
# is a coil constant rather than a kind constant -- confirm this is intended.
_human2fiff = {k: v.get('kind', FIFF.FIFFV_COIL_NONE) for k, v in
               channel_type_constants.items()}
# Map human-readable channel type name -> FIFF unit constant.
_human2unit = {k: v.get('unit', FIFF.FIFF_UNIT_NONE) for k, v in
               channel_type_constants.items()}
# Map FIFF unit constant -> printable unit abbreviation.
_unit2human = {FIFF.FIFF_UNIT_V: 'V',
               FIFF.FIFF_UNIT_T: 'T',
               FIFF.FIFF_UNIT_T_M: 'T/m',
               FIFF.FIFF_UNIT_MOL: 'M',
               FIFF.FIFF_UNIT_NONE: 'NA',
               FIFF.FIFF_UNIT_CEL: 'C'}
def _check_set(ch, projs, ch_type):
    """Ensure type change is compatible with projectors."""
    target_kind = _human2fiff[ch_type]
    if ch['kind'] != target_kind:
        # Changing the kind of a channel that participates in a projector
        # would silently invalidate the projection, so refuse.
        offending = [proj for proj in projs
                     if ch['ch_name'] in proj['data']['col_names']]
        if offending:
            raise RuntimeError('Cannot change channel type for channel %s '
                               'in projector "%s"'
                               % (ch['ch_name'], offending[0]['desc']))
    ch['kind'] = target_kind
class SetChannelsMixin(MontageMixin):
    """Mixin class for Raw, Evoked, Epochs."""
    @verbose
    def set_eeg_reference(self, ref_channels='average', projection=False,
                          ch_type='auto', forward=None, verbose=None):
        """Specify which reference to use for EEG data.
        Use this function to explicitly specify the desired reference for EEG.
        This can be either an existing electrode or a new virtual channel.
        This function will re-reference the data according to the desired
        reference.
        Parameters
        ----------
        %(set_eeg_reference_ref_channels)s
        %(set_eeg_reference_projection)s
        %(set_eeg_reference_ch_type)s
        %(set_eeg_reference_forward)s
        %(verbose_meth)s
        Returns
        -------
        inst : instance of Raw | Epochs | Evoked
            Data with EEG channels re-referenced. If ``ref_channels='average'``
            and ``projection=True`` a projection will be added instead of
            directly re-referencing the data.
        %(set_eeg_reference_see_also_notes)s
        """
        from ..io.reference import set_eeg_reference
        # copy=False: operate in place.  The helper returns (inst, ref_data);
        # only the re-referenced instance is handed back to the caller.
        return set_eeg_reference(self, ref_channels=ref_channels, copy=False,
                                 projection=projection, ch_type=ch_type,
                                 forward=forward)[0]
    def _get_channel_positions(self, picks=None):
        """Get channel locations from info.
        Parameters
        ----------
        picks : str | list | slice | None
            None gets good data indices.
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        picks = _picks_to_idx(self.info, picks)
        chs = self.info['chs']
        # The first three entries of 'loc' hold the sensor position (x, y, z).
        pos = np.array([chs[k]['loc'][:3] for k in picks])
        n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
        if n_zero > 1:  # XXX some systems have origin (0, 0, 0)
            raise ValueError('Could not extract channel positions for '
                             '{} channels'.format(n_zero))
        return pos
    def _set_channel_positions(self, pos, names):
        """Update channel locations in info.
        Parameters
        ----------
        pos : array-like | np.ndarray, shape (n_points, 3)
            The channel positions to be set.
        names : list of str
            The names of the channels to be set.
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        if len(pos) != len(names):
            raise ValueError('Number of channel positions not equal to '
                             'the number of names given.')
        pos = np.asarray(pos, dtype=np.float64)
        if pos.shape[-1] != 3 or pos.ndim != 2:
            msg = ('Channel positions must have the shape (n_points, 3) '
                   'not %s.' % (pos.shape,))
            raise ValueError(msg)
        for name, p in zip(names, pos):
            if name in self.ch_names:
                idx = self.ch_names.index(name)
                # Only the position entries of 'loc' are overwritten; the
                # remaining (orientation) entries are left untouched.
                self.info['chs'][idx]['loc'][:3] = p
            else:
                msg = ('%s was not found in the info. Cannot be updated.'
                       % name)
                raise ValueError(msg)
    @verbose
    def set_channel_types(self, mapping, verbose=None):
        """Define the sensor type of channels.
        Parameters
        ----------
        mapping : dict
            A dictionary mapping a channel to a sensor type (str), e.g.,
            ``{'EEG061': 'eog'}``.
        %(verbose_meth)s
        Returns
        -------
        inst : instance of Raw | Epochs | Evoked
            The instance (modified in place).
        .. versionchanged:: 0.20
           Return the instance.
        Notes
        -----
        The following sensor types are accepted:
        ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog,
        hbo, hbr, fnirs_cw_amplitude, fnirs_fd_ac_amplitude,
        fnirs_fd_phase, fnirs_od
        .. versionadded:: 0.9.0
        """
        ch_names = self.info['ch_names']
        # first check and assemble clean mappings of index and name
        unit_changes = dict()
        for ch_name, ch_type in mapping.items():
            if ch_name not in ch_names:
                raise ValueError("This channel name (%s) doesn't exist in "
                                 "info." % ch_name)
            c_ind = ch_names.index(ch_name)
            if ch_type not in _human2fiff:
                # _human2unit and _human2fiff are built from the same keys,
                # so either is fine for listing the accepted types.
                raise ValueError('This function cannot change to this '
                                 'channel type: %s. Accepted channel types '
                                 'are %s.'
                                 % (ch_type,
                                    ", ".join(sorted(_human2unit.keys()))))
            # Set sensor type
            _check_set(self.info['chs'][c_ind], self.info['projs'], ch_type)
            unit_old = self.info['chs'][c_ind]['unit']
            unit_new = _human2unit[ch_type]
            if unit_old not in _unit2human:
                raise ValueError("Channel '%s' has unknown unit (%s). Please "
                                 "fix the measurement info of your data."
                                 % (ch_name, unit_old))
            if unit_old != _human2unit[ch_type]:
                # Record each unit change so a single consolidated warning
                # per (old, new) pair can be emitted after the loop.
                this_change = (_unit2human[unit_old], _unit2human[unit_new])
                if this_change not in unit_changes:
                    unit_changes[this_change] = list()
                unit_changes[this_change].append(ch_name)
            self.info['chs'][c_ind]['unit'] = _human2unit[ch_type]
            # Pick the coil type matching the new channel type.
            if ch_type in ['eeg', 'seeg', 'ecog']:
                coil_type = FIFF.FIFFV_COIL_EEG
            elif ch_type == 'hbo':
                coil_type = FIFF.FIFFV_COIL_FNIRS_HBO
            elif ch_type == 'hbr':
                coil_type = FIFF.FIFFV_COIL_FNIRS_HBR
            elif ch_type == 'fnirs_cw_amplitude':
                coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE
            elif ch_type == 'fnirs_fd_ac_amplitude':
                coil_type = FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE
            elif ch_type == 'fnirs_fd_phase':
                coil_type = FIFF.FIFFV_COIL_FNIRS_FD_PHASE
            elif ch_type == 'fnirs_od':
                coil_type = FIFF.FIFFV_COIL_FNIRS_OD
            else:
                coil_type = FIFF.FIFFV_COIL_NONE
            self.info['chs'][c_ind]['coil_type'] = coil_type
        msg = "The unit for channel(s) {0} has changed from {1} to {2}."
        for this_change, names in unit_changes.items():
            warn(msg.format(", ".join(sorted(names)), *this_change))
        return self
    @fill_doc
    def rename_channels(self, mapping):
        """Rename channels.
        Parameters
        ----------
        %(rename_channels_mapping)s
        Returns
        -------
        inst : instance of Raw | Epochs | Evoked
            The instance (modified in place).
        .. versionchanged:: 0.20
           Return the instance.
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        # Delegates to the module-level rename_channels().
        rename_channels(self.info, mapping)
        return self
    @verbose
    def plot_sensors(self, kind='topomap', ch_type=None, title=None,
                     show_names=False, ch_groups=None, to_sphere=True,
                     axes=None, block=False, show=True, sphere=None,
                     verbose=None):
        """Plot sensor positions.
        Parameters
        ----------
        kind : str
            Whether to plot the sensors as 3d, topomap or as an interactive
            sensor selection dialog. Available options 'topomap', '3d',
            'select'. If 'select', a set of channels can be selected
            interactively by using lasso selector or clicking while holding
            control key. The selected channels are returned along with the
            figure instance. Defaults to 'topomap'.
        ch_type : None | str
            The channel type to plot. Available options 'mag', 'grad', 'eeg',
            'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad,
            eeg, seeg and ecog channels are plotted. If None (default), then
            channels are chosen in the order given above.
        title : str | None
            Title for the figure. If None (default), equals to ``'Sensor
            positions (%%s)' %% ch_type``.
        show_names : bool | array of str
            Whether to display all channel names. If an array, only the channel
            names in the array are shown. Defaults to False.
        ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None
            Channel groups for coloring the sensors. If None (default), default
            coloring scheme is used. If 'position', the sensors are divided
            into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If
            array, the channels are divided by picks given in the array.
            .. versionadded:: 0.13.0
        to_sphere : bool
            Whether to project the 3d locations to a sphere. When False, the
            sensor array appears similar as to looking downwards straight above
            the subject's head. Has no effect when kind='3d'. Defaults to True.
            .. versionadded:: 0.14.0
        axes : instance of Axes | instance of Axes3D | None
            Axes to draw the sensors to. If ``kind='3d'``, axes must be an
            instance of Axes3D. If None (default), a new axes will be created.
            .. versionadded:: 0.13.0
        block : bool
            Whether to halt program execution until the figure is closed.
            Defaults to False.
            .. versionadded:: 0.13.0
        show : bool
            Show figure if True. Defaults to True.
        %(topomap_sphere_auto)s
        %(verbose_meth)s
        Returns
        -------
        fig : instance of Figure
            Figure containing the sensor topography.
        selection : list
            A list of selected channels. Only returned if ``kind=='select'``.
        See Also
        --------
        mne.viz.plot_layout
        Notes
        -----
        This function plots the sensor locations from the info structure using
        matplotlib. For drawing the sensors using mayavi see
        :func:`mne.viz.plot_alignment`.
        .. versionadded:: 0.12.0
        """
        # Thin wrapper around the viz helper operating on ``self.info``.
        from ..viz.utils import plot_sensors
        return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title,
                            show_names=show_names, ch_groups=ch_groups,
                            to_sphere=to_sphere, axes=axes, block=block,
                            show=show, sphere=sphere, verbose=verbose)
    @verbose
    def anonymize(self, daysback=None, keep_his=False, verbose=None):
        """Anonymize measurement information in place.
        Parameters
        ----------
        %(anonymize_info_parameters)s
        %(verbose)s
        Returns
        -------
        inst : instance of Raw | Epochs | Evoked
            The modified instance.
        Notes
        -----
        %(anonymize_info_notes)s
        .. versionadded:: 0.13.0
        """
        anonymize_info(self.info, daysback=daysback, keep_his=keep_his,
                       verbose=verbose)
        self.set_meas_date(self.info['meas_date'])  # unify annot update
        return self
    def set_meas_date(self, meas_date):
        """Set the measurement start date.
        Parameters
        ----------
        meas_date : datetime | float | tuple | None
            The new measurement date.
            If datetime object, it must be timezone-aware and in UTC.
            A tuple of (seconds, microseconds) or float (alias for
            ``(meas_date, 0)``) can also be passed and a datetime
            object will be automatically created. If None, will remove
            the time reference.
        Returns
        -------
        inst : instance of Raw | Epochs | Evoked
            The modified raw instance. Operates in place.
        See Also
        --------
        mne.io.Raw.anonymize
        Notes
        -----
        If you want to remove all time references in the file, call
        :func:`mne.io.anonymize_info(inst.info) <mne.io.anonymize_info>`
        after calling ``inst.set_meas_date(None)``.
        .. versionadded:: 0.20
        """
        from ..annotations import _handle_meas_date
        # Normalize tuple/float/None input into a tz-aware datetime (or None).
        meas_date = _handle_meas_date(meas_date)
        self.info['meas_date'] = meas_date
        # clear file_id and meas_id if needed
        if meas_date is None:
            for key in ('file_id', 'meas_id'):
                value = self.info.get(key)
                if value is not None:
                    assert 'msecs' not in value
                    # DATE_NONE is the FIF sentinel for "no date".
                    value['secs'] = DATE_NONE[0]
                    value['usecs'] = DATE_NONE[1]
                    # The following copy is needed for a test CTF dataset
                    # otherwise value['machid'][:] = 0 would suffice
                    _tmp = value['machid'].copy()
                    _tmp[:] = 0
                    value['machid'] = _tmp
        # Keep annotations' time origin in sync with the new meas_date.
        if hasattr(self, 'annotations'):
            self.annotations._orig_time = meas_date
        return self
class UpdateChannelsMixin(object):
    """Mixin class for Raw, Evoked, Epochs, AverageTFR."""
    @verbose
    def pick_types(self, meg=False, eeg=False, stim=False, eog=False,
                   ecg=False, emg=False, ref_meg='auto', misc=False,
                   resp=False, chpi=False, exci=False, ias=False, syst=False,
                   seeg=False, dipole=False, gof=False, bio=False, ecog=False,
                   fnirs=False, csd=False, include=(), exclude='bads',
                   selection=None, verbose=None):
        """Pick some channels by type and names.
        Parameters
        ----------
        meg : bool | str
            If True include MEG channels. If string it can be 'mag', 'grad',
            'planar1' or 'planar2' to select only magnetometers, all
            gradiometers, or a specific type of gradiometer.
        eeg : bool
            If True include EEG channels.
        stim : bool
            If True include stimulus channels.
        eog : bool
            If True include EOG channels.
        ecg : bool
            If True include ECG channels.
        emg : bool
            If True include EMG channels.
        ref_meg : bool | str
            If True include CTF / 4D reference channels. If 'auto', reference
            channels are included if compensations are present and ``meg`` is
            not False. Can also be the string options for the ``meg``
            parameter.
        misc : bool
            If True include miscellaneous analog channels.
        resp : bool
            If True include response-trigger channel. For some MEG systems this
            is separate from the stim channel.
        chpi : bool
            If True include continuous HPI coil channels.
        exci : bool
            Flux excitation channel used to be a stimulus channel.
        ias : bool
            Internal Active Shielding data (maybe on Triux only).
        syst : bool
            System status channel information (on Triux systems only).
        seeg : bool
            Stereotactic EEG channels.
        dipole : bool
            Dipole time course channels.
        gof : bool
            Dipole goodness of fit channels.
        bio : bool
            Bio channels.
        ecog : bool
            Electrocorticography channels.
        fnirs : bool | str
            Functional near-infrared spectroscopy channels. If True include all
            fNIRS channels. If False (default) include none. If string it can
            be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to
            include channels measuring deoxyhemoglobin).
        csd : bool
            EEG-CSD channels.
        include : list of str
            List of additional channels to include. If empty do not include
            any.
        exclude : list of str | str
            List of channels to exclude. If 'bads' (default), exclude channels
            in ``info['bads']``.
        selection : list of str
            Restrict sensor channels (MEG, EEG) to this list of channel names.
        %(verbose_meth)s
        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.
        See Also
        --------
        pick_channels
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        # BUGFIX: ``csd`` was previously accepted but not forwarded to
        # pick_types(), so EEG-CSD channels could never be picked this way.
        idx = pick_types(
            self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
            ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
            ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,
            ecog=ecog, fnirs=fnirs, csd=csd, include=include, exclude=exclude,
            selection=selection)
        self._pick_drop_channels(idx)
        # remove dropped channel types from reject and flat
        if getattr(self, 'reject', None) is not None:
            # use list(self.reject) to avoid RuntimeError for changing
            # dictionary size during iteration
            for ch_type in list(self.reject):
                if ch_type not in self:
                    del self.reject[ch_type]
        if getattr(self, 'flat', None) is not None:
            for ch_type in list(self.flat):
                if ch_type not in self:
                    del self.flat[ch_type]
        return self
    def pick_channels(self, ch_names, ordered=False):
        """Pick some channels.
        Parameters
        ----------
        ch_names : list
            The list of channels to select.
        ordered : bool
            If True (default False), ensure that the order of the channels in
            the modified instance matches the order of ``ch_names``.
            .. versionadded:: 0.20.0
        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.
        See Also
        --------
        drop_channels
        pick_types
        reorder_channels
        Notes
        -----
        The channel names given are assumed to be a set, i.e. the order
        does not matter. The original order of the channels is preserved.
        You can use ``reorder_channels`` to set channel order if necessary.
        .. versionadded:: 0.9.0
        """
        picks = pick_channels(self.info['ch_names'], ch_names, ordered=ordered)
        return self._pick_drop_channels(picks)
    @fill_doc
    def pick(self, picks, exclude=()):
        """Pick a subset of channels.
        Parameters
        ----------
        %(picks_all)s
        exclude : list | str
            Set of channels to exclude, only used when picking based on
            types (e.g., exclude="bads" when picks="meg").
        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.
        """
        picks = _picks_to_idx(self.info, picks, 'all', exclude,
                              allow_empty=False)
        return self._pick_drop_channels(picks)
    def reorder_channels(self, ch_names):
        """Reorder channels.
        Parameters
        ----------
        ch_names : list
            The desired channel order.
        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.
        See Also
        --------
        drop_channels
        pick_types
        pick_channels
        Notes
        -----
        Channel names must be unique. Channels that are not in ``ch_names``
        are dropped.
        .. versionadded:: 0.16.0
        """
        _check_excludes_includes(ch_names)
        idx = list()
        for ch_name in ch_names:
            ii = self.ch_names.index(ch_name)
            if ii in idx:
                raise ValueError('Channel name repeated: %s' % (ch_name,))
            idx.append(ii)
        return self._pick_drop_channels(idx)
    def drop_channels(self, ch_names):
        """Drop channel(s).
        Parameters
        ----------
        ch_names : iterable or str
            Iterable (e.g. list) of channel name(s) or channel name to remove.
        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.
        See Also
        --------
        reorder_channels
        pick_channels
        pick_types
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        if isinstance(ch_names, str):
            ch_names = [ch_names]
        try:
            all_str = all([isinstance(ch, str) for ch in ch_names])
        except TypeError:
            raise ValueError("'ch_names' must be iterable, got "
                             "type {} ({}).".format(type(ch_names), ch_names))
        if not all_str:
            raise ValueError("Each element in 'ch_names' must be str, got "
                             "{}.".format([type(ch) for ch in ch_names]))
        missing = [ch for ch in ch_names if ch not in self.ch_names]
        if len(missing) > 0:
            msg = "Channel(s) {0} not found, nothing dropped."
            raise ValueError(msg.format(", ".join(missing)))
        bad_idx = [self.ch_names.index(ch) for ch in ch_names
                   if ch in self.ch_names]
        # Keep everything except the requested channels.
        idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)
        return self._pick_drop_channels(idx)
    def _pick_drop_channels(self, idx):
        """Apply a channel selection ``idx`` to info, data and projections."""
        # avoid circular imports
        from ..io import BaseRaw
        from ..time_frequency import AverageTFR, EpochsTFR
        msg = 'adding, dropping, or reordering channels'
        if isinstance(self, BaseRaw):
            if self._projector is not None:
                _check_preload(self, f'{msg} after calling .apply_proj()')
        else:
            _check_preload(self, msg)
        if getattr(self, 'picks', None) is not None:
            self.picks = self.picks[idx]
        if getattr(self, '_read_picks', None) is not None:
            self._read_picks = [r[idx] for r in self._read_picks]
        if hasattr(self, '_cals'):
            self._cals = self._cals[idx]
        pick_info(self.info, idx, copy=False)
        # Square matrices indexed by channel must be re-indexed on both axes.
        for key in ('_comp', '_projector'):
            mat = getattr(self, key, None)
            if mat is not None:
                setattr(self, key, mat[idx][:, idx])
        # All others (Evoked, Epochs, Raw) have chs axis=-2
        axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2
        if hasattr(self, '_data'):  # skip non-preloaded Raw
            self._data = self._data.take(idx, axis=axis)
        else:
            assert isinstance(self, BaseRaw) and not self.preload
        self._pick_projs()
        return self
    def _pick_projs(self):
        """Keep only projectors which apply to at least 1 data channel."""
        drop_idx = []
        for idx, proj in enumerate(self.info['projs']):
            if not set(self.info['ch_names']) & set(proj['data']['col_names']):
                drop_idx.append(idx)
        for idx in drop_idx:
            logger.info(f"Removing projector {self.info['projs'][idx]}")
        if drop_idx and hasattr(self, 'del_proj'):
            self.del_proj(drop_idx)
        return self
    def add_channels(self, add_list, force_update_info=False):
        """Append new channels to the instance.
        Parameters
        ----------
        add_list : list
            A list of objects to append to self. Must contain all the same
            type as the current object.
        force_update_info : bool
            If True, force the info for objects to be appended to match the
            values in ``self``. This should generally only be used when adding
            stim channels for which important metadata won't be overwritten.
            .. versionadded:: 0.12
        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.
        See Also
        --------
        drop_channels
        Notes
        -----
        If ``self`` is a Raw instance that has been preloaded into a
        :obj:`numpy.memmap` instance, the memmap will be resized.
        """
        # avoid circular imports
        from ..io import BaseRaw, _merge_info
        from ..epochs import BaseEpochs
        _validate_type(add_list, (list, tuple), 'Input')
        # Object-specific checks
        for inst in add_list + [self]:
            _check_preload(inst, "adding channels")
        if isinstance(self, BaseRaw):
            con_axis = 0
            comp_class = BaseRaw
        elif isinstance(self, BaseEpochs):
            con_axis = 1
            comp_class = BaseEpochs
        else:
            con_axis = 0
            comp_class = type(self)
        for inst in add_list:
            _validate_type(inst, comp_class, 'All input')
        data = [inst._data for inst in [self] + add_list]
        # Make sure that all dimensions other than channel axis are the same
        compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
        shapes = np.array([dat.shape for dat in data])[:, compare_axes]
        for shape in shapes:
            if not ((shapes[0] - shape) == 0).all():
                raise AssertionError('All data dimensions except channels '
                                     'must match, got %s != %s'
                                     % (shapes[0], shape))
        del shapes
        # Create final data / info objects
        infos = [self.info] + [inst.info for inst in add_list]
        new_info = _merge_info(infos, force_update_to_first=force_update_info)
        # Now update the attributes
        if isinstance(self._data, np.memmap) and con_axis == 0 and \
                sys.platform != 'darwin':  # resizing not available--no mremap
            # Use a resize and fill in other ones
            out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:]
            n_bytes = np.prod(out_shape) * self._data.dtype.itemsize
            self._data.flush()
            self._data.base.resize(n_bytes)
            self._data = np.memmap(self._data.filename, mode='r+',
                                   dtype=self._data.dtype, shape=out_shape)
            assert self._data.shape == out_shape
            assert self._data.nbytes == n_bytes
            offset = len(data[0])
            for d in data[1:]:
                this_len = len(d)
                self._data[offset:offset + this_len] = d
                offset += this_len
        else:
            self._data = np.concatenate(data, axis=con_axis)
        self.info = new_info
        if isinstance(self, BaseRaw):
            self._cals = np.concatenate([getattr(inst, '_cals')
                                         for inst in [self] + add_list])
            # We should never use these since data are preloaded, let's just
            # set it to something large and likely to break (2 ** 31 - 1)
            extra_idx = [2147483647] * sum(info['nchan'] for info in infos[1:])
            assert all(len(r) == infos[0]['nchan'] for r in self._read_picks)
            self._read_picks = [
                np.concatenate([r, extra_idx]) for r in self._read_picks]
            assert all(len(r) == self.info['nchan'] for r in self._read_picks)
        return self
class InterpolationMixin(object):
    """Mixin class for Raw, Evoked, Epochs."""
    @verbose
    def interpolate_bads(self, reset_bads=True, mode='accurate',
                         origin='auto', method=None, verbose=None):
        """Interpolate bad MEG and EEG channels.
        Operates in place.
        Parameters
        ----------
        reset_bads : bool
            If True, remove the bads from info.
        mode : str
            Either ``'accurate'`` or ``'fast'``, determines the quality of the
            Legendre polynomial expansion used for interpolation of channels
            using the minimum-norm method.
        origin : array-like, shape (3,) | str
            Origin of the sphere in the head coordinate frame and in meters.
            Can be ``'auto'`` (default), which means a head-digitization-based
            origin fit.
            .. versionadded:: 0.17
        method : dict
            Method to use for each channel type.
            Currently only the key "eeg" has multiple options:
            - ``"spline"`` (default)
              Use spherical spline interpolation.
            - ``"MNE"``
              Use minimum-norm projection to a sphere and back.
              This is the method used for MEG channels.
            The value for "meg" is "MNE", and the value for
            "fnirs" is "nearest". The default (None) is thus an alias for::
                method=dict(meg="MNE", eeg="spline", fnirs="nearest")
            .. versionadded:: 0.21
        %(verbose_meth)s
        Returns
        -------
        inst : instance of Raw, Epochs, or Evoked
            The modified instance.
        Notes
        -----
        .. versionadded:: 0.9.0
        """
        from ..bem import _check_origin
        from .interpolation import _interpolate_bads_eeg,\
            _interpolate_bads_meeg, _interpolate_bads_nirs
        _check_preload(self, "interpolation")
        # Fill in per-channel-type defaults for any keys not supplied,
        # then validate both the keys and each per-type method choice.
        method = _handle_default('interpolation_method', method)
        for key in method:
            _check_option('method[key]', key, ('meg', 'eeg', 'fnirs'))
        _check_option("method['eeg']", method['eeg'], ('spline', 'MNE'))
        _check_option("method['meg']", method['meg'], ('MNE',))
        _check_option("method['fnirs']", method['fnirs'], ('nearest',))
        if len(self.info['bads']) == 0:
            warn('No bad channels to interpolate. Doing nothing...')
            return self
        logger.info('Interpolating bad channels')
        origin = _check_origin(origin, self.info)
        if method['eeg'] == 'spline':
            _interpolate_bads_eeg(self, origin=origin)
            eeg_mne = False
        else:
            # EEG will be handled together with MEG by the MNE method below.
            eeg_mne = True
        _interpolate_bads_meeg(self, mode=mode, origin=origin, eeg=eeg_mne)
        _interpolate_bads_nirs(self)
        if reset_bads is True:
            self.info['bads'] = []
        return self
@fill_doc
def rename_channels(info, mapping):
    """Rename channels.
    .. warning:: The channel names must have at most 15 characters
    Parameters
    ----------
    info : dict
        Measurement info to modify.
    %(rename_channels_mapping)s
    """
    _validate_type(info, Info, 'info')
    info._check_consistency()
    bads = list(info['bads'])  # make our own local copies
    ch_names = list(info['ch_names'])
    # first check and assemble clean mappings of index and name
    if isinstance(mapping, dict):
        orig_names = sorted(list(mapping.keys()))
        missing = [orig_name not in ch_names for orig_name in orig_names]
        if any(missing):
            raise ValueError("Channel name(s) in mapping missing from info: "
                             "%s" % np.array(orig_names)[np.array(missing)])
        new_names = [(ch_names.index(ch_name), new_name)
                     for ch_name, new_name in mapping.items()]
    elif callable(mapping):
        new_names = [(ci, mapping(ch_name))
                     for ci, ch_name in enumerate(ch_names)]
    else:
        raise ValueError('mapping must be callable or dict, not %s'
                         % (type(mapping),))
    # check we got all strings out of the mapping
    for new_name in new_names:
        _validate_type(new_name[1], 'str', 'New channel mappings')
    # names longer than 15 characters cannot be stored in FIF files
    bad_new_names = [name for _, name in new_names if len(name) > 15]
    if len(bad_new_names):
        # BUGFIX: previously interpolated ``new_names`` (the whole mapping,
        # including valid names and indices) instead of the offenders.
        raise ValueError('Channel names cannot be longer than 15 '
                         'characters. These channel names are not '
                         'valid : %s' % bad_new_names)
    # do the remapping locally
    for c_ind, new_name in new_names:
        for bi, bad in enumerate(bads):
            if bad == ch_names[c_ind]:
                bads[bi] = new_name
        ch_names[c_ind] = new_name
    # check that all the channel names are unique
    if len(ch_names) != len(np.unique(ch_names)):
        raise ValueError('New channel names are not unique, renaming failed')
    # do the remapping in info
    info['bads'] = bads
    for ch, ch_name in zip(info['chs'], ch_names):
        ch['ch_name'] = ch_name
    info._update_redundant()
    info._check_consistency()
def _recursive_flatten(cell, dtype):
"""Unpack mat files in Python."""
if len(cell) > 0:
while not isinstance(cell[0], dtype):
cell = [c for d in cell for c in d]
return cell
@fill_doc
def read_ch_adjacency(fname, picks=None):
    """Parse FieldTrip neighbors .mat file.
    More information on these neighbor definitions can be found on the related
    `FieldTrip documentation pages
    <http://www.fieldtriptoolbox.org/template/neighbours/>`__.
    Parameters
    ----------
    fname : str
        The file name. Example: 'neuromag306mag', 'neuromag306planar',
        'ctf275', 'biosemi64', etc.
    %(picks_all)s
        Picks Must match the template.
    Returns
    -------
    ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
        The adjacency matrix.
    ch_names : list
        The list of channel names present in adjacency matrix.
    See Also
    --------
    find_ch_adjacency
    Notes
    -----
    This function is closely related to :func:`find_ch_adjacency`. If you
    don't know the correct file for the neighbor definitions,
    :func:`find_ch_adjacency` can compute the adjacency matrix from 2d
    sensor locations.
    """
    from scipy.io import loadmat
    if not op.isabs(fname):
        # Relative names refer to the FieldTrip templates bundled with MNE
        # in mne/channels/data/neighbors; accept either the exact file name
        # or the template name without the '_neighb.mat' suffix.
        templates_dir = op.realpath(op.join(op.dirname(__file__),
                                            'data', 'neighbors'))
        templates = os.listdir(templates_dir)
        for f in templates:
            if f == fname:
                break
            if f == fname + '_neighb.mat':
                fname += '_neighb.mat'
                break
        else:
            # for/else: loop finished without break, i.e. no template matched.
            raise ValueError('I do not know about this neighbor '
                             'template: "{}"'.format(fname))
        fname = op.join(templates_dir, fname)
    nb = loadmat(fname)['neighbours']
    ch_names = _recursive_flatten(nb['label'], str)
    picks = _picks_to_idx(len(ch_names), picks)
    neighbors = [_recursive_flatten(c, str) for c in
                 nb['neighblabel'].flatten()]
    assert len(ch_names) == len(neighbors)
    adjacency = _ch_neighbor_adjacency(ch_names, neighbors)
    # picking before constructing matrix is buggy
    adjacency = adjacency[picks][:, picks]
    ch_names = [ch_names[p] for p in picks]
    return adjacency, ch_names
def _ch_neighbor_adjacency(ch_names, neighbors):
"""Compute sensor adjacency matrix.
Parameters
----------
ch_names : list of str
The channel names.
neighbors : list of list
A list of list of channel names. The neighbors to
which the channels in ch_names are connected with.
Must be of the same length as ch_names.
Returns
-------
ch_adjacency : scipy.sparse matrix
The adjacency matrix.
"""
if len(ch_names) != len(neighbors):
raise ValueError('`ch_names` and `neighbors` must '
'have the same length')
set_neighbors = {c for d in neighbors for c in d}
rest = set_neighbors - set(ch_names)
if len(rest) > 0:
raise ValueError('Some of your neighbors are not present in the '
'list of channel names')
for neigh in neighbors:
if (not isinstance(neigh, list) and
not all(isinstance(c, str) for c in neigh)):
raise ValueError('`neighbors` must be a list of lists of str')
ch_adjacency = np.eye(len(ch_names), dtype=bool)
for ii, neigbs in enumerate(neighbors):
ch_adjacency[ii, [ch_names.index(i) for i in neigbs]] = True
ch_adjacency = sparse.csr_matrix(ch_adjacency)
return ch_adjacency
def find_ch_adjacency(info, ch_type):
    """Find the adjacency matrix for the given channels.
    This function tries to infer the appropriate adjacency matrix template
    for the given channels. If a template is not found, the adjacency matrix
    is computed using Delaunay triangulation based on 2d sensor locations.
    Parameters
    ----------
    info : instance of Info
        The measurement info.
    ch_type : str | None
        The channel type for computing the adjacency matrix. Currently
        supports 'mag', 'grad', 'eeg' and None. If None, the info must contain
        only one channel type.
    Returns
    -------
    ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
        The adjacency matrix.
    ch_names : list
        The list of channel names present in adjacency matrix.
    See Also
    --------
    read_ch_adjacency
    Notes
    -----
    .. versionadded:: 0.15
    Automatic detection of an appropriate adjacency matrix template only
    works for MEG data at the moment. This means that the adjacency matrix
    is always computed for EEG data and never loaded from a template file. If
    you want to load a template for a given montage use
    :func:`read_ch_adjacency` directly.
    """
    if ch_type is None:
        # ch_type=None is only allowed when exactly one type is present.
        picks = channel_indices_by_type(info)
        if sum([len(p) != 0 for p in picks.values()]) != 1:
            raise ValueError('info must contain only one channel type if '
                             'ch_type is None.')
        ch_type = channel_type(info, 0)
    else:
        _check_option('ch_type', ch_type, ['mag', 'grad', 'eeg'])
    (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
     has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
     has_eeg_coils_and_meg, has_eeg_coils_only,
     has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info)
    # Try to map the detected MEG system onto a bundled FieldTrip template.
    conn_name = None
    if has_vv_mag and ch_type == 'mag':
        conn_name = 'neuromag306mag'
    elif has_vv_grad and ch_type == 'grad':
        conn_name = 'neuromag306planar'
    elif has_neuromag_122_grad:
        conn_name = 'neuromag122'
    elif has_4D_mag:
        # BTi/4D systems: distinguish 248- vs 148-channel variants and
        # mag vs grad via the coil type of a known channel.
        if 'MEG 248' in info['ch_names']:
            idx = info['ch_names'].index('MEG 248')
            grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD
            mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG
            if ch_type == 'grad' and grad:
                conn_name = 'bti248grad'
            elif ch_type == 'mag' and mag:
                conn_name = 'bti248'
        elif 'MEG 148' in info['ch_names'] and ch_type == 'mag':
            idx = info['ch_names'].index('MEG 148')
            if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG:
                conn_name = 'bti148'
    elif has_CTF_grad and ch_type == 'mag':
        # CTF systems: pick the template by total channel count.
        if info['nchan'] < 100:
            conn_name = 'ctf64'
        elif info['nchan'] > 200:
            conn_name = 'ctf275'
        else:
            conn_name = 'ctf151'
    elif n_kit_grads > 0:
        from ..io.kit.constants import KIT_NEIGHBORS
        conn_name = KIT_NEIGHBORS.get(info['kit_system_id'])
    if conn_name is not None:
        logger.info('Reading adjacency matrix for %s.' % conn_name)
        return read_ch_adjacency(conn_name)
    # No template matched (e.g. EEG data): fall back to triangulation.
    logger.info('Could not find a adjacency matrix for the data. '
                'Computing adjacency based on Delaunay triangulations.')
    return _compute_ch_adjacency(info, ch_type)
def _compute_ch_adjacency(info, ch_type):
    """Compute channel adjacency matrix using Delaunay triangulations.
    Parameters
    ----------
    info : instance of mne.measuerment_info.Info
        The measurement info.
    ch_type : str
        The channel type for computing the adjacency matrix. Currently
        supports 'mag', 'grad' and 'eeg'.
    Returns
    -------
    ch_adjacency : scipy.sparse matrix, shape (n_channels, n_channels)
        The adjacency matrix.
    ch_names : list
        The list of channel names present in adjacency matrix.
    """
    from scipy.spatial import Delaunay
    from .. import spatial_tris_adjacency
    from ..channels.layout import _find_topomap_coords, _pair_grad_sensors
    # Neuromag planar gradiometers come in co-located pairs; if present,
    # triangulate one sensor per pair and expand the result to both below.
    combine_grads = (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
                     np.unique([ch['coil_type'] for ch in info['chs']]))
    picks = dict(_picks_by_type(info, exclude=[]))[ch_type]
    ch_names = [info['ch_names'][pick] for pick in picks]
    if combine_grads:
        pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[])
        if len(pairs) != len(picks):
            raise RuntimeError('Cannot find a pair for some of the '
                               'gradiometers. Cannot compute adjacency '
                               'matrix.')
        # only for one of the pair
        xy = _find_topomap_coords(info, picks[::2], sphere=HEAD_SIZE_DEFAULT)
    else:
        xy = _find_topomap_coords(info, picks, sphere=HEAD_SIZE_DEFAULT)
    tri = Delaunay(xy)
    neighbors = spatial_tris_adjacency(tri.simplices)
    if combine_grads:
        # Expand each pair-level adjacency (idx, neigbs) to the four
        # sensor-level combinations of the two co-located gradiometers.
        ch_adjacency = np.eye(len(picks), dtype=bool)
        for idx, neigbs in zip(neighbors.row, neighbors.col):
            for ii in range(2):  # make sure each pair is included
                for jj in range(2):
                    ch_adjacency[idx * 2 + ii, neigbs * 2 + jj] = True
                    ch_adjacency[idx * 2 + ii, idx * 2 + jj] = True  # pair
        ch_adjacency = sparse.csr_matrix(ch_adjacency)
    else:
        # Add self-adjacency on the diagonal before converting to CSR.
        ch_adjacency = sparse.lil_matrix(neighbors)
        ch_adjacency.setdiag(np.repeat(1, ch_adjacency.shape[0]))
        ch_adjacency = ch_adjacency.tocsr()
    return ch_adjacency, ch_names
def fix_mag_coil_types(info, use_cal=False):
    """Fix magnetometer coil types.
    Parameters
    ----------
    info : dict
        The info dict to correct. Corrections are done in-place.
    use_cal : bool
        If True, further refine the check for old coil types by checking
        ``info['chs'][ii]['cal']``.
    Notes
    -----
    This function changes magnetometer coil types 3022 (T1: SQ20483N) and
    3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
    records in the info structure.
    Neuromag Vectorview systems can contain magnetometers with two
    different coil sizes (3022 and 3023 vs. 3024). The systems
    incorporating coils of type 3024 were introduced last and are used at
    the majority of MEG sites. At some sites with 3024 magnetometers,
    the data files have still defined the magnetometers to be of type
    3022 to ensure compatibility with older versions of Neuromag software.
    In the MNE software as well as in the present version of Neuromag
    software coil type 3024 is fully supported. Therefore, it is now safe
    to upgrade the data files to use the true coil type.
    .. note:: The effect of the difference between the coil sizes on the
              current estimates computed by the MNE software is very small.
              Therefore the use of ``fix_mag_coil_types`` is not mandatory.
    """
    old_mag_inds = _get_T1T2_mag_inds(info, use_cal)
    # rewrite the coil type in-place for every affected magnetometer
    for ii in old_mag_inds:
        info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
    logger.info('%d of %d magnetometer types replaced with T3.' %
                (len(old_mag_inds), len(pick_types(info, meg='mag'))))
    # make sure the in-place edit left the info structure consistent
    info._check_consistency()
def _get_T1T2_mag_inds(info, use_cal=False):
    """Find T1/T2 magnetometer coil types."""
    # Old-style VectorView magnetometer coil types we are looking for.
    old_types = (FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2)

    # From email exchanges, systems with the larger T2 coil only use the cal
    # value of 2.09e-11. Newer T3 magnetometers use 4.13e-11 or 1.33e-10
    # (Triux). So we can use a simple check for > 3e-11.
    def _looks_old(ch):
        if ch['coil_type'] not in old_types:
            return False
        return ch['cal'] > 3e-11 if use_cal else True

    return [idx for idx in pick_types(info, meg='mag')
            if _looks_old(info['chs'][idx])]
def _get_ch_info(info):
    """Get channel info for inferring acquisition device."""
    chs = info['chs']
    # Only take first 16 bits, as higher bits store CTF comp order
    coil_types = {ch['coil_type'] & 0xFFFF for ch in chs}
    channel_types = {ch['kind'] for ch in chs}
    # Elekta/Neuromag VectorView magnetometers and planar gradiometers
    has_vv_mag = any(k in coil_types for k in
                     [FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
                      FIFF.FIFFV_COIL_VV_MAG_T3])
    has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
                                                FIFF.FIFFV_COIL_VV_PLANAR_T2,
                                                FIFF.FIFFV_COIL_VV_PLANAR_T3])
    has_neuromag_122_grad = any(k in coil_types
                                for k in [FIFF.FIFFV_COIL_NM_122])
    # NOTE(review): a space in the first channel name is used here as the
    # marker of an old VectorView system -- confirm against the IO docs.
    is_old_vv = ' ' in chs[0]['ch_name']
    has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
    # CTF reference coil types; their presence on MEG channels also counts
    # as a CTF system (see has_CTF_grad below)
    ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
                       FIFF.FIFFV_COIL_CTF_REF_GRAD,
                       FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
    has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
                    (FIFF.FIFFV_MEG_CH in channel_types and
                     any(k in ctf_other_types for k in coil_types)))
    # hack due to MNE-C bug in IO of CTF
    # only take first 16 bits, as higher bits store CTF comp order
    n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD
                      for ch in chs)
    has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
                       n_kit_grads])
    # EEG requires both the EEG coil type and an EEG channel kind
    has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
                     FIFF.FIFFV_EEG_CH in channel_types)
    has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
    has_eeg_coils_only = has_eeg_coils and not has_any_meg
    has_csd_coils = (FIFF.FIFFV_COIL_EEG_CSD in coil_types and
                     FIFF.FIFFV_EEG_CH in channel_types)
    return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
            has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
            has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad,
            has_csd_coils)
def make_1020_channel_selections(info, midline="z"):
    """Return dict mapping from ROI names to lists of picks for 10/20 setups.
    This passes through all channel names, and uses a simple heuristic to
    separate channel names into three Region of Interest-based selections:
    Left, Midline and Right. The heuristic is that channels ending on any of
    the characters in ``midline`` are filed under that heading, otherwise those
    ending in odd numbers under "Left", those in even numbers under "Right".
    Other channels are ignored. This is appropriate for 10/20 files, but not
    for other channel naming conventions.
    If an info object is provided, lists are sorted from posterior to anterior.
    Parameters
    ----------
    info : instance of Info
        Where to obtain the channel names from. The picks will
        be in relation to the position in ``info["ch_names"]``. If possible,
        this lists will be sorted by y value position of the channel locations,
        i.e., from back to front.
    midline : str
        Names ending in any of these characters are stored under the
        ``Midline`` key. Defaults to 'z'. Note that capitalization is ignored.
    Returns
    -------
    selections : dict
        A dictionary mapping from ROI names to lists of picks (integers).
    """
    _validate_type(info, "info")
    try:
        from .layout import find_layout
        layout = find_layout(info)
        pos = layout.pos
        ch_names = layout.names
    except RuntimeError:  # no channel positions found
        ch_names = info["ch_names"]
        pos = None
    selections = dict(Left=[], Midline=[], Right=[])
    for pick, channel in enumerate(ch_names):
        last_char = channel[-1].lower()  # in 10/20, last char codes hemisphere
        if last_char in midline:
            selection = "Midline"
        elif last_char.isdigit():
            # odd numbers -> left hemisphere, even numbers -> right
            selection = "Left" if int(last_char) % 2 else "Right"
        else:  # ignore the channel
            continue
        selections[selection].append(pick)
    if pos is not None:
        # sort picks by the y coordinate of the layout positions
        # (back to front per the docstring; ascending argsort over pos[:, 1])
        selections = {selection: np.array(picks)[pos[picks, 1].argsort()]
                      for selection, picks in selections.items()}
    return selections
def combine_channels(inst, groups, method='mean', keep_stim=False,
                     drop_bad=False):
    """Combine channels based on specified channel grouping.
    Parameters
    ----------
    inst : instance of Raw, Epochs, or Evoked
        An MNE-Python object to combine the channels for. The object can be of
        type Raw, Epochs, or Evoked.
    groups : dict
        Specifies which channels are aggregated into a single channel, with
        aggregation method determined by the ``method`` parameter. One new
        pseudo-channel is made per dict entry; the dict values must be lists of
        picks (integer indices of ``ch_names``). For example::
            groups=dict(Left=[1, 2, 3, 4], Right=[5, 6, 7, 8])
        Note that within a dict entry all channels must have the same type.
    method : str | callable
        Which method to use to combine channels. If a :class:`str`, must be one
        of 'mean', 'median', or 'std' (standard deviation). If callable, the
        callable must accept one positional input (data of shape ``(n_channels,
        n_times)``, or ``(n_epochs, n_channels, n_times)``) and return an
        :class:`array <numpy.ndarray>` of shape ``(n_times,)``, or ``(n_epochs,
        n_times)``. For example with an instance of Raw or Evoked::
            method = lambda data: np.mean(data, axis=0)
        Another example with an instance of Epochs::
            method = lambda data: np.median(data, axis=1)
        Defaults to ``'mean'``.
    keep_stim : bool
        If ``True``, include stimulus channels in the resulting object.
        Defaults to ``False``.
    drop_bad : bool
        If ``True``, drop channels marked as bad before combining. Defaults to
        ``False``.
    Returns
    -------
    combined_inst : instance of Raw, Epochs, or Evoked
        An MNE-Python object of the same type as the input ``inst``, containing
        one virtual channel for each group in ``groups`` (and, if ``keep_stim``
        is ``True``, also containing stimulus channels).
    """
    from ..io import BaseRaw, RawArray
    from .. import BaseEpochs, EpochsArray, Evoked, EvokedArray
    # Epochs data is (n_epochs, n_channels, n_times): channels on axis 1;
    # Raw/Evoked data is (n_channels, n_times): channels on axis 0.
    ch_axis = 1 if isinstance(inst, BaseEpochs) else 0
    ch_idx = list(range(inst.info['nchan']))
    ch_names = inst.info['ch_names']
    ch_types = inst.get_channel_types()
    inst_data = inst.data if isinstance(inst, Evoked) else inst.get_data()
    # deep copy so the caller's groups dict is never mutated
    groups = OrderedDict(deepcopy(groups))
    # Convert string values of ``method`` into callables
    # XXX Possibly de-duplicate with _make_combine_callable of mne/viz/utils.py
    if isinstance(method, str):
        method_dict = {key: partial(getattr(np, key), axis=ch_axis)
                       for key in ('mean', 'median', 'std')}
        try:
            method = method_dict[method]
        except KeyError:
            raise ValueError('"method" must be a callable, or one of "mean", '
                             f'"median", or "std"; got "{method}".')
    # Instantiate channel info and data
    new_ch_names, new_ch_types, new_data = [], [], []
    if not isinstance(keep_stim, bool):
        raise TypeError('"keep_stim" must be of type bool, not '
                        f'{type(keep_stim)}.')
    if keep_stim:
        # stimulus channels are passed through unchanged, ahead of the groups
        stim_ch_idx = list(pick_types(inst.info, meg=False, stim=True))
        if stim_ch_idx:
            new_ch_names = [ch_names[idx] for idx in stim_ch_idx]
            new_ch_types = [ch_types[idx] for idx in stim_ch_idx]
            new_data = [np.take(inst_data, idx, axis=ch_axis)
                        for idx in stim_ch_idx]
        else:
            warn('Could not find stimulus channels.')
    # Get indices of bad channels
    ch_idx_bad = []
    if not isinstance(drop_bad, bool):
        raise TypeError('"drop_bad" must be of type bool, not '
                        f'{type(drop_bad)}.')
    if drop_bad and inst.info['bads']:
        ch_idx_bad = pick_channels(ch_names, inst.info['bads'])
    # Check correctness of combinations
    for this_group, this_picks in groups.items():
        # Check if channel indices are out of bounds
        if not all(idx in ch_idx for idx in this_picks):
            raise ValueError('Some channel indices are out of bounds.')
        # Check if heterogeneous sensor type combinations
        this_ch_type = np.array(ch_types)[this_picks]
        if len(set(this_ch_type)) > 1:
            types = ', '.join(set(this_ch_type))
            raise ValueError('Cannot combine sensors of different types; '
                             f'"{this_group}" contains types {types}.')
        # Remove bad channels
        these_bads = [idx for idx in this_picks if idx in ch_idx_bad]
        this_picks = [idx for idx in this_picks if idx not in ch_idx_bad]
        if these_bads:
            logger.info('Dropped the following channels in group '
                        f'{this_group}: {these_bads}')
        # Check if combining less than 2 channel
        if len(set(this_picks)) < 2:
            warn(f'Less than 2 channels in group "{this_group}" when '
                 f'combining by method "{method}".')
        # If all good create more detailed dict without bad channels
        groups[this_group] = dict(picks=this_picks, ch_type=this_ch_type[0])
    # Combine channels and add them to the new instance
    for this_group, this_group_dict in groups.items():
        new_ch_names.append(this_group)
        new_ch_types.append(this_group_dict['ch_type'])
        this_picks = this_group_dict['picks']
        this_data = np.take(inst_data, this_picks, axis=ch_axis)
        new_data.append(method(this_data))
    # stack the combined channels back along the channel axis
    new_data = np.swapaxes(new_data, 0, ch_axis)
    info = create_info(sfreq=inst.info['sfreq'], ch_names=new_ch_names,
                       ch_types=new_ch_types)
    # rebuild an object of the same class as the input
    if isinstance(inst, BaseRaw):
        combined_inst = RawArray(new_data, info, first_samp=inst.first_samp,
                                 verbose=inst.verbose)
    elif isinstance(inst, BaseEpochs):
        combined_inst = EpochsArray(new_data, info, events=inst.events,
                                    tmin=inst.times[0], verbose=inst.verbose)
    elif isinstance(inst, Evoked):
        combined_inst = EvokedArray(new_data, info, tmin=inst.times[0],
                                    verbose=inst.verbose)
    return combined_inst
| olafhauk/mne-python | mne/channels/channels.py | Python | bsd-3-clause | 66,166 | [
"Mayavi"
] | 3d7119239190df92ee0602f0785ab37b6bd1b46bb5b901d0ad1dbfbcc3c9653b |
# $Id$
#
# Copyright (C) 2005-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from __future__ import print_function
from rdkit import Chem
import sys
from rdkit.Chem import Randomize
def TestMolecule(mol):
  """Sanitize a molecule and verify its canonicalization is stable.

  Return codes:
    0  : everything ok
    -1 : sanitization raised a ValueError
    -2 : sanitization raised some other exception
    -3 : canonicalization check failed
  """
  try:
    Chem.SanitizeMol(mol)
    mol = Chem.RemoveHs(mol)
  except ValueError:
    # expected sanitization failure; no traceback needed
    return -1
  except Exception:
    # "except Exception" instead of a bare "except:" so that
    # KeyboardInterrupt/SystemExit still propagate
    import traceback
    traceback.print_exc()
    return -2
  if mol.GetNumAtoms():
    try:
      Randomize.CheckCanonicalization(mol,10)
    except Exception:
      import traceback
      traceback.print_exc()
      return -3
  return 0
def TestSupplier(suppl,stopAfter=-1,reportInterval=100,reportTo=sys.stderr,
                 nameProp='_Name'):
  """Run TestMolecule on every molecule produced by a supplier.

  Arguments:
    - suppl: a molecule supplier (e.g. Chem.SDMolSupplier)
    - stopAfter: stop after this many molecules (-1 = no limit)
    - reportInterval: print a progress line every this many molecules
    - reportTo: stream failure messages are written to
    - nameProp: molecule property used to identify failing molecules

  Returns a (nDone, nFailed) tuple.
  """
  nDone = 0
  nFailed = 0
  while 1:
    try:
      # next(suppl) works on both Python 2 and 3; suppl.next() is py2-only
      mol = next(suppl)
    except StopIteration:
      break
    except Exception:
      # the supplier itself choked on this entry; count it and keep going
      import traceback
      traceback.print_exc()
      nFailed += 1
      reportTo.flush()
      print('Failure at mol %d'%nDone, file=reportTo)
    else:
      if mol:
        ok = TestMolecule(mol)
      else:
        # the supplier returned None (unparseable record)
        ok = -3
      if ok<0:
        nFailed += 1
        reportTo.flush()
        if ok==-3:
          print('Canonicalization',end='',file=reportTo)
        print('Failure at mol %d'%nDone,end='',file=reportTo)
        if mol:
          print(mol.GetProp(nameProp),end='',file=reportTo)
        print('', file=reportTo)
    nDone += 1
    if nDone==stopAfter:
      break
    if not nDone%reportInterval:
      print('Done %d molecules, %d failures'%(nDone,nFailed))
  return nDone,nFailed
if __name__=='__main__':
  # Usage: BulkTester.py <sdfile> [nameProp]
  # second argument (False) keeps explicit Hs so TestMolecule can remove them
  suppl = Chem.SDMolSupplier(sys.argv[1],False)
  if len(sys.argv)>2:
    nameProp = sys.argv[2]
  else:
    nameProp = '_Name'
  nDone,nFailed = TestSupplier(suppl,nameProp=nameProp)
  print('%d failures in %d mols'%(nFailed,nDone))
| soerendip42/rdkit | rdkit/Chem/ChemUtils/BulkTester.py | Python | bsd-3-clause | 1,982 | [
"RDKit"
] | 82225f1b201aeb6769a267df23ebb3c79cce1528d0e327a374fe9650ff39a603 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Foundation import objc
from Foundation import NSBundle
from AppKit import NSImage
# Bundle identifier of the Haskell sugar; used to locate its icon resources.
haskellBundleIdentifier = 'org.purl.net.mkhl.haskell'
def iconForName(name):
    """Return the NSImage instance representing a `name` item.

    Looks up `<name>.png` inside this sugar's bundle and loads it as an
    NSImage.
    """
    bundle = NSBundle.bundleWithIdentifier_(haskellBundleIdentifier)
    imgpath = bundle.pathForResource_ofType_(name, 'png')
    img = NSImage.alloc().initWithContentsOfFile_(imgpath)
    # Autoreleasing the image seems to randomly crash Espresso.
    # img.autorelease()
    return img
class HaskellModuleItem(objc.lookUpClass('ESBaseItem')):
    """Itemizer for modules"""
    def isDecorator(self):
        # NOTE(review): presumably marks the item as a decorator-style entry
        # in Espresso's navigator -- confirm against the ESBaseItem API.
        return True
    def image(self):
        # icon shown next to module entries
        return iconForName('module')
class HaskellTypeItem(objc.lookUpClass('ESBaseItem')):
    """Itemizer for datatypes"""
    def isDecorator(self):
        # NOTE(review): presumably marks the item as a decorator-style entry
        # -- confirm against the ESBaseItem API.
        return True
    def image(self):
        # icon shown next to datatype entries
        return iconForName('type')
    def isTextualizer(self):
        # this item supplies its own display text (see title below)
        return True
    def title(self):
        # strip leading whitespace from the source text for display
        return self.text().lstrip()
class HaskellFunctionItem(objc.lookUpClass('ESBaseItem')):
    """Itemizer for functions.

    Inherits all behavior from ESBaseItem unchanged (no icon, no custom
    title).
    """
    pass
class HaskellCodeBlockItem(objc.lookUpClass('ESCodeBlockItem')):
    """Itemizer for code blocks"""
    def isTextualizer(self):
        # this item supplies its own display text (see title below)
        return True
    def title(self):
        # prefix the stripped block text with a braces glyph
        return '%s %s' % (u'{…}', self.text().lstrip())
| mkhl/haskell.sugar | src/Itemizers.py | Python | mit | 1,446 | [
"ESPResSo"
] | ed8c3de4ae89e5a09a4d9331daa8e53d1fd36b3eca08a1803ef7e415faa52ef3 |
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/6')
from data_6 import Fmat_original
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_sigma(fvec):
    """Summarize a feature matrix into per-state Gaussian parameters.

    The rows of ``fvec`` are split into 10 equal consecutive segments (one
    per hidden state); for each segment the mean and standard deviation over
    all of its entries are computed.

    Parameters
    ----------
    fvec : array_like, shape (m, n)
        Feature matrix; ``m`` should be a multiple of 10 (trailing rows
        beyond ``10 * (m // 10)`` are ignored).

    Returns
    -------
    mu, sigma : np.matrix, shape (10, 1)
        Mean and standard deviation per hidden state.
    """
    m, n = np.shape(fvec)
    mu = np.matrix(np.zeros((10, 1)))
    sigma = np.matrix(np.zeros((10, 1)))
    # Integer division: plain ``m / 10`` yields a float under Python 3 and
    # breaks the slicing below.  ``scipy.mean``/``scipy.std`` were removed
    # from SciPy, so use the equivalent numpy functions.
    divs = m // 10
    for index in range(10):
        m_init = index * divs
        temp_fvec = fvec[m_init:(m_init + divs), 0:]
        mu[index] = np.mean(temp_fvec)
        sigma[index] = np.std(temp_fvec)
    return mu, sigma
# Returns sequence given raw data
def create_seq(fvec):
    """Convert raw per-trial data into 10-step observation sequences.

    Each column of ``fvec`` is one trial; its rows are split into 10 equal
    consecutive segments and each segment is averaged, producing one
    10-step emission sequence per trial.

    Parameters
    ----------
    fvec : array_like, shape (m, n)
        Raw data, one trial per column; ``m`` should be a multiple of 10.

    Returns
    -------
    seq : np.matrix, shape (10, n)
        One averaged observation sequence per column of ``fvec``.
    """
    m, n = np.shape(fvec)
    seq = np.matrix(np.zeros((10, n)))
    # Integer division keeps the slice bounds ints under Python 3;
    # np.mean replaces the removed scipy.mean.
    divs = m // 10
    for i in range(n):
        for index in range(10):
            m_init = index * divs
            seq[index, i] = np.mean(fvec[m_init:(m_init + divs), i])
    return seq
if __name__ == '__main__':
    # ------------------------------------------------------------------
    # Leave-one-out style cross-validation of four continuous-emission
    # HMMs (Rigid-Fixed, Rigid-Movable, Soft-Fixed, Soft-Movable) on the
    # force data in Fmat.  Column layout (5 trials per object):
    #   0:35 RF, 35:70 RM, 70:105 SF, 105:140 SM.
    # ------------------------------------------------------------------
    Fmat = Fmat_original
    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    #print " "
    #print 'Total_Matrix_Shape:',m_tot,n_tot
    mu_rf,sigma_rf = feature_to_mu_sigma(Fmat[0:121,0:35])
    mu_rm,sigma_rm = feature_to_mu_sigma(Fmat[0:121,35:70])
    mu_sf,sigma_sf = feature_to_mu_sigma(Fmat[0:121,70:105])
    mu_sm,sigma_sm = feature_to_mu_sigma(Fmat[0:121,105:140])
    mu_obj1,sigma_obj1 = feature_to_mu_sigma(Fmat[0:121,140:141])
    mu_obj2,sigma_obj2 = feature_to_mu_sigma(Fmat[0:121,141:142])
    #print [mu_rf, sigma_rf]
    # HMM - Implementation:
    # 10 Hidden States
    # Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
    # Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
    # Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch)
    # For new objects, it is classified according to which model it represenst the closest..
    F = ghmm.Float() # emission domain of this model
    # A - Transition Matrix (left-to-right/upper-triangular initial guess)
    A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
         [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
         [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_rf = np.zeros((10,2))
    B_rm = np.zeros((10,2))
    B_sf = np.zeros((10,2))
    B_sm = np.zeros((10,2))
    for num_states in range(10):
        B_rf[num_states,0] = mu_rf[num_states]
        B_rf[num_states,1] = sigma_rf[num_states]
        B_rm[num_states,0] = mu_rm[num_states]
        B_rm[num_states,1] = sigma_rm[num_states]
        B_sf[num_states,0] = mu_sf[num_states]
        B_sf[num_states,1] = sigma_sf[num_states]
        B_sm[num_states,0] = mu_sm[num_states]
        B_sm[num_states,1] = sigma_sm[num_states]
    B_rf = B_rf.tolist()
    B_rm = B_rm.tolist()
    B_sf = B_sf.tolist()
    B_sm = B_sm.tolist()
    # pi - initial probabilities per state
    pi = [0.1] * 10
    # generate RF, RM, SF, SM models from parameters
    model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained
    model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained
    model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained
    model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained
    # Per-class vote accumulators over the 5 cross-validation folds
    trial_number = 1
    rf_final = np.matrix(np.zeros((28,1)))
    rm_final = np.matrix(np.zeros((28,1)))
    sf_final = np.matrix(np.zeros((28,1)))
    sm_final = np.matrix(np.zeros((28,1)))
    while (trial_number < 6):
        # For Training: in fold k, trial k of each object is held out and
        # the remaining 4 trials per object are used for Baum-Welch.
        total_seq = Fmat[0:121,:]
        m_total, n_total = np.shape(total_seq)
        #print 'Total_Sequence_Shape:', m_total, n_total
        if (trial_number == 1):
            j = 5
            total_seq_rf = total_seq[0:121,1:5]
            total_seq_rm = total_seq[0:121,36:40]
            total_seq_sf = total_seq[0:121,71:75]
            total_seq_sm = total_seq[0:121,106:110]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+1:j+5]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+36:j+40]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+71:j+75]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+106:j+110]))
                j = j+5
        if (trial_number == 2):
            j = 5
            total_seq_rf = np.column_stack((total_seq[0:121,0],total_seq[0:121,2:5]))
            total_seq_rm = np.column_stack((total_seq[0:121,35],total_seq[0:121,37:40]))
            total_seq_sf = np.column_stack((total_seq[0:121,70],total_seq[0:121,72:75]))
            total_seq_sm = np.column_stack((total_seq[0:121,105],total_seq[0:121,107:110]))
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+0],total_seq[0:121,j+2:j+5]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35],total_seq[0:121,j+37:j+40]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70],total_seq[0:121,j+72:j+75]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105],total_seq[0:121,j+107:j+110]))
                j = j+5
        if (trial_number == 3):
            j = 5
            total_seq_rf = np.column_stack((total_seq[0:121,0:2],total_seq[0:121,3:5]))
            total_seq_rm = np.column_stack((total_seq[0:121,35:37],total_seq[0:121,38:40]))
            total_seq_sf = np.column_stack((total_seq[0:121,70:72],total_seq[0:121,73:75]))
            total_seq_sm = np.column_stack((total_seq[0:121,105:107],total_seq[0:121,108:110]))
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+0:j+2],total_seq[0:121,j+3:j+5]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35:j+37],total_seq[0:121,j+38:j+40]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70:j+72],total_seq[0:121,j+73:j+75]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105:j+107],total_seq[0:121,j+108:j+110]))
                j = j+5
        if (trial_number == 4):
            j = 5
            total_seq_rf = np.column_stack((total_seq[0:121,0:3],total_seq[0:121,4:5]))
            total_seq_rm = np.column_stack((total_seq[0:121,35:38],total_seq[0:121,39:40]))
            total_seq_sf = np.column_stack((total_seq[0:121,70:73],total_seq[0:121,74:75]))
            total_seq_sm = np.column_stack((total_seq[0:121,105:108],total_seq[0:121,109:110]))
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+0:j+3],total_seq[0:121,j+4:j+5]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35:j+38],total_seq[0:121,j+39:j+40]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70:j+73],total_seq[0:121,j+74:j+75]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105:j+108],total_seq[0:121,j+109:j+110]))
                j = j+5
        if (trial_number == 5):
            j = 5
            total_seq_rf = total_seq[0:121,0:4]
            total_seq_rm = total_seq[0:121,35:39]
            total_seq_sf = total_seq[0:121,70:74]
            total_seq_sm = total_seq[0:121,105:109]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+0:j+4]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35:j+39]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70:j+74]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105:j+109]))
                j = j+5
        # ghmm expects lists of per-sequence observation lists
        train_seq_rf = (np.array(total_seq_rf).T).tolist()
        train_seq_rm = (np.array(total_seq_rm).T).tolist()
        train_seq_sf = (np.array(total_seq_sf).T).tolist()
        train_seq_sm = (np.array(total_seq_sm).T).tolist()
        #print train_seq_rf
        final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
        final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
        final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
        final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
        # NOTE: the models are trained incrementally across folds (they are
        # never reset), so later folds start from the previous fold's fit.
        model_rf.baumWelch(final_ts_rf)
        model_rm.baumWelch(final_ts_rm)
        model_sf.baumWelch(final_ts_sf)
        model_sm.baumWelch(final_ts_sm)
        # For Testing: collect the held-out trial of every object
        if (trial_number == 1):
            j = 5
            total_seq_rf = total_seq[0:121,0]
            total_seq_rm = total_seq[0:121,35]
            total_seq_sf = total_seq[0:121,70]
            total_seq_sm = total_seq[0:121,105]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105]))
                j = j+5
        if (trial_number == 2):
            j = 5
            total_seq_rf = total_seq[0:121,1]
            total_seq_rm = total_seq[0:121,36]
            total_seq_sf = total_seq[0:121,71]
            total_seq_sm = total_seq[0:121,106]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+1]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+36]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+71]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+106]))
                j = j+5
        if (trial_number == 3):
            j = 5
            total_seq_rf = total_seq[0:121,2]
            total_seq_rm = total_seq[0:121,37]
            total_seq_sf = total_seq[0:121,72]
            total_seq_sm = total_seq[0:121,107]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+2]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+37]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+72]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+107]))
                j = j+5
        if (trial_number == 4):
            j = 5
            total_seq_rf = total_seq[0:121,3]
            total_seq_rm = total_seq[0:121,38]
            total_seq_sf = total_seq[0:121,73]
            total_seq_sm = total_seq[0:121,108]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+3]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+38]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+73]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+108]))
                j = j+5
        if (trial_number == 5):
            j = 5
            total_seq_rf = total_seq[0:121,4]
            total_seq_rm = total_seq[0:121,39]
            total_seq_sf = total_seq[0:121,74]
            total_seq_sm = total_seq[0:121,109]
            while (j < 35):
                total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+4]))
                total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+39]))
                total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+74]))
                total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+109]))
                j = j+5
        total_seq_obj = np.matrix(np.column_stack((total_seq_rf,total_seq_rm,total_seq_sf,total_seq_sm)))
        # one-hot vote per test sequence: which model scored highest
        rf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
        rm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
        sf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
        sm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
        k = 0
        while (k < np.size(total_seq_obj,1)):
            test_seq_obj = (np.array(total_seq_obj[0:121,k]).T).tolist()
            new_test_seq_obj = np.array(sum(test_seq_obj,[]))
            ts_obj = new_test_seq_obj
            final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist())
            # Find Viterbi Path (log-likelihood is element [1])
            path_rf_obj = model_rf.viterbi(final_ts_obj)
            path_rm_obj = model_rm.viterbi(final_ts_obj)
            path_sf_obj = model_sf.viterbi(final_ts_obj)
            path_sm_obj = model_sm.viterbi(final_ts_obj)
            obj = max(path_rf_obj[1],path_rm_obj[1],path_sf_obj[1],path_sm_obj[1])
            if obj == path_rf_obj[1]:
                rf[0,k] = 1
            elif obj == path_rm_obj[1]:
                rm[0,k] = 1
            elif obj == path_sf_obj[1]:
                sf[0,k] = 1
            else:
                sm[0,k] = 1
            k = k+1
        #print rf.T
        rf_final = rf_final + rf.T
        rm_final = rm_final + rm.T
        sf_final = sf_final + sf.T
        sm_final = sm_final + sm.T
        trial_number = trial_number + 1
    #print rf_final
    #print rm_final
    #print sf_final
    #print sm_final
    # Confusion Matrix: rows = predictions (RF,RM,SF,SM), cols = true class;
    # votes are summed in blocks of 7 test sequences per true class.
    cmat = np.zeros((4,4))
    arrsum_rf = np.zeros((4,1))
    arrsum_rm = np.zeros((4,1))
    arrsum_sf = np.zeros((4,1))
    arrsum_sm = np.zeros((4,1))
    k = 7
    i = 0
    while (k < 29):
        arrsum_rf[i] = np.sum(rf_final[k-7:k,0])
        arrsum_rm[i] = np.sum(rm_final[k-7:k,0])
        arrsum_sf[i] = np.sum(sf_final[k-7:k,0])
        arrsum_sm[i] = np.sum(sm_final[k-7:k,0])
        i = i+1
        k = k+7
    i=0
    while (i < 4):
        j=0
        while (j < 4):
            if (i == 0):
                cmat[i][j] = arrsum_rf[j]
            elif (i == 1):
                cmat[i][j] = arrsum_rm[j]
            elif (i == 2):
                cmat[i][j] = arrsum_sf[j]
            else:
                cmat[i][j] = arrsum_sm[j]
            j = j+1
        i = i+1
    #print cmat
    # Plot Confusion Matrix
    Nlabels = 4
    fig = pp.figure()
    ax = fig.add_subplot(111)
    figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
    ax.set_title('Performance of HMM Models')
    pp.xlabel("Targets")
    pp.ylabel("Predictions")
    ax.set_xticks([0.5,1.5,2.5,3.5])
    ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
    ax.set_yticks([3.5,2.5,1.5,0.5])
    ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
    figbar = fig.colorbar(figplot)
    # annotate each cell with its raw count
    i = 0
    while (i < 4):
        j = 0
        while (j < 4):
            pp.text(j+0.5,3.5-i,cmat[i][j])
            j = j+1
        i = i+1
    pp.show()
| tapomayukh/projects_in_python | classification/Classification_with_HMM/Single_Contact_Classification/force_codes/resolution/hmm_crossvalidation_force_6.py | Python | mit | 16,210 | [
"Gaussian",
"Mayavi"
] | e8d8fb54e57372a4dbcabf4a5f709d4f92711885cf6344356d2d991f77b4fa52 |
import sys, shutil
sys.path.insert(1, "../../../")
import h2o
def cars_checkpoint(ip,port):
    """Check GBM checkpoint restarts reject non-modifiable parameters.

    Builds a GBM model, continues it from a checkpoint with parameters that
    ARE modifiable, then verifies that each parameter not in
    MODIFIABLE_BY_CHECKPOINT_FIELDS raises an EnvironmentError when combined
    with ``checkpoint`` (PUBDEV-1833).
    """
    cars = h2o.upload_file(h2o.locate("smalldata/junit/cars_20mpg.csv"))
    predictors = ["displacement","power","weight","acceleration","year"]
    response_col = "economy"
    distribution = "gaussian"
    # build first model
    model1 = h2o.gbm(x=cars[predictors],y=cars[response_col],ntrees=10,max_depth=2, min_rows=10,
                     distribution=distribution)
    # continue building the model (these parameters are modifiable)
    model2 = h2o.gbm(x=cars[predictors],y=cars[response_col],ntrees=11,max_depth=3, min_rows=9,r2_stopping=0.8,
                     distribution=distribution,checkpoint=model1._id)
    # erroneous, not MODIFIABLE_BY_CHECKPOINT_FIELDS
    # PUBDEV-1833
    non_modifiable = [("learn_rate", 0.00001),
                      ("nbins_cats", 99),
                      ("balance_classes", True),
                      ("nbins", 99),
                      ("nfolds", 3)]
    for param, value in non_modifiable:
        try:
            h2o.gbm(y=cars[response_col], x=cars[predictors], distribution=distribution,
                    checkpoint=model1._id, **{param: value})
            assert False, "Expected model-build to fail because %s not modifiable by checkpoint" % param
        except EnvironmentError:
            assert True
if __name__ == "__main__":
    # h2o test harness: connects to the cluster given by argv and runs the test
    h2o.run_test(sys.argv, cars_checkpoint)
| weaver-viii/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_NOPASS_error_checkpointGBM.py | Python | apache-2.0 | 2,486 | [
"Gaussian"
] | a548b778eb09b08fe0690fbec41e5aa0c287212dcaa9ab9b2bf0d6e6fa2e8fc1 |
#!/usr/bin/env python
# pmx Copyright Notice
# ============================
#
# The pmx source code is copyrighted, but you can freely use and
# copy it as long as you don't change or remove any of the copyright
# notices.
#
# ----------------------------------------------------------------------
# pmx is Copyright (C) 2006-2011 by Daniel Seeliger
#
# All Rights Reserved
#
# Permission to use, copy, modify, distribute, and distribute modified
# versions of this software and its documentation for any purpose and
# without fee is hereby granted, provided that the above copyright
# notice appear in all copies and that both the copyright notice and
# this permission notice appear in supporting documentation, and that
# the name of Daniel Seeliger not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# DANIEL SEELIGER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL DANIEL SEELIGER BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ----------------------------------------------------------------------
import sys, os, time
from copy import deepcopy
from pmx import *
from pmx.parser import *
from pylab import *
from scipy.integrate import simps
from scipy.optimize import fmin
from scipy.special import erf
from random import gauss, randint, sample, choice
#from numpy import std
# Global switch for extra development/debug output.
debug = True
# Matplotlib rc overrides used for every plot made by this script; only
# the legend font size is actually set, the remaining entries are kept
# commented out for easy tweaking.
params = {#'backend': 'ps',
    # 'axes.labelsize': 10,
    # 'text.fontsize': 10,
    'legend.fontsize': 12,
    # 'xtick.labelsize': 8,
    # 'ytick.labelsize': 8,
    # 'text.usetex': True,
}# 'figure.figsize': fig_size}
rcParams.update(params)
def tee( fp, s ):
    """Write the message *s* both to the open file *fp* and to stdout.

    Used throughout the analysis so that the results file mirrors the
    console output.

    Defect fixed: the old ``print >>fp, s`` statement is Python-2-only
    syntax; an explicit write with a newline is equivalent and works on
    both Python 2 and 3.
    """
    fp.write('%s\n' % s)
    print(s)
def cgi_error_from_mean(nruns, mu1, sig1, n1, mu2, sig2, n2):
    """Bootstrap estimate of the CGI error when the two work Gaussians
    do not intersect and dG is taken as the midpoint of the means.

    *nruns* synthetic experiments are generated: each draws n1 samples
    from N(mu1, sig1) and n2 samples from N(mu2, sig2) and records the
    midpoint of the two sample means.  The standard deviation of those
    midpoints is returned as the error estimate.

    Defect fixed: the original also computed per-run standard
    deviations and Gaussian heights (s1, s2, p1, p2) and an overall
    mean that were never used; the dead code is removed without
    changing the random-number stream or the result.
    """
    iseq = []
    for k in range(nruns):
        g1 = [gauss(mu1, sig1) for i in range(n1)]
        g2 = [gauss(mu2, sig2) for i in range(n2)]
        # dG estimate of this synthetic experiment: midpoint of the means
        iseq.append((average(g1) + average(g2)) / 2.)
    return std(iseq)
def cgi_error(nruns, mu1, sig1, n1, mu2, sig2, n2):
    """Bootstrap estimate of the statistical error of the CGI free
    energy.

    The measurement is repeated *nruns* times with synthetic Gaussian
    work samples (n1 values from N(mu1, sig1) and n2 from N(mu2, sig2));
    the standard deviation of the resulting intersection points is
    returned.
    """
    intersections = []
    for run in range(nruns):
        sample1 = [gauss(mu1, sig1) for i in range(n1)]
        sample2 = [gauss(mu2, sig2) for i in range(n2)]
        m1, s1 = average(sample1), std(sample1)
        m2, s2 = average(sample2), std(sample2)
        # heights of the two unit-area Gaussians
        a1 = 1. / (s1 * sqrt(2 * pi))
        a2 = 1. / (s2 * sqrt(2 * pi))
        intersections.append(gauss_intersection([a1, m1, s1], [a2, m2, s2]))
    return std(intersections)
def sort_file_list( lst ):
    """Sort a list of dgdl file paths numerically by run directory.

    The parent directory of each file is assumed to be named
    ``<base><number>`` (e.g. ``run1``, ``run2``, ...); the base name is
    detected from the first entry.  If the directory is not numbered
    the list is returned unchanged.  Sorting happens in place and the
    (same) list is returned.

    Defect fixed: the original used ``cmp`` and ``list.sort`` with a
    comparator, both removed in Python 3; a ``key`` function gives the
    identical ordering on Python 2 as well.
    """
    # guess the directory base name from the first entry
    dir_name = lst[0].split('/')[-2]
    base_name = ''
    for i, x in enumerate(dir_name):
        if x.isdigit():
            # accept only if everything from here to the end is numeric
            if all(c.isdigit() for c in dir_name[i:]):
                base_name = dir_name[:i]
                break
    if base_name:
        get_num = lambda s: int(s.split('/')[-2].split(base_name)[1])
        lst.sort(key=get_num)
    return lst
def process_dgdl( fn, ndata = -1, lambda0 = 0 ):
    """Integrate a Gromacs dgdl.xvg file and return the work value.

    Parameters
    ----------
    fn : str
        path of the dgdl file; lines starting with '#', '@' or '&'
        are ignored.
    ndata : int
        expected number of data points; files with a different length
        are skipped (-1 disables the check).
    lambda0 : int
        starting lambda state (0 for 0->1 runs, 1 for 1->0 runs).

    Returns
    -------
    (work, npoints) : (float, int), or (None, None) if the file was
    empty or skipped.

    Defects fixed: Python-2-only print statements, a leaked file
    handle, a bare ``except`` and a rigid two-column unpack that broke
    on files with extra columns.
    """
    sys.stdout.write('\r------> %s' % fn)
    sys.stdout.flush()
    with open(fn) as fh:
        lines = fh.readlines()
    if not lines:
        return None, None
    r = []
    for line in lines:
        if line[0] not in '#@&':
            try:
                r.append([float(x) for x in line.split()])
            except ValueError:
                # malformed numeric field -> corrupt file, skip entirely
                print(' !! Skipping %s ' % fn)
                return None, None
    if ndata != -1 and len(r) != ndata:
        print(' !! Skipping %s ( read %d data points, should be %d )' % (fn, len(r), ndata))
        return None, None
    # convert the time axis to lambda: the run covers the full 0->1
    # (or 1->0) transition, so dlambda is 1/npoints with the sign of
    # the integration direction
    npoints = len(r)
    dlambda = 1. / float(npoints)
    if lambda0 == 1:
        dlambda *= -1
    x = [lambda0 + i * dlambda for i in range(npoints)]
    y = [row[1] for row in r]
    if lambda0 == 1:
        # Simpson integration needs ascending abscissae
        x.reverse()
        y.reverse()
    return simps(y, x), npoints
def check_first_dgdl( fn, lambda0 ):
    """Print a short summary (number of points, trajectory length,
    delta lambda) of the first dgdl file so the user can eyeball the
    simulation setup.  Returns None; output goes to stdout only.

    Defects fixed: Python-2-only print statements and an unclosed file
    handle.
    """
    with open(fn) as fh:
        lines = fh.readlines()
    if not lines:
        return None
    r = []
    for line in lines:
        if line[0] not in '#@&':
            r.append([float(x) for x in line.split()])
    ndata = len(r)
    dlambda = 1. / float(ndata)
    # negative step when integrating the 1->0 direction
    if lambda0 == 1:
        dlambda *= -1
    print('---------------------------------------------')
    print('\t\t Checking simulation data.....')
    print('\t\t File: %s' % fn)
    print('\t\t # data points: %d' % ndata)
    print('\t\t Length of trajectory: %8.3f ps' % r[-1][0])
    print('\t\t Delta lambda: %8.5f' % dlambda)
    print('---------------------------------------------')
def work_from_crooks( lst, lambda0 ):
    """Integrate every dgdl file in *lst* and collect the work values.

    The first file is passed through check_first_dgdl() for a sanity
    report and its point count is used to validate the remaining files.
    Returns (work_values, [[filename, work], ...]); skipped files are
    left out of the work list.
    """
    print('\nProcessing simulation data......')
    check_first_dgdl(lst[0], lambda0)
    first_res, npoints = process_dgdl(lst[0], lambda0=lambda0)
    output_data = [[lst[0], first_res]]
    results = [first_res]
    for fname in lst[1:]:
        work, unused = process_dgdl(fname, ndata=npoints, lambda0=lambda0)
        if work is not None:
            results.append(work)
            output_data.append([fname, work])
    print('')
    return results, output_data
def data_to_gauss( data ):
    """Fit a normal distribution to *data*.

    Returns (mean, stddev, height) where height is the peak value of
    the corresponding unit-area Gaussian, 1/(sigma*sqrt(2*pi)).
    """
    mu = average(data)
    sigma = std(data)
    height = 1. / (sigma * sqrt(2 * pi))
    return mu, sigma, height
def gauss_intersection( g1, g2 ):
    """Return the crossing point of two Gaussians that lies strictly
    between their means, or False when no such crossing exists (e.g.
    identical means, or widths too similar for a usable intersection).

    Each Gaussian is given as [height, mean, sigma]; the height does
    not influence the location of the crossing and is ignored.
    """
    height1, m1, s1 = g1
    height2, m2, s2 = g2
    # coefficients of the quadratic equation for the crossing point
    p1 = m1 / s1 ** 2 - m2 / s2 ** 2
    p2 = sqrt(1 / (s1 ** 2 * s2 ** 2) * (m1 - m2) ** 2 +
              2 * (1 / s1 ** 2 - 1 / s2 ** 2) * log(s2 / s1))
    p3 = 1 / s1 ** 2 - 1 / s2 ** 2
    root_a = (p1 + p2) / p3
    root_b = (p1 - p2) / p3
    lo, hi = min(m1, m2), max(m1, m2)
    # keep only a root that falls strictly between the two means
    if lo < root_a < hi:
        return root_a
    if lo < root_b < hi:
        return root_b
    return False  # we do not take the intersection
def ksref():
    """Tabulate the Kolmogorov-Smirnov reference distribution

        Q(lambda) = sum_k (-1)^k exp(-2 k^2 lambda^2)

    on the grid lambda in [0.25, 2.5) with step 0.001 and return it as
    a list of (lambda, Q) tuples.

    Performance fix: the original summed k = -10000..9999, but for
    lambda >= 0.25 every term with |k| >= ~80 underflows to exactly
    0.0, so the sum is truncated to |k| <= 200 (even bound keeps the
    sign alternation identical) with a bit-identical result and ~50x
    less work.
    """
    nterms = 200
    lamb = arange(0.25, 2.5, .001)
    q = zeros(len(lamb), float)
    f = 1  # (-1)^(k + nterms); nterms is even, so f = (-1)^k
    for k in range(-nterms, nterms):
        q = q + f * exp(-2.0 * (k ** 2) * (lamb ** 2))
        f = -f
    return list(zip(lamb, q))
def ksfunc(lamb):
    """Evaluate the Kolmogorov-Smirnov distribution function

        Q(lamb) = sum_{k=-10000}^{9999} (-1)^k exp(-2 k^2 lamb^2)

    for a single value of *lamb*."""
    nterms = 10000
    sign = 1
    total = 0
    for k in range(-nterms, nterms):
        total = total + sign * exp(-2.0 * (k ** 2) * (lamb ** 2))
        sign = -sign
    return total
def ks(data, alpha=.05, refks = None):
    """Kolmogorov-Smirnov test of *data* against a Gaussian with the
    sample mean and standard deviation.

    Parameters
    ----------
    data : list of work values
    alpha : significance level of the test
    refks : optional precomputed reference table from ksref()

    Returns
    -------
    (1-Q, lambda0, check, bOk): Q is the KS probability of the observed
    deviation, lambda0 the critical value at the chosen significance
    level, check = sqrt(N)*Dmax the test statistic and bOk whether the
    test passed.

    Defects fixed: ``math.sqrt`` was used although ``math`` is never
    imported in this module (NameError unless the pylab star-import
    happens to provide it) -- the numpy ``sqrt`` already in scope is
    used instead; ``filter(...)[0]`` would also break on Python 3, a
    list comprehension behaves identically on both.
    """
    N = len(data)
    nd, ed = edf(data)
    cd = cdf(data)
    siglev = 1 - alpha
    # maximal distance between empirical and model CDF; both sides of
    # every step of the EDF have to be checked
    dval = []
    for i, val in enumerate(ed):
        dval.append(abs(val - cd[i]))
        if i:
            dval.append(abs(ed[i - 1] - cd[i]))
    dmax = max(dval)
    check = sqrt(N) * dmax
    if not refks:
        refks = ksref()
    # first tabulated lambda whose Q exceeds the significance level
    lst = [x for x in refks if x[1] > siglev]
    lam0 = lst[0][0]
    bOk = check < lam0
    q = ksfunc(check)
    return (1 - q), lam0, check, bOk
def edf( dg_data ):
    """Empirical distribution function of *dg_data*.

    Returns (sorted_data, edf) as two numpy arrays, where edf[i] is the
    fraction of points <= sorted_data[i]."""
    ordered = sorted(dg_data)
    total = float(len(ordered))
    steps = [(rank + 1) / total for rank in range(len(ordered))]
    return array(ordered), array(steps)
def cdf( dg_data ):
    """Cumulative distribution function of a Gaussian with the sample
    mean and standard deviation of *dg_data*, evaluated at the sorted
    data points.  Returns a numpy array.

    Defect fixed: the original evaluated ``data - mean`` on a plain
    Python list, which raises a TypeError whenever a list of work
    values is passed in (as ks() does); the data is converted to a
    numpy array first.
    """
    data = array(dg_data, dtype=float)
    data.sort()
    mean = average(data)
    sig = std(data)
    return 0.5 * (1 + erf((data - mean) / float(sig * sqrt(2))))
def data_from_file( fn ):
    """Read integrated work values back from a file written by
    dump_integ_file(); returns the list of work values (second
    column)."""
    rows = read_and_format(fn, 'sf')
    return [row[1] for row in rows]
def dump_integ_file( fn, data):
    """Write (filename, work) pairs to *fn*, one space-separated pair
    per line.  The counterpart reader is data_from_file().

    Defects fixed: Python-2-only ``print >>fp`` syntax, the loop
    variable shadowing the *fn* parameter, and the file handle not
    being closed on error (now a ``with`` block).
    """
    with open(fn, 'w') as fp:
        for fname, w in data:
            # same output as the old ``print >>fp, fn, w`` statement
            fp.write('%s %s\n' % (fname, w))
def Jarz(res, c=1.0, T=298):
    """Jarzynski free energy estimate from the work values *res*.

    c (+1/-1) selects the transition direction, T is the temperature
    in Kelvin; kb is in kJ/(mol K) so the result is in kJ/mol.
    Note: requires at least two work values (the variance term divides
    by n-1).
    """
    kb = 0.00831447215
    beta = 1. / (kb * T)
    n = float(len(res))
    mexp = 0.0
    m = 0.0
    m2 = 0.0
    for w in res:
        mexp += exp(-beta * c * w)
        m += c * w
        m2 += w * w
    mexp /= n
    m /= n
    m2 /= n
    # fluctuation-dissipation estimator, computed for reference only;
    # the exponential-average (Jarzynski) estimator is what is returned
    var = (m2 - m * m) * (n / (n - 1))
    dG_fluct_diss = m - beta * var / 2.0
    return -kb * T * log(mexp)
def Jarz_err_boot(res, nruns, c=1.0, T=298):
    """Bootstrap error of the Jarzynski estimate: resample *res* with
    replacement *nruns* times and return the standard deviation of the
    resulting estimates.

    Defect fixed: the original regenerated the bootstrap sample
    len(res) times per iteration and used only the last draw -- a
    len(res)-fold waste of work with no statistical effect.  One
    resample per bootstrap run is drawn now.
    """
    out = []
    n = len(res)
    for k in range(nruns):
        sys.stdout.write('\rJarzynski error bootstrap: iteration %s/%s' % (k, nruns))
        sys.stdout.flush()
        resample = [choice(res) for unused in range(n)]
        out.append(-1.0 * Jarz(resample, c, T))
    sys.stdout.write('\n')
    return std(out)
def BAR(res_ab, res_ba, T = 298):
    """Bennett Acceptance Ratio free energy estimate.

    res_ab / res_ba are the work values of the 0->1 and 1->0
    transitions.  The self-consistent BAR equation is solved
    numerically with the Nelder-Mead simplex (scipy fmin), starting
    from the midpoint of the two mean work values; the result is the
    (length-1) array produced by fmin, in kJ/mol.
    """
    kb = 0.00831447215
    beta = 1. / (kb * T)
    nf = float(len(res_ab))
    nr = float(len(res_ba))
    # constant accounting for unequal numbers of forward/backward runs
    M = kb * T * log(nf / nr)
    w_f = array(res_ab)
    w_r = array(res_ba)

    def residual(x, w_f, w_r):
        # squared difference between the two sides of the BAR identity
        sum_f = 0
        for v in w_f:
            sum_f += 1. / (1 + exp(beta * (M + v - x)))
        sum_r = 0
        for v in w_r:
            sum_r += 1. / (1 + exp(-beta * (M + v - x)))
        return (sum_f - sum_r) ** 2

    start = (average(w_f) + average(w_r)) / 2.
    return fmin(residual, x0=start, args=(w_f, w_r), disp=0)
def BAR_err(dG, res_ab, res_ba, T = 298):
    """Analytical error estimate of the BAR free energy *dG*
    (Bennett's variance formula).  Returns the standard error in
    kJ/mol."""
    kb = 0.00831447215
    beta = 1. / (kb * T)
    w_f = array(res_ab)
    w_r = array(res_ba)
    nf = float(len(w_f))
    nr = float(len(w_r))
    M = kb * T * log(nf / nr)
    # average Fermi-function variance term over both directions
    acc = 0
    for v in w_f:
        acc += 1. / (2 + 2 * cosh(beta * (M + v - dG)))
    for v in w_r:
        acc += 1. / (2 + 2 * cosh(beta * (M + v - dG)))
    N = nf + nr
    acc /= float(N)
    variance = 1 / (beta ** 2 * N) * (1. / acc - (N / nf + N / nr))
    return sqrt(variance)
def BAR_err_boot(res_ab, res_ba, nruns, T=298):
    """Bootstrap error of the BAR free energy: resample both work sets
    with replacement *nruns* times, recompute BAR for each pair of
    resamples and return the standard deviation of the results.

    Defects fixed: (1) the backward resample was drawn from res_ab
    instead of res_ba, so the 1->0 work values never entered the error
    estimate; (2) each resample was regenerated len(res) times per
    iteration with only the last draw kept.
    """
    res = []
    nf = len(res_ab)
    nr = len(res_ba)
    for k in range(nruns):
        sys.stdout.write('\rBAR error bootstrap: iteration %s/%s' % (k, nruns))
        sys.stdout.flush()
        valA = [choice(res_ab) for unused in range(nf)]
        valB = [choice(res_ba) for unused in range(nr)]
        res.append(BAR(valA, valB, T))
    sys.stdout.write('\n')
    return std(res)
def gauss_func( A, mean, dev, x):
    """Evaluate a Gaussian of height *A*, centre *mean* and width *dev*
    at the positions in *x*; returns a numpy array."""
    pos = array(x)
    return A * exp(-((pos - mean) ** 2) / (2.0 * (dev ** 2)))
def make_plot( fname, data1, data2, result, err, nbins, dpi ):
    """Plot the forward (0->1) and backward (1->0) work histograms,
    their Gaussian fits and the dG estimate as a vertical line, then
    save the figure to *fname*.

    data1/data2 are the two work value lists, result/err the dG
    estimate and its error, nbins the histogram resolution and dpi the
    output resolution.
    """
    figure( figsize = (8, 6) )
    # Gaussian fits of the two work distributions
    mf, devf, Af = data_to_gauss( data1 )
    mb, devb, Ab = data_to_gauss( data2 )
    maxi = max( data1+data2 )
    mini = min( data1+data2 )
    # normalized histograms over a common range
    # NOTE(review): 'normed=True' only exists in old matplotlib; newer
    # releases renamed it to 'density=True'.
    n1, bins1, patches1 = hist(data1, range = (mini,maxi),bins=nbins, facecolor='blue', alpha=0.75, normed=True, label='0->1')
    n2, bins2, patches2 = hist(data2, range = (mini,maxi),bins=nbins, facecolor='red', alpha=0.75, normed=True, label='1->0')
    xlabel('W [kJ/mol]', fontsize=20)
    ylabel('Probability', fontsize=20)
    title(r'Work Distribution $\lambda$ 0->1 (blue) $\lambda$ 1->0 (red)')
    grid(lw = 2)
    # blank out the y tick labels; only the shape of the distributions
    # matters
    loc, lab = yticks()
    ll = []
    for i in range(len(lab)):
        ll.append("")
    yticks( loc, ll )
    # smooth Gaussian curves drawn on top of the histograms
    x = arange( mini, maxi, .5 )
    y1 = gauss_func( Af, mf, devf, x )
    y2 = gauss_func( Ab, mb, devb, x )
    plot(x, y1, 'b--', linewidth=2)
    plot(x, y2, 'r--', linewidth=2)
    size = max( [max(y1), max(y2)] )
    # vertical line marking the dG estimate
    res_x = [result, result ]
    res_y = [0, size*1.2 ]
    plot( res_x, res_y, 'k--', linewidth=2, label = r'$\Delta$G = %.2f $\pm$ %.2f kJ/mol' % (result, err))
    legend(shadow=True, fancybox = True)
    ylim(0, size*1.2 )
    # thicker axes frame
    xl = gca()
    for val in xl.spines.values():
        val.set_lw(2)
    savefig( fname, dpi= dpi )
def make_W_over_time_plot( fname, data1, data2, result, err, nbins, dpi):
    """Two-panel figure: work values versus snapshot number with
    smoothed trend lines (left) and the corresponding histograms plus
    Gaussian fits rotated sideways (right).  Saved to *fname*."""
    figure( figsize = (8, 6) )
    x1 = range( len(data1) )
    x2 = range( len(data2) )
    # NOTE(review): this compares the two lists lexicographically
    # (Python 2 semantics); the intent is presumably to pick the longer
    # x axis -- confirm before changing.
    if x1>x2: x = x1
    else: x = x2
    # Gaussian fits and common data range for the right-hand panel
    mf, devf, Af = data_to_gauss( data1 )
    mb, devb, Ab = data_to_gauss( data2 )
    maxi = max( data1+data2 )
    mini = min( data1+data2 )
    # smoothed trends of the raw work series
    sm1 = smooth( array(data1) )
    sm2 = smooth( array(data2) )
    subplot(1,2,1)
    plot(x1,data1,'g-',linewidth=2 ,label="Forward (0->1)", alpha=.3)
    plot(x1,sm1,'g-',linewidth=3)
    plot(x2,data2,'b-',linewidth=2 ,label="Backward (1->0)", alpha=.3)
    plot(x2,sm2,'b-',linewidth=3)
    legend(shadow=True, fancybox = True, loc='upper center')
    ylabel(r'W [kJ/mol]', fontsize = 20)
    xlabel(r'# Snapshot', fontsize = 20)
    grid(lw=2)
    xlim(0,x[-1]+1)
    xl = gca()
    for val in xl.spines.values():
        val.set_lw(2)
    # right panel: sideways histograms with the Gaussian fits
    subplot(1,2,2)
    hist(data1,bins=nbins, orientation='horizontal', facecolor='green',alpha=.75, normed=True)
    hist(data2,bins=nbins, orientation='horizontal', facecolor='blue',alpha=.75, normed=True)
    x = arange( mini, maxi, .5 )
    y1 = gauss_func( Af, mf, devf, x )
    y2 = gauss_func( Ab, mb, devb, x )
    plot(y1, x, 'g--', linewidth=2)
    plot(y2, x, 'b--', linewidth=2)
    size = max( [max(y1), max(y2)] )
    # dG marker line (x and y are swapped in this panel)
    res_x = [result, result ]
    res_y = [0, size*1.2 ]
    plot( res_y, res_x, 'k--', linewidth=2, label = r'$\Delta$G = %.2f $\pm$ %.2f kJ/mol' % (result, err))
    legend(shadow=True, fancybox = True, loc='upper center')
    xticks([])
    yticks([])
    xl = gca()
    for val in xl.spines.values():
        val.set_lw(2)
    subplots_adjust(wspace=0.0, hspace = 0.1)
    savefig(fname, dpi=dpi)
def smooth(x,window_len=11,window='hanning'):
    """Smooth a 1D signal with a reflected-window convolution.

    The signal is extended at both ends with point-mirrored copies of
    itself, convolved with the normalized window and cut back to the
    original length.

    Parameters
    ----------
    x : 1D numpy array
    window_len : length of the smoothing window
    window : 'flat', 'hanning', 'hamming', 'bartlett' or 'blackman';
        'flat' gives a plain moving average.

    Raises
    ------
    ValueError for non-1D input, input shorter than the window, or an
    unknown window name.

    Defects fixed: Python-2-only ``raise E, "msg"`` syntax, the
    'is on of' typo in the error message, and the ``eval`` used to look
    up the window function (replaced by an explicit dict lookup).
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # mirror the signal at both ends to suppress boundary transients
    s = r_[2 * x[0] - x[window_len:1:-1], x, 2 * x[-1] - x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = ones(window_len, 'd')
    else:
        # explicit lookup instead of eval()
        w = {'hanning': hanning, 'hamming': hamming,
             'bartlett': bartlett, 'blackman': blackman}[window](window_len)
    y = convolve(w / w.sum(), s, mode='same')
    return y[window_len - 1:-window_len + 1]
def select_random_subset( lst, n):
    """Return *n* distinct, randomly chosen elements of *lst*.

    The chosen indices are printed for reproducibility of a run's log.

    Defects fixed: the original draw-and-reject loop never terminated
    when n > len(lst) (now a ValueError via random.sample) and was
    O(n^2) in the number of rejections; sample() draws the unique
    indices in one shot.
    """
    idx = sample(range(len(lst)), n)
    print(idx)
    return [lst[i] for i in idx]
def main(argv):
    """Command-line driver of the Crooks/BAR free energy analysis.

    Parses the options, integrates the dgdl.xvg files (or reads
    previously integrated work values), runs the CGI and BAR estimators
    (optionally Jarzynski and a Kolmogorov-Smirnov normality test) and
    writes the results file plus the two plots.
    """
    version = "1.2"
    # command line options, parsed by pmx's Commandline helper
    options = [
        Option( "-nbins", "int", 10, "number of histograms bins for plot"),
        Option( "-T", "real", 298, "Temperature for BAR calculation"),
        Option( "-dpi", "int", 300, "plot resolution"),
        Option( "-reverseB", "bool", False, "reverse state B"),
        Option( "-firstA", "int", 0, "first trajectory to analyze (by default all values are taken)"),
        Option( "-lastA", "int", 100, "last trajectory to analyze (by default all values are taken)"),
        Option( "-firstB", "int", 0, "first trajectory to analyze (by default all values are taken)"),
        Option( "-lastB", "int", 100, "last trajectory to analyze (by default all values are taken)"),
        Option( "-rand", "int", 50, "take a random subset of trajectories"),
        Option( "-integ_only", "bool", False, "Do integration only. Skip analysis."),
        Option( "-KS", "bool", True, "Do Kolmogorov-Smirnov test"),
        Option( "-jarz", "bool", False, "Jarzynski estimation"),
        Option( "-nruns", "int", 100, "number of runs for bootstrapped BAR error"),
        ]
    files = [
        FileOption("-pa", "r/m",["xvg"],"dgdl.xvg", "paths to 0->1 runs"),
        FileOption("-pb", "r/m",["xvg"],"dgdl.xvg", "paths to 1->0 runs"),
        FileOption("-o", "w",["dat"],"results.dat", "results"),
        FileOption("-cgi_plot", "w",["png","eps","svg","pdf"],"cgi.png", "plot work histograms "),
        FileOption("-W_over_t", "w",["png","eps","svg","pdf"],"W_over_t.png", "plot work over time "),
        FileOption("-i0", "r/m/o",["dat"],"integ0.dat", "read integrated W (0->1)"),
        FileOption("-i1", "r/m/o",["dat"],"integ1.dat", "read integrated W (1->0)"),
        FileOption("-o0", "w",["dat"],"integ0.dat", "write integrated W (0->1)"),
        FileOption("-o1", "w",["dat"],"integ1.dat", "write integrated W (1->0)"),
        ]
    help_text = ('Calculates free energies from fast growth ',
                 'thermodynamic integration runs.',
                 'First method: Crooks-Gaussian Intersection (CGI)',
                 'Second method: Benett Acceptance Ratio (BAR)'
                 )
    cmdl = Commandline( argv, options = options,
                        fileoptions = files,
                        program_desc = help_text,
                        check_for_existing_files = False, version = version)
    # results file; everything printed through tee() ends up both here
    # and on stdout
    out = open(cmdl['-o'],'w')
    print >>out, "# analyze_crooks.py, version = %s" % version
    print >>out, "# pwd = %s" % os.getcwd()
    print >>out, "# %s (%s)" % (time.asctime(), os.environ.get('USER') )
    print >>out, "# command = %s" % ' '.join(argv)
    print >>out, "#------------------------------------------------"
    # either integrate the raw dgdl.xvg files, or read previously
    # integrated work values if -i0/-i1 are given
    if not cmdl.opt['-i0'].is_set:
        run_ab = cmdl['-pa']
        run_ba = cmdl['-pb']
        run_ab = sort_file_list( run_ab )
        run_ba = sort_file_list( run_ba )
        res_ab, ab_data = work_from_crooks( run_ab, lambda0 = 0 )
        res_ba, ba_data = work_from_crooks( run_ba, lambda0 = 1 )
        dump_integ_file( cmdl['-o0'], ab_data)
        dump_integ_file( cmdl['-o1'], ba_data)
    else:
        res_ab = []
        res_ba = []
        for fn in cmdl['-i0']:
            print '\t\tReading integrated values (0->1) from', fn
            res_ab.extend(data_from_file( fn ) )
        for fn in cmdl['-i1']:
            print '\t\tReading integrated values (1->0) from', fn
            res_ba.extend(data_from_file( fn ) )
    if cmdl['-integ_only']:
        print '\n Integration done. Skipping analysis.'
        print '\n ......done........\n'
        sys.exit(0)
    # restrict the analysis to the requested trajectory ranges
    firstA = 0
    lastA = len(res_ab)
    firstB = 0
    lastB = len(res_ba)
    if cmdl.opt['-firstA'].is_set:
        firstA = cmdl['-firstA']
        tee(out, ' first trajectory to read from A: %d' % firstA)
    if cmdl.opt['-lastA'].is_set:
        lastA = cmdl['-lastA']
        tee(out, ' last trajectory to read from A : %d' % lastA)
    if cmdl.opt['-firstB'].is_set:
        firstB = cmdl['-firstB']
        tee(out, ' first trajectory to read from B: %d' % firstB)
    if cmdl.opt['-lastB'].is_set:
        lastB = cmdl['-lastB']
        tee(out, ' last trajectory to read from B : %d' % lastB)
    res_ab = res_ab[firstA:lastA]
    res_ba = res_ba[firstB:lastB]
    # optionally work with a random subset of the trajectories
    if cmdl.opt['-rand'].is_set:
        ntraj = cmdl['-rand']
        tee(out, ' select random subset of trajectories: %d' % ntraj )
        res_ab = select_random_subset(res_ab, ntraj)
        res_ba = select_random_subset(res_ba, ntraj)
    # Gaussian fits of the two work distributions
    mf, devf, Af = data_to_gauss( res_ab )
    mb, devb, Ab = data_to_gauss( res_ba )
    tee(out, ' --------------------------------------------------------')
    tee(out, ' ANALYSIS: NUMBER OF TRAJECTORIES:')
    tee(out, ' 0->1 : %d' % len(res_ab))
    tee(out, ' 1->0 : %d' % len(res_ba))
    tee(out, ' --------------------------------------------------------')
    tee(out, ' ANALYSIS: Crooks-Gaussian Intersection ')
    tee(out, ' --------------------------------------------------------')
    tee(out, ' Forward : mean = %8.3f std = %8.3f' % ( mf, devf ))
    tee(out, ' Backward : mean = %8.3f std = %8.3f' % ( mb, devb ))
    if cmdl['-KS']:
        tee(out, ' Running KS-test ....')
        q0, lam00, check0, bOk0 = ks(res_ab)
        q1, lam01, check1, bOk1 = ks(res_ba)
        tee(out, ' Forward : gaussian quality = %3.2f' % q0)
        if bOk0:
            tee(out, ' ---> KS-Test Ok')
        else:
            # NOTE(review): the message labels the values as
            # sqrt(N)*Dmax and lambda0, but (q0, check0) is passed in;
            # (check0, lam00) looks intended -- confirm before changing.
            tee(out, ' ---> KS-Test Failed. sqrt(N)*Dmax = %4.2f, lambda0 = %4.2f' %( q0, check0 ))
        tee(out, ' Backward : gaussian quality = %3.2f' % q1)
        if bOk1:
            tee(out, ' ---> KS-Test Ok')
        else:
            # NOTE(review): same label/value mismatch as above.
            tee(out, ' ---> KS-Test Failed. sqrt(N)*Dmax = %4.2f, lambda0 = %4.2f' %( q1, check1 ))
    tee(out, ' Calculating Intersection...')
    # CGI estimate: intersection of the two Gaussians, with a fallback
    # to the midpoint of the means when they do not intersect
    cgi_result = gauss_intersection( [Af, mf, devf], [Ab, mb, devb ] )
    intersection = True
    if not cgi_result:
        tee(out, '\n Gaussians too close for intersection calculation')
        tee(out, ' --> Taking difference of mean values')
        cgi_result = (mf+mb)*.5
        intersection = False
    tee(out, ' RESULT: dG ( CGI ) = %8.4f kJ/mol' % cgi_result)
    # the error estimate matches whichever dG definition was used above
    if intersection:
        cgi_err = cgi_error( 1000, mf, devf, len( res_ab), mb, devb, len(res_ba ) )
    else:
        cgi_err = cgi_error_from_mean( 1000, mf, devf, len( res_ab), mb, devb, len(res_ba ) )
    tee(out, ' RESULT: error_dG ( CGI ) = %8.4f kJ/mol' % cgi_err)
    tee(out, ' --------------------------------------------------------')
    tee(out, ' ANALYSIS: Bennett Acceptance Ratio ')
    tee(out, ' --------------------------------------------------------')
    T = cmdl['-T']
    tee(out, ' Solving numerical equation with Nelder-Mead Simplex algorithm.. ')
    tee(out, ' Temperature used: %8.2f K' % T)
    bar_result = BAR( res_ab, res_ba, T)
    tee(out, ' RESULT: dG (BAR ) = %8.4f kJ/mol' % bar_result)
    bar_err = BAR_err( bar_result, res_ab, res_ba, T)
    bar_err_boot = BAR_err_boot( res_ab, res_ba, cmdl['-nruns'], T)
    tee(out, ' RESULT: error_dG_analyt (BAR ) = %8.4f kJ/mol' % bar_err)
    tee(out, ' RESULT: error_dG_bootstrap (BAR ) = %8.4f kJ/mol' % bar_err_boot)
    tee(out, ' ------------------------------------------------------')
    diff = abs( cgi_result - bar_result )
    mean = (cgi_result+bar_result)*.5
    tee(out, ' Difference between BAR and CGI = %8.5f kJ/mol' % diff )
    tee(out, ' Mean of BAR and CGI = %8.5f kJ/mol' % mean )
    tee(out, ' ------------------------------------------------------')
    # optional Jarzynski analysis of both directions
    if cmdl['-jarz']:
        tee(out, ' --------------------------------------------------------')
        tee(out, ' ANALYSIS: Jarzynski estimator ')
        tee(out, ' --------------------------------------------------------')
        jarz_resultA = Jarz( res_ab, 1.0, T)
        jarz_resultB = -1.0*Jarz( res_ba, -1.0, T)
        tee(out, ' RESULT: dG_forward (Jarzynski) = %8.4f kJ/mol' % jarz_resultA)
        tee(out, ' RESULT: dG_backward (Jarzynski) = %8.4f kJ/mol' % jarz_resultB)
        jarz_err_bootA = Jarz_err_boot( res_ab, cmdl['-nruns'], 1.0, T)
        jarz_err_bootB = Jarz_err_boot( res_ba, cmdl['-nruns'], -1.0, T)
        # tee(out, ' RESULT: error_dG_forward (Jarzynski) = %8.4f kJ/mol' % jarz_errA)
        tee(out, ' RESULT: error_dG_bootstrap_forward (Jarzynski) = %8.4f kJ/mol' % jarz_err_bootA)
        # tee(out, ' RESULT: error_dG_backward (Jarzynski) = %8.4f kJ/mol' % jarz_errB)
        tee(out, ' RESULT: error_dG_bootstrap_backward (Jarzynski) = %8.4f kJ/mol' % jarz_err_bootB)
        tee(out, ' ------------------------------------------------------')
        mean = (jarz_resultA+jarz_resultB)*.5
        tee(out, ' Mean of Jarzynski foward and backward = %8.5f kJ/mol' % mean )
        tee(out, ' ------------------------------------------------------')
    print '\n Plotting histograms......'
    make_plot( cmdl['-cgi_plot'], res_ab, res_ba, cgi_result, cgi_err, cmdl['-nbins'], cmdl['-dpi'] )
    make_W_over_time_plot( cmdl['-W_over_t'], res_ab, res_ba, cgi_result, cgi_err, cmdl['-nbins'], cmdl['-dpi'])
    tee(out, '\n ......done...........\n')
# Script entry point.  NOTE(review): there is no __main__ guard, so
# importing this module runs the whole analysis immediately.
main( sys.argv )
| tectronics/pmx | scripts/analyze_crooks.py | Python | lgpl-3.0 | 24,517 | [
"Gaussian"
] | d2f70efd7f82db889638c4ef888e49bdaaf851e8945154d6ccb2dbaa2d0b984a |
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 class_weight=None,
                 presort=False):
        """Store the tree hyper-parameters.

        No validation happens here; all parameters are checked and
        resolved to concrete values in fit().
        """
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.class_weight = class_weight
        self.presort = presort
        # Attributes populated by fit(); None until the tree is built.
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None
    def fit(self, X, y, sample_weight=None, check_input=True,
            X_idx_sorted=None):
        """Build a decision tree from the training set (X, y).
        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.
        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression). In the regression case, use ``dtype=np.float64`` and
            ``order='C'`` for maximum efficiency.
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many tree
            are grown on the same dataset, this allows the ordering to be
            cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what to do.
        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csc")
            y = check_array(y, ensure_2d=False, dtype=None)
            if issparse(X):
                X.sort_indices()
                if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
                    raise ValueError("No support for np.int64 index based "
                                     "sparse matrices")
        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)
        y = np.atleast_1d(y)
        expanded_class_weight = None
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        if is_classification:
            # Encode labels as integer indices per output; the original
            # labels are kept in self.classes_ for decoding at predict
            # time.
            check_classification_targets(y)
            y = np.copy(y)
            self.classes_ = []
            self.n_classes_ = []
            if self.class_weight is not None:
                y_original = np.copy(y)
            y_encoded = np.zeros(y.shape, dtype=np.int)
            for k in range(self.n_outputs_):
                classes_k, y_encoded[:, k] = np.unique(y[:, k],
                                                       return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])
            y = y_encoded
            if self.class_weight is not None:
                expanded_class_weight = compute_sample_weight(
                    self.class_weight, y_original)
        else:
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_
        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        # Check parameters
        max_depth = ((2 ** 31) - 1 if self.max_depth is None
                     else self.max_depth)
        max_leaf_nodes = (-1 if self.max_leaf_nodes is None
                          else self.max_leaf_nodes)
        # min_samples_leaf / min_samples_split may be given either as an
        # absolute count (int) or as a fraction of n_samples (float).
        if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
            min_samples_leaf = self.min_samples_leaf
        else:  # float
            min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
        if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
            min_samples_split = self.min_samples_split
        else:  # float
            min_samples_split = int(ceil(self.min_samples_split * n_samples))
            min_samples_split = max(2, min_samples_split)
        # a split needs at least two leaves' worth of samples
        min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            if self.max_features > 0.0:
                max_features = max(1,
                                   int(self.max_features * self.n_features_))
            else:
                max_features = 0
        self.max_features_ = max_features
        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if not (0. < self.min_samples_split <= 1. or
                2 <= self.min_samples_split):
            raise ValueError("min_samples_split must be in at least 2"
                             " or in (0, 1], got %s" % min_samples_split)
        if not (0. < self.min_samples_leaf <= 0.5 or
                1 <= self.min_samples_leaf):
            raise ValueError("min_samples_leaf must be at least than 1 "
                             "or in (0, 0.5], got %s" % min_samples_leaf)
        if not 0 <= self.min_weight_fraction_leaf <= 0.5:
            raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")
        if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
            raise ValueError("max_leaf_nodes must be integral number but was "
                             "%r" % max_leaf_nodes)
        if -1 < max_leaf_nodes < 2:
            raise ValueError(("max_leaf_nodes {0} must be either smaller than "
                              "0 or larger than 1").format(max_leaf_nodes))
        if sample_weight is not None:
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))
        # fold the class-weight expansion into the sample weights
        if expanded_class_weight is not None:
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Set min_weight_leaf from min_weight_fraction_leaf
        if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
            min_weight_leaf = (self.min_weight_fraction_leaf *
                               np.sum(sample_weight))
        else:
            min_weight_leaf = 0.
        presort = self.presort
        # Allow presort to be 'auto', which means True if the dataset is dense,
        # otherwise it will be False.
        if self.presort == 'auto' and issparse(X):
            presort = False
        elif self.presort == 'auto':
            presort = True
        if presort is True and issparse(X):
            raise ValueError("Presorting is not supported for sparse "
                             "matrices.")
        # If multiple trees are built on the same dataset, we only want to
        # presort once. Splitters now can accept presorted indices if desired,
        # but do not handle any presorting themselves. Ensemble algorithms
        # which desire presorting must do presorting themselves and pass that
        # matrix into each tree.
        if X_idx_sorted is None and presort:
            X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
                                             dtype=np.int32)
        if presort and X_idx_sorted.shape != X.shape:
            raise ValueError("The shape of X (X.shape = {}) doesn't match "
                             "the shape of X_idx_sorted (X_idx_sorted"
                             ".shape = {})".format(X.shape,
                                                   X_idx_sorted.shape))
        # Build tree
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
                                                         self.n_classes_)
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
        SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](criterion,
                                                self.max_features_,
                                                min_samples_leaf,
                                                min_weight_leaf,
                                                random_state,
                                                self.presort)
        self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
        # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
        if max_leaf_nodes < 0:
            builder = DepthFirstTreeBuilder(splitter, min_samples_split,
                                            min_samples_leaf,
                                            min_weight_leaf,
                                            max_depth)
        else:
            builder = BestFirstTreeBuilder(splitter, min_samples_split,
                                           min_samples_leaf,
                                           min_weight_leaf,
                                           max_depth,
                                           max_leaf_nodes)
        builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
        # For single-output problems, collapse the per-output lists to
        # plain values for a simpler public API.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
indicates that the samples goes through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        Split quality measure: "gini" for the Gini impurity, "entropy"
        for the information gain.
    splitter : string, optional (default="best")
        Split-choice strategy: "best" or "random".
    max_depth : int or None, optional (default=None)
        Maximum tree depth; if None, nodes are expanded until all leaves
        are pure or hold fewer than ``min_samples_split`` samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, float, optional (default=2)
        Minimum number (int) or fraction (float, ceil of
        ``min_samples_split * n_samples``) of samples required to split
        an internal node.
    min_samples_leaf : int, float, optional (default=1)
        Minimum number (int) or fraction (float, ceil of
        ``min_samples_leaf * n_samples``) of samples required at a leaf.
    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the input samples required at a
        leaf node.
    max_features : int, float, string or None, optional (default=None)
        Number of features considered per split: an int, a fraction
        (float), "auto"/"sqrt" (``sqrt(n_features)``), "log2", or None
        (all features). The search does not stop until at least one
        valid partition is found, even if that requires inspecting more
        than ``max_features`` features.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator, or None (use ``np.random``) controlling the
        randomness of the estimator.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with at most this many leaves in best-first fashion
        (best = largest relative impurity reduction); None means
        unlimited. When set, ``max_depth`` is ignored.
    class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
        Weights associated with classes as ``{class_label: weight}``
        (one dict per output for multi-output problems). "balanced"
        uses ``n_samples / (n_classes * np.bincount(y))``. Multiplied
        with ``sample_weight`` if that is passed to ``fit``.
    presort : bool, optional (default=False)
        Whether to presort the data to speed up split finding; may slow
        training on large datasets but help on small or shallow ones.

    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        The class labels (one array per output for multi-output problems).
    feature_importances_ : array of shape = [n_features]
        Normalized total criterion reduction per feature (Gini
        importance [4]_).
    max_features_ : int,
        The inferred value of max_features.
    n_classes_ : int or list
        The number of classes (per output for multi-output problems).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None,
                 presort=False):
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state,
            presort=presort)

    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.

        The predicted class probability is the fraction of samples of the
        same class in the leaf a sample ends up in.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples, ordered as in
            the attribute `classes_`.
        """
        X = self._validate_X_predict(X, check_input)
        counts = self.tree_.predict(X)

        def _normalize(class_counts):
            # Turn per-leaf class counts into probabilities. Rows with a
            # zero total are divided by 1 so they stay all-zero instead of
            # producing NaNs.
            totals = class_counts.sum(axis=1)[:, np.newaxis]
            totals[totals == 0.0] = 1.0
            class_counts /= totals
            return class_counts

        if self.n_outputs_ == 1:
            return _normalize(counts[:, :self.n_classes_])
        return [_normalize(counts[:, k, :self.n_classes_[k]])
                for k in range(self.n_outputs_)]

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples, ordered as
            in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        return [np.log(p) for p in proba]
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        Split quality measure; only "mse" (mean squared error, equal to
        variance reduction as feature selection criterion) is supported.
    splitter : string, optional (default="best")
        Split-choice strategy: "best" or "random".
    max_depth : int or None, optional (default=None)
        Maximum tree depth; if None, nodes are expanded until all leaves
        are pure or hold fewer than ``min_samples_split`` samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, float, optional (default=2)
        Minimum number (int) or fraction (float, ceil of
        ``min_samples_split * n_samples``) of samples required to split
        an internal node.
    min_samples_leaf : int, float, optional (default=1)
        Minimum number (int) or fraction (float, ceil of
        ``min_samples_leaf * n_samples``) of samples required at a leaf.
    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the input samples required at a
        leaf node.
    max_features : int, float, string or None, optional (default=None)
        Number of features considered per split: an int, a fraction
        (float), "auto" (``n_features``), "sqrt", "log2", or None (all
        features). The search does not stop until at least one valid
        partition is found, even if that requires inspecting more than
        ``max_features`` features.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator, or None (use ``np.random``) controlling the
        randomness of the estimator.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with at most this many leaves in best-first fashion
        (best = largest relative impurity reduction); None means
        unlimited. When set, ``max_depth`` is ignored.
    presort : bool, optional (default=False)
        Whether to presort the data to speed up split finding; may slow
        training on large datasets but help on small or shallow ones.

    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        Normalized total criterion reduction per feature (Gini
        importance [4]_).
    max_features_ : int,
        The inferred value of max_features.
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 presort=False):
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state,
            presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees differ from classic decision trees in how they are built:
    a random split is drawn for each of the `max_features` randomly
    selected features, and the best of those random splits is chosen.
    With `max_features` set to 1 the tree is built totally at random.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees differ from classic decision trees in how they are built:
    a random split is drawn for each of the `max_features` randomly
    selected features, and the best of those random splits is chosen.
    With `max_features` set to 1 the tree is built totally at random.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None):
        super(ExtraTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
| imaculate/scikit-learn | sklearn/tree/tree.py | Python | bsd-3-clause | 40,425 | [
"Brian"
] | 3be9abe35888aeba0a73543d606e36d249e5553c2a829de41fae578c9b66cc90 |
"""Build a {post_id: (lat, lon)} mapping for Westminster constituencies.

Fetches the old-MapIt-id -> new-post-id mapping, then asks MapIt for the
centroid of every WMC area and writes the result to locations.json.
"""
import json
import time

import requests

# Per-request timeout (seconds) so a stalled HTTP connection cannot hang
# the script indefinitely; requests has no timeout by default.
TIMEOUT = 30

mappings = requests.get(
    'http://firefly.ukcod.org.uk/~mark/ynr-post-mapping.json',
    timeout=TIMEOUT).json()
post_id_lookup = {m['old']: m['new'] for m in mappings}

mapit_ids = requests.get('https://mapit.mysociety.org/areas/WMC',
                         timeout=TIMEOUT).json().keys()

locations = {}
for mapit_id in mapit_ids:
    # Be polite to the MapIt API: throttle to ~2 requests per second.
    time.sleep(0.5)
    # Use https consistently with the /areas request above.
    j = requests.get('https://mapit.mysociety.org/area/%s/geometry' % mapit_id,
                     timeout=TIMEOUT).json()
    # Some areas have no geometry; only record those with a centroid.
    if 'centre_lat' in j:
        post_id = post_id_lookup[mapit_id]
        locations[post_id] = (j['centre_lat'], j['centre_lon'])

with open('locations.json', 'w') as f:
    json.dump(locations, f)
| andylolz/ge2015-results-bot | gen_locations.py | Python | mit | 619 | [
"Firefly"
] | 028eba7800f4409c25173bf91ce51009f4c55f54119a00831761e8082de54def |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Tests Spack's ability to parse the name and version of a package
based on its URL.
"""
import os
import pytest
from spack.url import (
UndetectableVersionError,
parse_name_and_version,
parse_name_offset,
parse_version_offset,
strip_name_suffixes,
strip_version_suffixes,
substitute_version,
)
from spack.version import Version
@pytest.mark.parametrize('url,expected', [
    # No suffix
    ('rgb-1.0.6', 'rgb-1.0.6'),
    # Misleading prefix
    ('jpegsrc.v9b', 'jpegsrc.v9b'),
    ('turbolinux702', 'turbolinux702'),
    ('converge_install_2.3.16', 'converge_install_2.3.16'),
    # Download type - code, source
    ('cistem-1.0.0-beta-source-code', 'cistem-1.0.0-beta'),
    # Download type - src
    ('apache-ant-1.9.7-src', 'apache-ant-1.9.7'),
    ('go1.7.4.src', 'go1.7.4'),
    # Download type - source
    ('bowtie2-2.2.5-source', 'bowtie2-2.2.5'),
    ('grib_api-1.17.0-Source', 'grib_api-1.17.0'),
    # Download type - full
    ('julia-0.4.3-full', 'julia-0.4.3'),
    # Download type - bin
    ('apache-maven-3.3.9-bin', 'apache-maven-3.3.9'),
    # Download type - binary
    ('Jmol-14.8.0-binary', 'Jmol-14.8.0'),
    # Download type - gem
    ('rubysl-date-2.0.9.gem', 'rubysl-date-2.0.9'),
    # Download type - tar
    ('gromacs-4.6.1-tar', 'gromacs-4.6.1'),
    # Download type - sh
    ('Miniconda2-4.3.11-Linux-x86_64.sh', 'Miniconda2-4.3.11'),
    # Download version - release
    ('v1.0.4-release', 'v1.0.4'),
    # Download version - stable
    ('libevent-2.0.21-stable', 'libevent-2.0.21'),
    # Download version - final
    ('2.6.7-final', '2.6.7'),
    # Download version - rel
    ('v1.9.5.1rel', 'v1.9.5.1'),
    # Download version - orig
    ('dash_0.5.5.1.orig', 'dash_0.5.5.1'),
    # Download version - plus
    ('ncbi-blast-2.6.0+-src', 'ncbi-blast-2.6.0'),
    # License
    ('cppad-20170114.gpl', 'cppad-20170114'),
    # Arch
    ('pcraster-4.1.0_x86-64', 'pcraster-4.1.0'),
    ('dislin-11.0.linux.i586_64', 'dislin-11.0'),
    ('PAGIT.V1.01.64bit', 'PAGIT.V1.01'),
    # OS - linux
    ('astyle_2.04_linux', 'astyle_2.04'),
    # OS - unix
    ('install-tl-unx', 'install-tl'),
    # OS - macos
    ('astyle_1.23_macosx', 'astyle_1.23'),
    ('haxe-2.08-osx', 'haxe-2.08'),
    # PyPI - wheel
    ('entrypoints-0.2.2-py2.py3-none-any.whl', 'entrypoints-0.2.2'),
    ('numpy-1.12.0-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl', 'numpy-1.12.0'),  # noqa
    # PyPI - exe
    ('PyYAML-3.12.win-amd64-py3.5.exe', 'PyYAML-3.12'),
    # Combinations of multiple patterns - bin, release
    ('rocketmq-all-4.5.2-bin-release', 'rocketmq-all-4.5.2'),
    # Combinations of multiple patterns - all
    ('p7zip_9.04_src_all', 'p7zip_9.04'),
    # Combinations of multiple patterns - run
    ('cuda_8.0.44_linux.run', 'cuda_8.0.44'),
    # Combinations of multiple patterns - file
    ('ack-2.14-single-file', 'ack-2.14'),
    # Combinations of multiple patterns - jar
    ('antlr-3.4-complete.jar', 'antlr-3.4'),
    # Combinations of multiple patterns - oss
    ('tbb44_20160128oss_src_0', 'tbb44_20160128'),
    # Combinations of multiple patterns - darwin
    ('ghc-7.0.4-x86_64-apple-darwin', 'ghc-7.0.4'),
    ('ghc-7.0.4-i386-apple-darwin', 'ghc-7.0.4'),
    # Combinations of multiple patterns - centos
    ('sratoolkit.2.8.2-1-centos_linux64', 'sratoolkit.2.8.2-1'),
    # Combinations of multiple patterns - arch
    ('VizGlow_v2.2alpha17-R21November2016-Linux-x86_64-Install',
     'VizGlow_v2.2alpha17-R21November2016'),
    ('jdk-8u92-linux-x64', 'jdk-8u92'),
    ('cuda_6.5.14_linux_64.run', 'cuda_6.5.14'),
    ('Mathematica_12.0.0_LINUX.sh', 'Mathematica_12.0.0'),
    ('trf407b.linux64', 'trf407b'),
    # Combinations of multiple patterns - with
    ('mafft-7.221-with-extensions-src', 'mafft-7.221'),
    ('spark-2.0.0-bin-without-hadoop', 'spark-2.0.0'),
    ('conduit-v0.3.0-src-with-blt', 'conduit-v0.3.0'),
    # Combinations of multiple patterns - rock
    ('bitlib-23-2.src.rock', 'bitlib-23-2'),
    # Combinations of multiple patterns - public
    ('dakota-6.3-public.src', 'dakota-6.3'),
    # Combinations of multiple patterns - universal
    ('synergy-1.3.6p2-MacOSX-Universal', 'synergy-1.3.6p2'),
    # Combinations of multiple patterns - dynamic
    ('snptest_v2.5.2_linux_x86_64_dynamic', 'snptest_v2.5.2'),
    # Combinations of multiple patterns - other
    ('alglib-3.11.0.cpp.gpl', 'alglib-3.11.0'),
    ('hpcviewer-2019.08-linux.gtk.x86_64', 'hpcviewer-2019.08'),
    ('apache-mxnet-src-1.3.0-incubating', 'apache-mxnet-src-1.3.0'),
])
def test_url_strip_version_suffixes(url, expected):
    """Check that download-type/OS/arch suffixes are stripped from stems.

    Args:
        url (str): version-bearing stem extracted from a URL
        expected (str): the same stem after version-suffix stripping
    """
    stripped = strip_version_suffixes(url)
    assert stripped == expected
@pytest.mark.parametrize('url,version,expected', [
    # No suffix
    ('rgb-1.0.6', '1.0.6', 'rgb'),
    ('nauty26r7', '26r7', 'nauty'),
    ('PAGIT.V1.01', '1.01', 'PAGIT'),
    ('AmpliconNoiseV1.29', '1.29', 'AmpliconNoise'),
    # Download type - install
    ('converge_install_2.3.16', '2.3.16', 'converge'),
    # Download type - src
    ('jpegsrc.v9b', '9b', 'jpeg'),
    ('blatSrc35', '35', 'blat'),
    # Download type - open
    ('RepeatMasker-open-4-0-7', '4-0-7', 'RepeatMasker'),
    # Download type - archive
    ('coinhsl-archive-2014.01.17', '2014.01.17', 'coinhsl'),
    # Download type - std
    ('ghostscript-fonts-std-8.11', '8.11', 'ghostscript-fonts'),
    # Download type - bin
    ('GapCloser-bin-v1.12-r6', '1.12-r6', 'GapCloser'),
    # Download type - software
    ('orthomclSoftware-v2.0.9', '2.0.9', 'orthomcl'),
    # Download version - release
    ('cbench_release_1.3.0.tar.gz', '1.3.0', 'cbench'),
    # Download version - snapshot
    ('gts-snapshot-121130', '121130', 'gts'),
    # Download version - distrib
    ('zoltan_distrib_v3.83', '3.83', 'zoltan'),
    # Download version - latest
    ('Platypus-latest', 'N/A', 'Platypus'),
    # Download version - complex
    ('qt-everywhere-opensource-src-5.7.0', '5.7.0', 'qt'),
    # Arch
    ('VESTA-x86_64', '3.4.6', 'VESTA'),
    # VCS - bazaar
    ('libvterm-0+bzr681', '681', 'libvterm'),
    # License - gpl
    ('PyQt-x11-gpl-4.11.3', '4.11.3', 'PyQt'),
    ('PyQt4_gpl_x11-4.12.3', '4.12.3', 'PyQt4'),
])
def test_url_strip_name_suffixes(url, version, expected):
    """Check that version/download-type suffixes are stripped from names.

    Args:
        url (str): name-bearing stem extracted from a URL
        version (str): version detected for that stem
        expected (str): the stem after name-suffix stripping
    """
    stripped = strip_name_suffixes(url, version)
    assert stripped == expected
@pytest.mark.parametrize('name,noffset,ver,voffset,path', [
    # Name in path
    ('antlr', 25, '2.7.7', 40, 'https://github.com/antlr/antlr/tarball/v2.7.7'),
    # Name in stem
    ('gmp', 32, '6.0.0a', 36, 'https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2'),
    # Name in suffix
    # Don't think I've ever seen one of these before
    # We don't look for it, so it would probably fail anyway
    # Version in path
    ('nextflow', 31, '0.20.1', 59, 'https://github.com/nextflow-io/nextflow/releases/download/v0.20.1/nextflow'),
    # Version in stem
    ('zlib', 24, '1.2.10', 29, 'http://zlib.net/fossils/zlib-1.2.10.tar.gz'),
    ('slepc', 51, '3.6.2', 57, 'http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz'),
    ('cloog', 61, '0.18.1', 67, 'http://www.bastoul.net/cloog/pages/download/count.php3?url=./cloog-0.18.1.tar.gz'),
    ('libxc', 58, '2.2.2', 64, 'http://www.tddft.org/programs/octopus/down.php?file=libxc/libxc-2.2.2.tar.gz'),
    # Version in suffix
    ('swiftsim', 36, '0.3.0', 76, 'http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0'),
    ('swiftsim', 55, '0.3.0', 95, 'https://gitlab.cosma.dur.ac.uk/api/v4/projects/swift%2Fswiftsim/repository/archive.tar.gz?sha=v0.3.0'),
    ('sionlib', 30, '1.7.1', 59, 'http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1'),
    # Regex in name
    ('voro++', 40, '0.4.6', 47, 'http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz'),
    # SourceForge download
    ('glew', 55, '2.0.0', 60, 'https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download'),
])
def test_url_parse_offset(name, noffset, ver, voffset, path):
    """Tests that the name, version and offsets are computed correctly.

    Args:
        name (str): expected name
        noffset (int): expected character offset of the name in *path*
        ver (str): expected version
        voffset (int): expected character offset of the version in *path*
        path (str): url to be parsed
    """
    # Make sure parse_version_offset and parse_name_offset are working
    v, vstart, vlen, vi, vre = parse_version_offset(path)
    n, nstart, nlen, ni, nre = parse_name_offset(path, v)
    assert n == name
    assert v == ver
    assert nstart == noffset
    assert vstart == voffset
@pytest.mark.parametrize('name,version,url', [
# Common Repositories - github downloads
# name/archive/ver.ver
('nco', '4.6.2', 'https://github.com/nco/nco/archive/4.6.2.tar.gz'),
# name/archive/vver.ver
('vim', '8.0.0134', 'https://github.com/vim/vim/archive/v8.0.0134.tar.gz'),
# name/archive/name-ver.ver
('oce', '0.18', 'https://github.com/tpaviot/oce/archive/OCE-0.18.tar.gz'),
# name/releases/download/vver/name-ver.ver
('libmesh', '1.0.0', 'https://github.com/libMesh/libmesh/releases/download/v1.0.0/libmesh-1.0.0.tar.bz2'),
# name/tarball/vver.ver
('git', '2.7.1', 'https://github.com/git/git/tarball/v2.7.1'),
# name/zipball/vver.ver
('git', '2.7.1', 'https://github.com/git/git/zipball/v2.7.1'),
# Common Repositories - gitlab downloads
# name/repository/archive.ext?ref=vver.ver
('swiftsim', '0.3.0',
'http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0'),
# /api/v4/projects/NAMESPACE%2Fname/repository/archive.ext?sha=vver.ver
('swiftsim', '0.3.0',
'https://gitlab.cosma.dur.ac.uk/api/v4/projects/swift%2Fswiftsim/repository/archive.tar.gz?sha=v0.3.0'),
# name/repository/archive.ext?ref=name-ver.ver
('icet', '1.2.3',
'https://gitlab.kitware.com/icet/icet/repository/archive.tar.gz?ref=IceT-1.2.3'),
# /api/v4/projects/NAMESPACE%2Fname/repository/archive.ext?sha=name-ver.ver
('icet', '1.2.3',
'https://gitlab.kitware.com/api/v4/projects/icet%2Ficet/repository/archive.tar.bz2?sha=IceT-1.2.3'),
# Common Repositories - bitbucket downloads
# name/get/ver.ver
('eigen', '3.2.7', 'https://bitbucket.org/eigen/eigen/get/3.2.7.tar.bz2'),
# name/get/vver.ver
('hoomd-blue', '1.3.3',
'https://bitbucket.org/glotzer/hoomd-blue/get/v1.3.3.tar.bz2'),
# name/downloads/name-ver.ver
('dolfin', '2016.1.0',
'https://bitbucket.org/fenics-project/dolfin/downloads/dolfin-2016.1.0.tar.gz'),
# Common Repositories - sourceforge downloads
# name-ver.ver
('libpng', '1.6.27',
'http://download.sourceforge.net/libpng/libpng-1.6.27.tar.gz'),
('lcms2', '2.6',
'http://downloads.sourceforge.net/project/lcms/lcms/2.6/lcms2-2.6.tar.gz'),
('modules', '3.2.10',
'http://prdownloads.sourceforge.net/modules/modules-3.2.10.tar.gz'),
# name-ver.ver.ext/download
('glew', '2.0.0',
'https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download'),
# Common Repositories - cran downloads
# name.name_ver.ver-ver.ver
('TH.data', '1.0-8', 'https://cran.r-project.org/src/contrib/TH.data_1.0-8.tar.gz'),
('knitr', '1.14', 'https://cran.rstudio.com/src/contrib/knitr_1.14.tar.gz'),
('devtools', '1.12.0', 'https://cloud.r-project.org/src/contrib/devtools_1.12.0.tar.gz'),
# Common Repositories - pypi downloads
# name.name_name-ver.ver
('3to2', '1.1.1', 'https://pypi.python.org/packages/source/3/3to2/3to2-1.1.1.zip'),
('mpmath', '0.19',
'https://pypi.python.org/packages/source/m/mpmath/mpmath-all-0.19.tar.gz'),
('pandas', '0.16.0',
'https://pypi.python.org/packages/source/p/pandas/pandas-0.16.0.tar.gz#md5=bfe311f05dc0c351f8955fbd1e296e73'),
('sphinx_rtd_theme', '0.1.10a0',
'https://pypi.python.org/packages/da/6b/1b75f13d8aa3333f19c6cdf1f0bc9f52ea739cae464fbee050307c121857/sphinx_rtd_theme-0.1.10a0.tar.gz'),
('backports.ssl_match_hostname', '3.5.0.1',
'https://pypi.io/packages/source/b/backports.ssl_match_hostname/backports.ssl_match_hostname-3.5.0.1.tar.gz'),
# Common Repositories - bazaar downloads
('libvterm', '681', 'http://www.leonerd.org.uk/code/libvterm/libvterm-0+bzr681.tar.gz'),
# Common Tarball Formats
# 1st Pass: Simplest case
# Assume name contains no digits and version contains no letters
# name-ver.ver
('libpng', '1.6.37', 'http://download.sourceforge.net/libpng/libpng-1.6.37.tar.gz'),
# 2nd Pass: Version only
# Assume version contains no letters
# ver.ver
('eigen', '3.2.7', 'https://bitbucket.org/eigen/eigen/get/3.2.7.tar.bz2'),
# ver.ver-ver
('ImageMagick', '7.0.2-7', 'https://github.com/ImageMagick/ImageMagick/archive/7.0.2-7.tar.gz'),
# vver.ver
('CGNS', '3.3.0', 'https://github.com/CGNS/CGNS/archive/v3.3.0.tar.gz'),
# vver_ver
('luafilesystem', '1_6_3', 'https://github.com/keplerproject/luafilesystem/archive/v1_6_3.tar.gz'),
# 3rd Pass: No separator characters are used
# Assume name contains no digits
# namever
('turbolinux', '702', 'file://{0}/turbolinux702.tar.gz'.format(os.getcwd())),
('nauty', '26r7', 'http://pallini.di.uniroma1.it/nauty26r7.tar.gz'),
# 4th Pass: A single separator character is used
# Assume name contains no digits
# name-name-ver-ver
('Trilinos', '12-10-1',
'https://github.com/trilinos/Trilinos/archive/trilinos-release-12-10-1.tar.gz'),
('panda', '2016-03-07',
'http://comopt.ifi.uni-heidelberg.de/software/PANDA/downloads/panda-2016-03-07.tar'),
('gts', '121130',
'http://gts.sourceforge.net/tarballs/gts-snapshot-121130.tar.gz'),
('cdd', '061a',
'http://www.cs.mcgill.ca/~fukuda/download/cdd/cdd-061a.tar.gz'),
# name_name_ver_ver
('tinyxml', '2_6_2',
'https://sourceforge.net/projects/tinyxml/files/tinyxml/2.6.2/tinyxml_2_6_2.tar.gz'),
('boost', '1_55_0',
'http://downloads.sourceforge.net/project/boost/boost/1.55.0/boost_1_55_0.tar.bz2'),
('yorick', '2_2_04',
'https://github.com/dhmunro/yorick/archive/y_2_2_04.tar.gz'),
('tbb', '44_20160413',
'https://www.threadingbuildingblocks.org/sites/default/files/software_releases/source/tbb44_20160413oss_src.tgz'),
# name.name.ver.ver
('prank', '150803', 'http://wasabiapp.org/download/prank/prank.source.150803.tgz'),
('jpeg', '9b', 'http://www.ijg.org/files/jpegsrc.v9b.tar.gz'),
('openjpeg', '2.1',
'https://github.com/uclouvain/openjpeg/archive/version.2.1.tar.gz'),
# name.namever.ver
('atlas', '3.11.34',
'http://sourceforge.net/projects/math-atlas/files/Developer%20%28unstable%29/3.11.34/atlas3.11.34.tar.bz2'),
('visit', '2.10.1', 'http://portal.nersc.gov/project/visit/releases/2.10.1/visit2.10.1.tar.gz'),
('geant', '4.10.01.p03', 'http://geant4.cern.ch/support/source/geant4.10.01.p03.tar.gz'),
('tcl', '8.6.5', 'http://prdownloads.sourceforge.net/tcl/tcl8.6.5-src.tar.gz'),
# 5th Pass: Two separator characters are used
# Name may contain digits, version may contain letters
# name-name-ver.ver
('m4', '1.4.17', 'https://ftp.gnu.org/gnu/m4/m4-1.4.17.tar.gz'),
('gmp', '6.0.0a', 'https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2'),
('LaunchMON', '1.0.2',
'https://github.com/LLNL/LaunchMON/releases/download/v1.0.2/launchmon-v1.0.2.tar.gz'),
# name-ver-ver.ver
('libedit', '20150325-3.1', 'http://thrysoee.dk/editline/libedit-20150325-3.1.tar.gz'),
# name-name-ver_ver
('icu4c', '57_1', 'http://download.icu-project.org/files/icu4c/57.1/icu4c-57_1-src.tgz'),
# name_name_ver.ver
('superlu_dist', '4.1', 'http://crd-legacy.lbl.gov/~xiaoye/SuperLU/superlu_dist_4.1.tar.gz'),
('pexsi', '0.9.0', 'https://math.berkeley.edu/~linlin/pexsi/download/pexsi_v0.9.0.tar.gz'),
# name_name.ver.ver
('fer', '696', 'ftp://ftp.pmel.noaa.gov/ferret/pub/source/fer_source.v696.tar.gz'),
# name_name_ver-ver
('Bridger', '2014-12-01',
'https://downloads.sourceforge.net/project/rnaseqassembly/Bridger_r2014-12-01.tar.gz'),
# name-name-ver.ver-ver.ver
('sowing', '1.1.23-p1', 'http://ftp.mcs.anl.gov/pub/petsc/externalpackages/sowing-1.1.23-p1.tar.gz'),
('bib2xhtml', '3.0-15-gf506', 'http://www.spinellis.gr/sw/textproc/bib2xhtml/bib2xhtml-v3.0-15-gf506.tar.gz'),
# namever.ver-ver.ver
('go', '1.4-bootstrap-20161024', 'https://storage.googleapis.com/golang/go1.4-bootstrap-20161024.tar.gz'),
# 6th Pass: All three separator characters are used
# Name may contain digits, version may contain letters
# name_name-ver.ver
('the_silver_searcher', '0.32.0', 'http://geoff.greer.fm/ag/releases/the_silver_searcher-0.32.0.tar.gz'),
('sphinx_rtd_theme', '0.1.10a0',
'https://pypi.python.org/packages/source/s/sphinx_rtd_theme/sphinx_rtd_theme-0.1.10a0.tar.gz'),
# name.name_ver.ver-ver.ver
('TH.data', '1.0-8', 'https://cran.r-project.org/src/contrib/TH.data_1.0-8.tar.gz'),
('XML', '3.98-1.4', 'https://cran.r-project.org/src/contrib/XML_3.98-1.4.tar.gz'),
# name-name-ver.ver_ver.ver
('pypar', '2.1.5_108',
'https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/pypar/pypar-2.1.5_108.tgz'),
# name-namever.ver_ver.ver
('STAR-CCM+', '11.06.010_02',
'file://{0}/STAR-CCM+11.06.010_02_linux-x86_64.tar.gz'.format(os.getcwd())),
# name-name_name-ver.ver
('PerlIO-utf8_strict', '0.002',
'http://search.cpan.org/CPAN/authors/id/L/LE/LEONT/PerlIO-utf8_strict-0.002.tar.gz'),
# Various extensions
# .tar.gz
('libXcursor', '1.1.14',
'https://www.x.org/archive/individual/lib/libXcursor-1.1.14.tar.gz'),
# .tar.bz2
('mpfr', '4.0.1', 'https://ftpmirror.gnu.org/mpfr/mpfr-4.0.1.tar.bz2'),
# .tar.xz
('pkgconf', '1.5.4',
'http://distfiles.dereferenced.org/pkgconf/pkgconf-1.5.4.tar.xz'),
# .tar.Z
('Gblocks', '0.91b',
'http://molevol.cmima.csic.es/castresana/Gblocks/Gblocks_Linux64_0.91b.tar.Z'),
# .tar.zip
('bcl2fastq2', '2.19.1.403',
'ftp://webdata2:webdata2@ussd-ftp.illumina.com/downloads/software/bcl2fastq/bcl2fastq2-v2.19.1.403-tar.zip'),
# .tar, .TAR
('python-meep', '1.4.2',
'https://launchpad.net/python-meep/1.4/1.4/+download/python-meep-1.4.2.tar'),
('python-meep', '1.4.2',
'https://launchpad.net/python-meep/1.4/1.4/+download/python-meep-1.4.2.TAR'),
# .gz
('libXcursor', '1.1.14',
'https://www.x.org/archive/individual/lib/libXcursor-1.1.14.gz'),
# .bz2
('mpfr', '4.0.1', 'https://ftpmirror.gnu.org/mpfr/mpfr-4.0.1.bz2'),
# .xz
('pkgconf', '1.5.4',
'http://distfiles.dereferenced.org/pkgconf/pkgconf-1.5.4.xz'),
# .Z
('Gblocks', '0.91b',
'http://molevol.cmima.csic.es/castresana/Gblocks/Gblocks_Linux64_0.91b.Z'),
# .zip
('bliss', '0.73', 'http://www.tcs.hut.fi/Software/bliss/bliss-0.73.zip'),
# .tgz
('ADOL-C', '2.6.1',
'http://www.coin-or.org/download/source/ADOL-C/ADOL-C-2.6.1.tgz'),
# .tbz
('mpfr', '4.0.1', 'https://ftpmirror.gnu.org/mpfr/mpfr-4.0.1.tbz'),
# .tbz2
('mpfr', '4.0.1', 'https://ftpmirror.gnu.org/mpfr/mpfr-4.0.1.tbz2'),
# .txz
('kim-api', '2.1.0', 'https://s3.openkim.org/kim-api/kim-api-2.1.0.txz'),
# 8th Pass: Query strings
# suffix queries
('swiftsim', '0.3.0', 'http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0'),
('swiftsim', '0.3.0',
'https://gitlab.cosma.dur.ac.uk/api/v4/projects/swift%2Fswiftsim/repository/archive.tar.gz?sha=v0.3.0'),
('sionlib', '1.7.1', 'http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1'),
('jube2', '2.2.2', 'https://apps.fz-juelich.de/jsc/jube/jube2/download.php?version=2.2.2'),
('archive', '1.0.0', 'https://code.ornl.gov/eck/papyrus/repository/archive.tar.bz2?ref=v1.0.0'),
('VecGeom', '0.3.rc',
'https://gitlab.cern.ch/api/v4/projects/VecGeom%2FVecGeom/repository/archive.tar.gz?sha=v0.3.rc'),
('parsplice', '1.1',
'https://gitlab.com/api/v4/projects/exaalt%2Fparsplice/repository/archive.tar.gz?sha=v1.1'),
('busco', '2.0.1', 'https://gitlab.com/api/v4/projects/ezlab%2Fbusco/repository/archive.tar.gz?sha=2.0.1'),
('libaec', '1.0.2',
'https://gitlab.dkrz.de/api/v4/projects/k202009%2Flibaec/repository/archive.tar.gz?sha=v1.0.2'),
('icet', '2.1.1',
'https://gitlab.kitware.com/api/v4/projects/icet%2Ficet/repository/archive.tar.bz2?sha=IceT-2.1.1'),
('vtk-m', '1.3.0',
'https://gitlab.kitware.com/api/v4/projects/vtk%2Fvtk-m/repository/archive.tar.gz?sha=v1.3.0'),
('GATK', '3.8-1-0-gf15c1c3ef',
'https://software.broadinstitute.org/gatk/download/auth?package=GATK-archive&version=3.8-1-0-gf15c1c3ef'),
# stem queries
('slepc', '3.6.2', 'http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz'),
('otf', '1.12.5salmon',
'http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz'),
('eospac', '6.4.0beta.1',
'http://laws-green.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.1_r20171213193219.tgz'),
('vampirtrace', '5.14.4',
'http://wwwpub.zih.tu-dresden.de/~mlieber/dcount/dcount.php?package=vampirtrace&get=VampirTrace-5.14.4.tar.gz'),
('EvtGen', '01.07.00',
'https://evtgen.hepforge.org/downloads?f=EvtGen-01.07.00.tar.gz'),
# (we don't actually look for these, they are picked up
# during the preliminary stem parsing)
('octopus', '6.0', 'http://octopus-code.org/down.php?file=6.0/octopus-6.0.tar.gz'),
('cloog', '0.18.1', 'http://www.bastoul.net/cloog/pages/download/count.php3?url=./cloog-0.18.1.tar.gz'),
('libxc', '2.2.2', 'http://www.tddft.org/programs/octopus/down.php?file=libxc/libxc-2.2.2.tar.gz'),
('cistem', '1.0.0-beta',
'https://cistem.org/system/tdf/upload3/cistem-1.0.0-beta-source-code.tar.gz?file=1&type=cistem_details&id=37&force=0'),
('Magics', '4.1.0',
'https://confluence.ecmwf.int/download/attachments/3473464/Magics-4.1.0-Source.tar.gz?api=v2'),
('grib_api', '1.17.0',
'https://software.ecmwf.int/wiki/download/attachments/3473437/grib_api-1.17.0-Source.tar.gz?api=v2'),
('eccodes', '2.2.0',
'https://software.ecmwf.int/wiki/download/attachments/45757960/eccodes-2.2.0-Source.tar.gz?api=v2'),
('SWFFT', '1.0',
'https://xgitlab.cels.anl.gov/api/v4/projects/hacc%2FSWFFT/repository/archive.tar.gz?sha=v1.0'),
# 9th Pass: Version in path
# github.com/repo/name/releases/download/name-vver/name
('nextflow', '0.20.1', 'https://github.com/nextflow-io/nextflow/releases/download/v0.20.1/nextflow'),
# ver/name
('ncbi', '2.2.26', 'ftp://ftp.ncbi.nlm.nih.gov/blast/executables/legacy.NOTSUPPORTED/2.2.26/ncbi.tar.gz'),
# Other tests for corner cases
# single character name
('R', '3.3.2', 'https://cloud.r-project.org/src/base/R-3/R-3.3.2.tar.gz'),
# name starts with digit
('3to2', '1.1.1', 'https://pypi.python.org/packages/source/3/3to2/3to2-1.1.1.zip'),
# plus in name
('gtk+', '2.24.31', 'http://ftp.gnome.org/pub/gnome/sources/gtk+/2.24/gtk+-2.24.31.tar.xz'),
('voro++', '0.4.6', 'http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz'),
# Name comes before download.php
('sionlib', '1.7.1', 'http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1'),
# Ignore download.php
('slepc', '3.6.2', 'http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz'),
('ScientificPython', '2.8.1',
'https://sourcesup.renater.fr/frs/download.php/file/4411/ScientificPython-2.8.1.tar.gz'),
# gloox beta style
('gloox', '1.0-beta7', 'http://camaya.net/download/gloox-1.0-beta7.tar.bz2'),
# sphinx beta style
('sphinx', '1.10-beta', 'http://sphinxsearch.com/downloads/sphinx-1.10-beta.tar.gz'),
# ruby version style
('ruby', '1.9.1-p243', 'ftp://ftp.ruby-lang.org/pub/ruby/1.9/ruby-1.9.1-p243.tar.gz'),
# rc style
('libvorbis', '1.2.2rc1', 'http://downloads.xiph.org/releases/vorbis/libvorbis-1.2.2rc1.tar.bz2'),
# dash rc style
('js', '1.8.0-rc1', 'http://ftp.mozilla.org/pub/mozilla.org/js/js-1.8.0-rc1.tar.gz'),
# apache version style
('apache-cassandra', '1.2.0-rc2',
'http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin.tar.gz'),
# xaw3d version
('Xaw3d', '1.5E', 'ftp://ftp.visi.com/users/hawkeyd/X/Xaw3d-1.5E.tar.gz'),
# fann version
('fann', '2.1.0beta', 'http://downloads.sourceforge.net/project/fann/fann/2.1.0beta/fann-2.1.0beta.zip'),
# imap version
('imap', '2007f', 'ftp://ftp.cac.washington.edu/imap/imap-2007f.tar.gz'),
# suite3270 version
('suite3270', '3.3.12ga7',
'http://sourceforge.net/projects/x3270/files/x3270/3.3.12ga7/suite3270-3.3.12ga7-src.tgz'),
# scalasca version
('cube', '4.2.3', 'http://apps.fz-juelich.de/scalasca/releases/cube/4.2/dist/cube-4.2.3.tar.gz'),
('cube', '4.3-TP1', 'http://apps.fz-juelich.de/scalasca/releases/cube/4.3/dist/cube-4.3-TP1.tar.gz'),
# github raw url
('CLAMR', '2.0.7', 'https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true'),
# luaposix version
('luaposix', '33.4.0', 'https://github.com/luaposix/luaposix/archive/release-v33.4.0.tar.gz'),
# nco version
('nco', '4.6.2-beta03', 'https://github.com/nco/nco/archive/4.6.2-beta03.tar.gz'),
('nco', '4.6.3-alpha04', 'https://github.com/nco/nco/archive/4.6.3-alpha04.tar.gz'),
])
def test_url_parse_name_and_version(name, version, url):
    """Verify (name, version) extraction from a URL, and that substituting
    the same version back reproduces the original URL exactly.
    """
    got_name, got_version = parse_name_and_version(url)
    assert got_name == name
    assert got_version == Version(version)
    # Round trip: substituting the known version into the URL must be a
    # no-op, proving Spack can reconstruct this exact download link.
    assert substitute_version(url, version) == url
@pytest.mark.parametrize('not_detectable_url', [
    'http://www.netlib.org/blas/blast-forum/cblas.tgz',
    'http://www.netlib.org/voronoi/triangle.zip',
])
def test_no_version(not_detectable_url):
    """URLs with no recognizable version must raise rather than mis-parse."""
    with pytest.raises(UndetectableVersionError):
        parse_name_and_version(not_detectable_url)
| LLNL/spack | lib/spack/spack/test/url_parse.py | Python | lgpl-2.1 | 26,545 | [
"BLAST",
"Gromacs",
"HOOMD-blue",
"Jmol",
"Octopus",
"OpenKIM",
"VTK",
"VisIt"
] | c59d6d03055f74c38ea95b2233953e5650de99c053832025c565e18a60e7de37 |
"""
Tests for Django views.
"""
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import Client
from django.test import TestCase
from main.models import AlignmentGroup
from main.models import Chromosome
from main.models import Project
from main.models import ReferenceGenome
from main.models import Variant
from main.models import VariantAlternate
# Credentials for the default test user created in TestViews.setUp().
TEST_USERNAME = 'testusername'
TEST_PASSWORD = 'password'
TEST_EMAIL = 'test@test.com'
# Expected HTTP status codes, named for the scenario they signal in tests.
STATUS_CODE__SUCCESS = 200
# Django redirects anonymous users to the login page, so "not logged in"
# manifests as a 302 redirect (intentionally the same code as REDIRECT).
STATUS_CODE__NOT_LOGGED_IN = 302
STATUS_CODE__NOT_FOUND = 404
STATUS_CODE__REDIRECT = 302
STATUS_CODE__SERVER_ERROR = 500
class TestViews(TestCase):
    """Smoke tests that every main view responds with the expected HTTP
    status for three access levels: anonymous, owner, and non-owner.
    """

    def setUp(self):
        # Test models.
        # NOTE(review): User.get_profile() was removed in Django 1.7 —
        # this suite appears to target an older Django; confirm version.
        user = User.objects.create_user(TEST_USERNAME, password=TEST_PASSWORD,
                email=TEST_EMAIL)
        self.test_project = Project.objects.create(owner=user.get_profile(),
                title='Test Project')
        self.ref_genome = ReferenceGenome.objects.create(
            project=self.test_project, label='refgenome')
        self.chromosome = Chromosome.objects.create(
            reference_genome=self.ref_genome,
            label='Chromosome',
            num_bases=9001)
        alignment_group = AlignmentGroup.objects.create(
            label='Alignment 1',
            reference_genome=self.ref_genome,
            aligner=AlignmentGroup.ALIGNER.BWA)
        variant = Variant.objects.create(
            type=Variant.TYPE.TRANSITION,
            reference_genome=self.ref_genome,
            chromosome=self.chromosome,
            position=10,
            ref_value='A')
        VariantAlternate.objects.create(
            variant=variant,
            alt_value='G')
        # Urls that do not require the user to be logged in.
        self.no_login_required_urls = [
            reverse('main.views.home_view'),
        ]
        # Urls that require the user to be logged in, but do not try any
        # particular entity.
        self.non_specific_login_required_urls = [
            reverse('main.views.project_list_view'),
            reverse('main.views.project_create_view'),
        ]
        # Urls for a specific entity.
        self.specific_entity_urls = [
            # Tab base views.
            reverse('main.views.project_view',
                    args=(self.test_project.uid,)),
            reverse('main.views.tab_root_analyze',
                    args=(self.test_project.uid,)),
            # Project-specific views
            reverse('main.views.project_view',
                    args=(self.test_project.uid,)),
            # Reference genomes
            reverse('main.views.reference_genome_list_view',
                    args=(self.test_project.uid,)),
            reverse('main.views.reference_genome_view',
                    args=(self.test_project.uid, self.ref_genome.uid)),
            # Alignments
            reverse('main.views.alignment_list_view',
                    args=(self.test_project.uid,)),
            reverse('main.views.alignment_create_view',
                    args=(self.test_project.uid,)),
            reverse('main.views.alignment_view',
                    args=(self.test_project.uid, alignment_group.uid)),
            # Variant sets
            reverse('main.views.variant_set_list_view',
                    args=(self.test_project.uid,)),
            # Samples
            reverse('main.views.sample_list_view',
                    args=(self.test_project.uid,)),
        ]
        # The fake web browser client used to make requests.
        self.client = Client()

    def assert_url_response(self, url, expected_status_code):
        """Helper method that calls a URL and compares the response status
        code to expected_status_code.
        """
        response = self.client.get(url)
        self.assertEqual(expected_status_code, response.status_code,
                ("Simple url test failed for %s with status code %d. " +
                "Expected status code %d.") % (
                        url, response.status_code, expected_status_code))

    def test_views__logged_out(self):
        """Tests calling the views without a logged in user.
        """
        # Anonymous users are bounced (302) from anything login-gated.
        login_error_urls = (self.non_specific_login_required_urls +
                self.specific_entity_urls)
        for url in login_error_urls:
            self.assert_url_response(url, STATUS_CODE__NOT_LOGGED_IN)
        success_urls = self.no_login_required_urls
        for url in success_urls:
            self.assert_url_response(url, STATUS_CODE__SUCCESS)

    def test_views__logged_in_owner(self):
        """Tests calling views with the owner logged in.
        """
        # The owner should be able to reach every URL category.
        self.client.login(username=TEST_USERNAME, password=TEST_PASSWORD)
        all_urls = (self.no_login_required_urls +
                self.non_specific_login_required_urls +
                self.specific_entity_urls)
        for url in all_urls:
            self.assert_url_response(url, STATUS_CODE__SUCCESS)

    def test_views__logged_in_non_owner(self):
        """Tests calling views with the non-owner logged in.
        """
        OTHER_USERNAME = 'justtest'
        OTHER_PASSWORD = 'other_password'
        OTHER_EMAIL = 'justtest@me.com'
        User.objects.create_user(
                OTHER_USERNAME, password=OTHER_PASSWORD, email=OTHER_EMAIL)
        self.client.login(username=OTHER_USERNAME, password=OTHER_PASSWORD)
        # Entity-specific pages 404 (not 403) for non-owners, so the
        # existence of another user's project is not leaked.
        error_urls = self.specific_entity_urls
        for url in error_urls:
            self.assert_url_response(url, STATUS_CODE__NOT_FOUND)
        success_urls = (self.non_specific_login_required_urls +
                self.no_login_required_urls)
        for url in success_urls:
            self.assert_url_response(url, STATUS_CODE__SUCCESS)

    def test_compile_jbrowse_and_redirect(self):
        """Tests the JBrowse redirect handler.
        """
        self.client.login(username=TEST_USERNAME, password=TEST_PASSWORD)
        # If invalid ids, then 404.
        url = ('/redirect_jbrowse?data=/jbrowse/gd_data/projects/167e93/' +
                'ref_genomes/ad561ec6/jbrowse')
        self.assert_url_response(url, STATUS_CODE__NOT_FOUND)
        # If valid ids, then successful redirect.
        url = ('/redirect_jbrowse?data=/jbrowse/gd_data/projects/%s/ref_genomes/%s/jbrowse' % (
            self.test_project.uid, self.ref_genome.uid))
        self.assert_url_response(url, STATUS_CODE__REDIRECT)
        # Incorrect. Note the "http://localhost" incorrectly injected.
        url = ('/redirect_jbrowse?data=http://localhost/jbrowse/gd_data/'
            'projects/16167e93/ref_genomes/ad561ec6/jbrowse')
        with self.assertRaises(AssertionError):
            self.client.get(url)
| woodymit/millstone_accidental_source | genome_designer/main/tests/test_views.py | Python | mit | 6,929 | [
"BWA"
] | e17be15d34859f79899c627e52edfb22bcba5f09493f91a0abc9a99f78b7e463 |
# Minimal pysam smoke test: stream alignments overlapping a region.
import pysam
# Open the alignment file for reading ("r" = text SAM mode).
samfile = pysam.Samfile( "test.sam", "r" )
# Print every read overlapping chromosome 2L, positions 100-120.
# NOTE(review): fetch() with a region normally requires an indexed
# file — confirm test.sam is usable this way. (Python 2 print syntax.)
for alignedread in samfile.fetch('2L', 100, 120):
    print alignedread
samfile.close()
| humberto-ortiz/dmel-ercc | samtest.py | Python | gpl-3.0 | 147 | [
"pysam"
] | e285ad93e2f10732daff14eb68d8421c0c4e3356a30088c4439694488f584eb1 |
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import distributions
import numpy as np
class DistributionsTest(absltest.TestCase):
  """Behavioral checks for the distribution primitives."""

  def test_mixture_returns_components(self):
    # A skewed two-component mixture must emit both atoms, with the
    # sample mean near the dominant component's weight.
    mixture = distributions.Mixture(
        components=[distributions.Constant((0,)),
                    distributions.Constant((1,))],
        weights=[0.1, 0.9])
    rng = np.random.RandomState(seed=100)
    draws = [mixture.sample(rng) for _ in range(1000)]
    self.assertSetEqual(set(draws), {(0,), (1,)})
    self.assertAlmostEqual(np.mean(draws), 0.9, delta=0.1)

  def test_bernoulli_returns_proportionally(self):
    coin = distributions.Bernoulli(p=0.9)
    rng = np.random.RandomState(seed=100)
    draws = [coin.sample(rng) for _ in range(1000)]
    self.assertAlmostEqual(np.mean(draws), 0.9, delta=0.1)

  def test_constant_returns_the_same_thing(self):
    point_mass = distributions.Constant(mean=(0, 1, 2))
    rng = np.random.RandomState(seed=100)
    distinct_draws = {point_mass.sample(rng) for _ in range(1000)}
    self.assertEqual(distinct_draws, {(0, 1, 2)})

  def test_gaussian_has_right_mean_std(self):
    gaussian = distributions.Gaussian(mean=[0, 0, 1], std=0.1)
    rng = np.random.RandomState(seed=100)
    draws = [gaussian.sample(rng) for _ in range(1000)]
    # Sample mean and per-dimension std should land close to the
    # requested parameters.
    self.assertLess(
        np.linalg.norm(np.mean(draws, 0) - np.array([0, 0, 1])), 0.1)
    self.assertLess(
        np.linalg.norm(np.std(draws, 0) - np.array([0.1, 0.1, 0.1])), 0.1)

  def test_improper_distributions_raise_errors(self):
    # Probabilities outside [0, 1] are rejected.
    for bad_p in [-10, -0.9, 1.3]:
      with self.assertRaises(ValueError):
        _ = distributions.Bernoulli(p=bad_p)
    # Mixture weights must form a proper distribution.
    bad_weight_vectors = [
        [0.1, 0.3, 0.5],  # Does not sum to one.
        [0.5, 0.9, -0.4],  # Has negative values.
    ]
    for vec in bad_weight_vectors:
      with self.assertRaises(ValueError):
        _ = distributions.Mixture(
            weights=vec,
            components=[distributions.Constant(mean=(0,))] * len(vec))
# Allow running this test module directly: `python distributions_test.py`.
if __name__ == '__main__':
  absltest.main()
| google/ml-fairness-gym | distributions_test.py | Python | apache-2.0 | 2,755 | [
"Gaussian"
] | 738d7eea295229bb593e167d0415589b83a7cdad23dccb664fb8987f93be1b33 |
"""\
fermi_dirac.py: Utilities for finite temperature Fermi-Dirac occupations.
This program is part of the PyQuante quantum chemistry program suite.
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
import sys
import settings
from NumWrap import matrixmultiply,transpose
from math import exp,log
from Constants import Kboltz
from LA2 import mkdens
import logging
logger = logging.getLogger("pyquante")
def mkdens_fermi(nel,orbe,orbs,e_temp):
    """Build a density matrix with Fermi-Dirac fractional occupations.

    The Fermi level is located for `nel` electrons at electron
    temperature `e_temp`, occupations are then evaluated from the
    orbital energies `orbe`, and the density matrix is assembled from
    the orbitals `orbs`.

    Arguments:
        nel     Number of electrons in the system
        orbe    The orbital energies
        orbs    The orbitals
        e_temp  The electron temperature

    Returns:
        (D, entropy): the density matrix and the electronic entropy.
    """
    fermi_level = get_efermi(nel, orbe, e_temp)
    occupations = get_fermi_occs(fermi_level, orbe, e_temp)
    density = mkdens_occs(orbs, occupations)
    return density, get_entropy(occupations, e_temp)
def mkdens_occs(c,occs,**kwargs):
    "Density matrix from a set of occupations (e.g. from FD expression)."
    # Optional kwargs: 'tol' (occupation cutoff) and 'verbose' (py2 prints).
    tol = kwargs.get('tol',settings.FDOccTolerance)
    verbose = kwargs.get('verbose')
    # Determine how many orbs have occupations greater than 0
    # (occs is assumed sorted in decreasing order, so stop at the
    # first occupation below tol).
    norb = 0
    for fi in occs:
        if fi < tol: break
        norb += 1
    if verbose:
        print "mkdens_occs: %d occupied orbitals found" % norb
    # Determine how many doubly occupied orbitals we have
    # (leading occupations within tol of 1 are treated as closed-shell).
    nclosed = 0
    for i in xrange(norb):
        if abs(1.-occs[i]) > tol: break
        nclosed += 1
    if verbose:
        print "mkdens_occs: %d closed-shell orbitals found" % nclosed
    # Closed-shell part via mkdens, then add each fractionally occupied
    # orbital's outer product weighted by its occupation.
    D = mkdens(c,0,nclosed)
    for i in xrange(nclosed,norb):
        D = D + occs[i]*matrixmultiply(c[:,i:i+1],transpose(c[:,i:i+1]))
    return D
def get_fermi_occ(efermi,en,temp):
    """Fermi-Dirac occupation 1/(1+exp((en-efermi)/kT)) for one level.

    Arguments:
        efermi  Fermi energy
        en      orbital energy
        temp    electron temperature

    The reduced energy x = (en-efermi)/kT is clamped at +/-50 to avoid
    overflow in exp(); beyond that range the occupation is numerically
    exactly 1 or 0.
    """
    kT = Kboltz*temp
    x = (en-efermi)/kT
    if x < -50.: return 1.
    # Return a float for consistency with the other branches
    # (previously this branch returned the int 0).
    elif x > 50.: return 0.
    return 1/(1+exp(x))
def get_entropy(occs,temp):
    """Electronic entropy sum_i kT*[f ln f + (1-f) ln(1-f)] over occs.

    Summation stops at the first numerically-zero occupation, since
    occs is assumed sorted in decreasing order.
    """
    kT = Kboltz*temp
    total = 0
    for occ in occs:
        if abs(occ) < 1e-10:
            # Occupations only get smaller from here; stop summing.
            break
        # Guard each log() against its argument underflowing to zero.
        if occ > 1e-10:
            total += kT*occ*log(occ)
        if (1-occ) > 1e-10:
            total += kT*(1.-occ)*log(1.-occ)
    return total
def get_fermi_occs(efermi,orbe,temp):
    """Fermi-Dirac occupation for each orbital energy in `orbe`."""
    return [get_fermi_occ(efermi, en, temp) for en in orbe]
def get_t0_occs(nel,nbf):
    """Zero-temperature occupations of `nel` electrons in `nbf` orbitals.

    Doubly occupied orbitals get 1., a singly occupied orbital (odd
    electron count) gets 0.5, and the remaining orbitals stay 0.
    """
    ndouble, nsingle = divmod(nel, 2)
    occs = [0]*nbf
    for i in xrange(ndouble):
        occs[i] = 1.
    for i in xrange(ndouble, ndouble + nsingle):
        occs[i] = 0.5
    return occs
def get_efermi(nel,orbe,temp,**kwargs):
    "Bisection method to get Fermi energy from Fermi-Dirac dist"
    # Optional kwargs: 'tol' for the electron-count convergence test.
    tol = kwargs.get('tol',settings.FDTolerance)
    # NOTE(review): 'verbose' is read but never used in this function.
    verbose = kwargs.get('verbose')
    # Bracket: elow well below the lowest level (occupation -> full),
    # ehigh at the highest level. Factor 2 = two electrons per orbital.
    elow,ehigh = orbe[0]-100.,orbe[-1]
    nlow = 2*sum(get_fermi_occs(elow,orbe,temp))
    nhigh = 2*sum(get_fermi_occs(ehigh,orbe,temp))
    # Sanity-check that the bracket actually contains nel electrons.
    if nlow > nel:
        logger.error("elow incorrect %f -> %f " % (elow,nlow))
        raise Exception("elow incorrect %f -> %f " % (elow,nlow))
    if nhigh < nel:
        logger.error("ehigh incorrect %f -> %f " % (ehigh,nhigh))
        raise Exception("ehigh incorrect %f -> %f " % (ehigh,nhigh))
    # Bisect up to 100 times until the electron count at efermi
    # matches nel within tol.
    for i in xrange(100):
        efermi = (elow+ehigh)/2
        n = 2*sum(get_fermi_occs(efermi,orbe,temp))
        if abs(n-nel) < tol:
            break
        elif n < nel:
            elow = efermi
        else:
            ehigh = efermi
    else:
        # for/else: only reached when the loop exhausts without break.
        # NOTE(review): message says "get_fd_occs" but this function is
        # get_efermi, and non-convergence only prints (py2) instead of
        # raising — the last midpoint is still returned.
        print "get_fd_occs: Too many iterations"
    return efermi
| berquist/PyQuante | PyQuante/fermi_dirac.py | Python | bsd-3-clause | 3,757 | [
"DIRAC"
] | d61d8bbab8c29162942c404633ad7b32b416cd9165425d7fe5b3cad8e07ff3e6 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the courseware unit bookmarks.
"""
import json
import requests
from ...pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
from ...pages.lms.auto_auth import AutoAuthPage as LmsAutoAuthPage
from ...pages.lms.bookmarks import BookmarksPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.common.logout import LogoutPage
from ...pages.common import BASE_URL
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import EventsTestMixin, UniqueCourseTest, is_404_page
class BookmarksTestMixin(EventsTestMixin, UniqueCourseTest):
"""
Mixin with helper methods for testing Bookmarks.
"""
USERNAME = "STUDENT"
EMAIL = "student@example.com"
def create_course_fixture(self, num_chapters):
"""
Create course fixture
Arguments:
num_chapters: number of chapters to create
"""
self.course_fixture = CourseFixture( # pylint: disable=attribute-defined-outside-init
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
xblocks = []
for index in range(num_chapters):
xblocks += [
XBlockFixtureDesc('chapter', 'TestSection{}'.format(index)).add_children(
XBlockFixtureDesc('sequential', 'TestSubsection{}'.format(index)).add_children(
XBlockFixtureDesc('vertical', 'TestVertical{}'.format(index))
)
)
]
self.course_fixture.add_children(*xblocks).install()
def verify_event_data(self, event_type, event_data):
"""
Verify emitted event data.
Arguments:
event_type: expected event type
event_data: expected event data
"""
actual_events = self.wait_for_events(event_filter={'event_type': event_type}, number_of_matches=1)
self.assert_events_match(event_data, actual_events)
class BookmarksTest(BookmarksTestMixin):
"""
Tests to verify bookmarks functionality.
"""
def setUp(self):
"""
Initialize test setup.
"""
super(BookmarksTest, self).setUp()
self.course_outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.bookmarks_page = BookmarksPage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
# Get session to be used for bookmarking units
self.session = requests.Session()
params = {'username': self.USERNAME, 'email': self.EMAIL, 'course_id': self.course_id}
response = self.session.get(BASE_URL + "/auto_auth", params=params)
self.assertTrue(response.ok, "Failed to get session")
def _test_setup(self, num_chapters=2):
"""
Setup test settings.
Arguments:
num_chapters: number of chapters to create in course
"""
self.create_course_fixture(num_chapters)
# Auto-auth register for the course.
LmsAutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id).visit()
self.courseware_page.visit()
def _bookmark_unit(self, location):
"""
Bookmark a unit
Arguments:
location (str): unit location
"""
_headers = {
'Content-type': 'application/json',
'X-CSRFToken': self.session.cookies['csrftoken'],
}
params = {'course_id': self.course_id}
data = json.dumps({'usage_id': location})
response = self.session.post(
BASE_URL + '/api/bookmarks/v1/bookmarks/',
data=data,
params=params,
headers=_headers
)
self.assertTrue(response.ok, "Failed to bookmark unit")
def _bookmark_units(self, num_units):
"""
Bookmark first `num_units` units
Arguments:
num_units(int): Number of units to bookmarks
"""
xblocks = self.course_fixture.get_nested_xblocks(category="vertical")
for index in range(num_units):
self._bookmark_unit(xblocks[index].locator)
def _breadcrumb(self, num_units, modified_name=None):
"""
Creates breadcrumbs for the first `num_units`
Arguments:
num_units(int): Number of units for which we want to create breadcrumbs
Returns:
list of breadcrumbs
"""
breadcrumbs = []
for index in range(num_units):
breadcrumbs.append(
[
'TestSection{}'.format(index),
'TestSubsection{}'.format(index),
modified_name if modified_name else 'TestVertical{}'.format(index)
]
)
return breadcrumbs
def _delete_section(self, index):
""" Delete a section at index `index` """
# Logout and login as staff
LogoutPage(self.browser).visit()
StudioAutoAuthPage(
self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id, staff=True
).visit()
# Visit course outline page in studio.
self.course_outline_page.visit()
self.course_outline_page.wait_for_page()
self.course_outline_page.section_at(index).delete()
# Logout and login as a student.
LogoutPage(self.browser).visit()
LmsAutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id).visit()
# Visit courseware as a student.
self.courseware_page.visit()
self.courseware_page.wait_for_page()
def _toggle_bookmark_and_verify(self, bookmark_icon_state, bookmark_button_state, bookmarked_count):
"""
Bookmark/Un-Bookmark a unit and then verify
"""
self.assertTrue(self.courseware_page.bookmark_button_visible)
self.courseware_page.click_bookmark_unit_button()
self.assertEqual(self.courseware_page.bookmark_icon_visible, bookmark_icon_state)
self.assertEqual(self.courseware_page.bookmark_button_state, bookmark_button_state)
self.bookmarks_page.click_bookmarks_button()
self.assertEqual(self.bookmarks_page.count(), bookmarked_count)
def _verify_pagination_info(
self,
bookmark_count_on_current_page,
header_text,
previous_button_enabled,
next_button_enabled,
current_page_number,
total_pages
):
"""
Verify pagination info
"""
self.assertEqual(self.bookmarks_page.count(), bookmark_count_on_current_page)
self.assertEqual(self.bookmarks_page.get_pagination_header_text(), header_text)
self.assertEqual(self.bookmarks_page.is_previous_page_button_enabled(), previous_button_enabled)
self.assertEqual(self.bookmarks_page.is_next_page_button_enabled(), next_button_enabled)
self.assertEqual(self.bookmarks_page.get_current_page_number(), current_page_number)
self.assertEqual(self.bookmarks_page.get_total_pages, total_pages)
def _navigate_to_bookmarks_list(self):
"""
Navigates and verifies the bookmarks list page.
"""
self.bookmarks_page.click_bookmarks_button()
self.assertTrue(self.bookmarks_page.results_present())
self.assertEqual(self.bookmarks_page.results_header_text(), 'My Bookmarks')
def _verify_breadcrumbs(self, num_units, modified_name=None):
"""
Verifies the breadcrumb trail.
"""
bookmarked_breadcrumbs = self.bookmarks_page.breadcrumbs()
# Verify bookmarked breadcrumbs.
breadcrumbs = self._breadcrumb(num_units=num_units, modified_name=modified_name)
breadcrumbs.reverse()
self.assertEqual(bookmarked_breadcrumbs, breadcrumbs)
def update_and_publish_block_display_name(self, modified_name):
"""
Update and publish the block/unit display name.
"""
self.course_outline_page.visit()
self.course_outline_page.wait_for_page()
self.course_outline_page.expand_all_subsections()
section = self.course_outline_page.section_at(0)
container_page = section.subsection_at(0).unit_at(0).go_to()
self.course_fixture._update_xblock(container_page.locator, { # pylint: disable=protected-access
"metadata": {
"display_name": modified_name
}
})
container_page.visit()
container_page.wait_for_page()
self.assertEqual(container_page.name, modified_name)
container_page.publish_action.click()
def test_bookmark_button(self):
"""
Scenario: Bookmark unit button toggles correctly
Given that I am a registered user
And I visit my courseware page
For first 2 units
I visit the unit
And I can see the Bookmark button
When I click on Bookmark button
Then unit should be bookmarked
Then I click again on the bookmark button
And I should see a unit un-bookmarked
"""
self._test_setup()
for index in range(2):
self.course_nav.go_to_section('TestSection{}'.format(index), 'TestSubsection{}'.format(index))
self._toggle_bookmark_and_verify(True, 'bookmarked', 1)
self.bookmarks_page.click_bookmarks_button(False)
self._toggle_bookmark_and_verify(False, '', 0)
def test_empty_bookmarks_list(self):
"""
Scenario: An empty bookmarks list is shown if there are no bookmarked units.
Given that I am a registered user
And I visit my courseware page
And I can see the Bookmarks button
When I click on Bookmarks button
Then I should see an empty bookmarks list
And empty bookmarks list content is correct
"""
self._test_setup()
self.assertTrue(self.bookmarks_page.bookmarks_button_visible())
self.bookmarks_page.click_bookmarks_button()
self.assertEqual(self.bookmarks_page.results_header_text(), 'My Bookmarks')
self.assertEqual(self.bookmarks_page.empty_header_text(), 'You have not bookmarked any courseware pages yet.')
empty_list_text = ("Use bookmarks to help you easily return to courseware pages. To bookmark a page, "
"select Bookmark in the upper right corner of that page. To see a list of all your "
"bookmarks, select Bookmarks in the upper left corner of any courseware page.")
self.assertEqual(self.bookmarks_page.empty_list_text(), empty_list_text)
def test_bookmarks_list(self):
"""
Scenario: A bookmarks list is shown if there are bookmarked units.
Given that I am a registered user
And I visit my courseware page
And I have bookmarked 2 units
When I click on Bookmarks button
Then I should see a bookmarked list with 2 bookmark links
And breadcrumb trail is correct for a bookmark
When I click on bookmarked link
Then I can navigate to correct bookmarked unit
"""
self._test_setup()
self._bookmark_units(2)
self._navigate_to_bookmarks_list()
self._verify_breadcrumbs(num_units=2)
self._verify_pagination_info(
bookmark_count_on_current_page=2,
header_text='Showing 1-2 out of 2 total',
previous_button_enabled=False,
next_button_enabled=False,
current_page_number=1,
total_pages=1
)
# get usage ids for units
xblocks = self.course_fixture.get_nested_xblocks(category="vertical")
xblock_usage_ids = [xblock.locator for xblock in xblocks]
# Verify link navigation
for index in range(2):
self.bookmarks_page.click_bookmarked_block(index)
self.courseware_page.wait_for_page()
self.assertIn(self.courseware_page.active_usage_id(), xblock_usage_ids)
self.courseware_page.visit().wait_for_page()
self.bookmarks_page.click_bookmarks_button()
def test_bookmark_shows_updated_breadcrumb_after_publish(self):
"""
Scenario: A bookmark breadcrumb trail is updated after publishing the changed display name.
Given that I am a registered user
And I visit my courseware page
And I can see bookmarked unit
Then I visit unit page in studio
Then I change unit display_name
And I publish the changes
Then I visit my courseware page
And I visit bookmarks list page
When I see the bookmark
Then I can see the breadcrumb trail
with updated display_name.
"""
self._test_setup(num_chapters=1)
self._bookmark_units(num_units=1)
self._navigate_to_bookmarks_list()
self._verify_breadcrumbs(num_units=1)
LogoutPage(self.browser).visit()
LmsAutoAuthPage(
self.browser,
username=self.USERNAME,
email=self.EMAIL,
course_id=self.course_id,
staff=True
).visit()
modified_name = "Updated name"
self.update_and_publish_block_display_name(modified_name)
LogoutPage(self.browser).visit()
LmsAutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id).visit()
self.courseware_page.visit()
self._navigate_to_bookmarks_list()
self._verify_breadcrumbs(num_units=1, modified_name=modified_name)
def test_unreachable_bookmark(self):
"""
Scenario: We should get a HTTP 404 for an unreachable bookmark.
Given that I am a registered user
And I visit my courseware page
And I have bookmarked 2 units
Then I delete a bookmarked unit
Then I click on Bookmarks button
And I should see a bookmarked list
When I click on deleted bookmark
Then I should navigated to 404 page
"""
self._test_setup(num_chapters=1)
self._bookmark_units(1)
self._delete_section(0)
self._navigate_to_bookmarks_list()
self._verify_pagination_info(
bookmark_count_on_current_page=1,
header_text='Showing 1 out of 1 total',
previous_button_enabled=False,
next_button_enabled=False,
current_page_number=1,
total_pages=1
)
self.bookmarks_page.click_bookmarked_block(0)
self.assertTrue(is_404_page(self.browser))
def test_page_size_limit(self):
"""
Scenario: We can't get bookmarks more than default page size.
Given that I am a registered user
And I visit my courseware page
And I have bookmarked all the 11 units available
Then I click on Bookmarks button
And I should see a bookmarked list
And bookmark list contains 10 bookmarked items
"""
self._test_setup(11)
self._bookmark_units(11)
self._navigate_to_bookmarks_list()
self._verify_pagination_info(
bookmark_count_on_current_page=10,
header_text='Showing 1-10 out of 11 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
def test_pagination_with_single_page(self):
"""
Scenario: Bookmarks list pagination is working as expected for single page
Given that I am a registered user
And I visit my courseware page
And I have bookmarked all the 2 units available
Then I click on Bookmarks button
And I should see a bookmarked list with 2 bookmarked items
And I should see paging header and footer with correct data
And previous and next buttons are disabled
"""
self._test_setup(num_chapters=2)
self._bookmark_units(num_units=2)
self.bookmarks_page.click_bookmarks_button()
self.assertTrue(self.bookmarks_page.results_present())
self._verify_pagination_info(
bookmark_count_on_current_page=2,
header_text='Showing 1-2 out of 2 total',
previous_button_enabled=False,
next_button_enabled=False,
current_page_number=1,
total_pages=1
)
def test_next_page_button(self):
"""
Scenario: Next button is working as expected for bookmarks list pagination
Given that I am a registered user
And I visit my courseware page
And I have bookmarked all the 12 units available
Then I click on Bookmarks button
And I should see a bookmarked list of 10 items
And I should see paging header and footer with correct info
Then I click on next page button in footer
And I should be navigated to second page
And I should see a bookmarked list with 2 items
And I should see paging header and footer with correct info
"""
self._test_setup(num_chapters=12)
self._bookmark_units(num_units=12)
self.bookmarks_page.click_bookmarks_button()
self.assertTrue(self.bookmarks_page.results_present())
self._verify_pagination_info(
bookmark_count_on_current_page=10,
header_text='Showing 1-10 out of 12 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
self.bookmarks_page.press_next_page_button()
self._verify_pagination_info(
bookmark_count_on_current_page=2,
header_text='Showing 11-12 out of 12 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
def test_previous_page_button(self):
"""
Scenario: Previous button is working as expected for bookmarks list pagination
Given that I am a registered user
And I visit my courseware page
And I have bookmarked all the 12 units available
And I click on Bookmarks button
Then I click on next page button in footer
And I should be navigated to second page
And I should see a bookmarked list with 2 items
And I should see paging header and footer with correct info
Then I click on previous page button
And I should be navigated to first page
And I should see paging header and footer with correct info
"""
self._test_setup(num_chapters=12)
self._bookmark_units(num_units=12)
self.bookmarks_page.click_bookmarks_button()
self.assertTrue(self.bookmarks_page.results_present())
self.bookmarks_page.press_next_page_button()
self._verify_pagination_info(
bookmark_count_on_current_page=2,
header_text='Showing 11-12 out of 12 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
self.bookmarks_page.press_previous_page_button()
self._verify_pagination_info(
bookmark_count_on_current_page=10,
header_text='Showing 1-10 out of 12 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
def test_pagination_with_valid_page_number(self):
"""
Scenario: Bookmarks list pagination works as expected for valid page number
Given that I am a registered user
And I visit my courseware page
And I have bookmarked all the 12 units available
Then I click on Bookmarks button
And I should see a bookmarked list
And I should see total page value is 2
Then I enter 2 in the page number input
And I should be navigated to page 2
"""
self._test_setup(num_chapters=11)
self._bookmark_units(num_units=11)
self.bookmarks_page.click_bookmarks_button()
self.assertTrue(self.bookmarks_page.results_present())
self.assertEqual(self.bookmarks_page.get_total_pages, 2)
self.bookmarks_page.go_to_page(2)
self._verify_pagination_info(
bookmark_count_on_current_page=1,
header_text='Showing 11-11 out of 11 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
def test_pagination_with_invalid_page_number(self):
"""
Scenario: Bookmarks list pagination works as expected for invalid page number
Given that I am a registered user
And I visit my courseware page
And I have bookmarked all the 11 units available
Then I click on Bookmarks button
And I should see a bookmarked list
And I should see total page value is 2
Then I enter 3 in the page number input
And I should stay at page 1
"""
self._test_setup(num_chapters=11)
self._bookmark_units(num_units=11)
self.bookmarks_page.click_bookmarks_button()
self.assertTrue(self.bookmarks_page.results_present())
self.assertEqual(self.bookmarks_page.get_total_pages, 2)
self.bookmarks_page.go_to_page(3)
self._verify_pagination_info(
bookmark_count_on_current_page=10,
header_text='Showing 1-10 out of 11 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
def test_bookmarked_unit_accessed_event(self):
"""
Scenario: Bookmark events are emitted with correct data when we access/visit a bookmarked unit.
Given that I am a registered user
And I visit my courseware page
And I have bookmarked a unit
When I click on bookmarked unit
Then `edx.course.bookmark.accessed` event is emitted
"""
self._test_setup(num_chapters=1)
self.reset_event_tracking()
# create expected event data
xblocks = self.course_fixture.get_nested_xblocks(category="vertical")
event_data = [
{
'event': {
'bookmark_id': '{},{}'.format(self.USERNAME, xblocks[0].locator),
'component_type': xblocks[0].category,
'component_usage_id': xblocks[0].locator,
}
}
]
self._bookmark_units(num_units=1)
self.bookmarks_page.click_bookmarks_button()
self._verify_pagination_info(
bookmark_count_on_current_page=1,
header_text='Showing 1 out of 1 total',
previous_button_enabled=False,
next_button_enabled=False,
current_page_number=1,
total_pages=1
)
self.bookmarks_page.click_bookmarked_block(0)
self.verify_event_data('edx.bookmark.accessed', event_data)
| IndonesiaX/edx-platform | common/test/acceptance/tests/lms/test_bookmarks.py | Python | agpl-3.0 | 23,679 | [
"VisIt"
] | fa4a5978aa8456781bcfa780cad69732d91a8b7b0e8fc2ec57753291bc54e69d |
"""
The following contains a database of small molecules
Data for the G2/97 database are from
Raghavachari, Redfern, and Pople, J. Chem. Phys. Vol. 106, 1063 (1997).
See http://www.cse.anl.gov/Catalysis_and_Energy_Conversion/Computational_Thermochemistry.shtml for the original files.
All numbers are experimental values, except for coordinates, which are
MP2(full)/6-31G(d) optimized geometries (from http://www.cse.anl.gov/OldCHMwebsiteContent/compmat/G2-97.htm)
Atomic species:
ref: Curtiss et al. JCP 106, 1063 (1997).
'Enthalpy' is the experimental enthalpies of formation at 0K
'thermal correction' is the thermal corrections H(298)-H(0)
Molecular species:
ref: Staroverov et al. JCP 119, 12129 (2003)
'Enthalpy' is the experimental enthalpies of formation at 298K
'ZPE' is the zero-point energies
'thermal correction' is the thermal enthalpy corrections H(298K) - H_exp(0K)
ZPE and thermal corrections are estimated from B3LYP geometries and vibrations.
Experimental ionization potentials are from http://srdata.nist.gov/cccbdb/.
For details about G2-1 and G2-2 sets see doi:10.1063/1.477422.
"""
from ase.data.g2_1 import data as data_g2_1
from ase.data.g2_2 import data as data_g2_2
data = data_g2_1.copy()
data.update(data_g2_2)
from ase.data.g2_1 import atom_names as atom_names_g2_1
from ase.data.g2_1 import molecule_names as molecule_names_g2_1
from ase.data.g2_2 import atom_names as atom_names_g2_2
from ase.data.g2_2 import molecule_names as molecule_names_g2_2
atom_names = []
for a in atom_names_g2_1 + atom_names_g2_2:
if a not in atom_names:
atom_names.append(a)
molecule_names = molecule_names_g2_1 + molecule_names_g2_2
from ase.data.g2_2 import get_ionization_energy
from ase.data.g2_2 import get_atomization_energy
| grhawk/ASE | tools/ase/data/g2.py | Python | gpl-2.0 | 1,764 | [
"ASE"
] | a3e6d415fb7d99f242bb9f24dfd02ad74dd8a89e29fe31a2721bbc9130f1eaca |
# Author: Yang Long <longyang_123@yeah.net>
#
# License: LGPL-2.1
import numpy as np
import time
from .Constraint import Constraints
from .MultiCandidate import MultiCandidates
from . import MultiUtils
class MultiGA:
'''
NSGA-II
'''
def __init__(self,func,targetsize,nvars,LB=None,UB=None,IntCon=None,initpopulation=None,maxgeneration=None,popsize=300,\
stallgenlimit=100,stalltimelimit=None,objectiveslimit=None,timelimit=None,TolCon=1.0*10**-6,TolFun=1.0*10**-6,diversitylimit=0.05,\
groupsize=1,migrateforward=True,migrationfraction=0.2,migrationinterval=20,\
paretofraction=0.01,crossoverfraction=0.8,mutationrate=0.1,\
verbose=False,parallelized=False,options=None):
self.func = func # Function to minimize
self.chromesize = nvars # Number of variants
self.targetsize = targetsize
# Lower Boundary
if LB is not None:
self.LB = np.array(LB)
else:
self.LB = LB
# Upper Boundary
if UB is not None:
self.UB = np.array(UB)
else:
self.UB = UB
# Integer Constraint
if IntCon is not None:
self.IntCon = np.array(IntCon)
else:
self.IntCon = IntCon
if IntCon is None: # Size of populations
self.popsize = popsize
else:
self.popsize = np.max([15*nvars,popsize])
self.initpopulation = initpopulation # Initial Populations
self.maxgeneration = maxgeneration # Max Generation to evlove
self.stallgenlimit = stallgenlimit
self.stalltimelimit = stalltimelimit
if objectiveslimit is not None:
self.objectiveslimit = np.array(objectiveslimit)
else:
self.objectiveslimit = objectiveslimit
self.timelimit = timelimit # Time Limit to run (in unit of seconds)
self.TolCon = TolCon
self.TolFun = TolFun
self.diversitylimit = diversitylimit
self.groupsize = groupsize
self.migrateforward = migrateforward
self.migrationfraction = migrationfraction
self.migrationinterval = migrationinterval
self.paretofraction = paretofraction
self.crossoverfraction = crossoverfraction
self.mutationrate = mutationrate
self.verbose = verbose # Print Computational Info
self.parallelized = parallelized
if options is not None:
self.options = options
else:
self.options = MultiUtils.MultiGAoptions.MultiGAoptions()
self.candidates = list() # Candidates
self.candidatestatus = np.zeros(groupsize)
self.constraints = Constraints() # Constraints
# Default Settings
self.createfunction = MultiUtils.Creation.Uniform
self.crossoverfunction = MultiUtils.Crossover.TwoPoint
self.mutationfunction = MultiUtils.Mutation.Uniform
self.fitnessscalingfunction = MultiUtils.FitnessScale.Rank
self.selectionfunction = MultiUtils.Selection.Tournament
self.distancefunction = MultiUtils.Pareto.FastNonDominatedSorting
# Stall Limit
self.stallobjectives = list()
self.stallgeneration = np.zeros(groupsize)
self.stallstarttime = np.zeros(groupsize)
self.stalltime = np.zeros(groupsize)
def addconstraint(self,constraintfunc,penalty=10):
self.constraints.add(constraintfunc,penalty)
def setparameter(self,parameter,value):
if parameter == 'createfunction':
if value == 'Uniform':
self.createfunction = Creation.Uniform
else:
return False
return True
if parameter == 'crossoverfunction':
if value == 'Laplacian':
self.crossoverfunction = Crossover.Laplacian
elif value == 'Scattered':
self.crossoverfunction = Crossover.Scattered
elif value == 'SinglePoint':
self.crossoverfunction = Crossover.SinglePoint
elif value == 'TwoPoint':
self.crossoverfunction = Crossover.TwoPoint
elif value == 'Intermediate':
self.crossoverfunction = Crossover.Intermediate
elif value == 'Heuristic':
self.crossoverfunction = Crossover.Heuristic
else:
return False
return True
if parameter == 'fitnessscalingfunction':
if value == 'Rank':
self.fitnessscalingfunction = FitnessScale.Rank
elif value == 'Proportional':
self.fitnessscalingfunction = FitnessScale.Proportional
elif value == 'ShiftLiner':
self.fitnessscalingfunction = FitnessScale.ShiftLiner
elif value == 'Top':
self.fitnessscalingfunction = FitnessScale.Top
else:
return False
return True
if parameter == 'mutationfunction':
if value == 'Uniform':
self.mutationfunction = Mutation.Uniform
elif value == 'Gaussian':
self.mutationfunction = Mutation.Gaussian
else:
return False
return True
if parameter == 'selectionfunction':
if value == 'Tournament':
self.selectionfunction = Selection.Tournament
elif value == 'StochasticUniform':
self.selectionfunction = Selection.StochasticUniform
elif value == 'Remainder':
self.selectionfunction = Selection.Reminder
elif value == 'Roulette':
self.selectionfunction = Selection.Roulette
else:
return False
return True
return False
def start(self):
'''
Main function to start GA
'''
starttime = time.time()
for i in range(self.groupsize):
self.candidates.append(MultiCandidates(popsize=self.popsize,chromesize=self.chromesize,func=self.func,targetsize=self.targetsize,\
constraints=self.constraints,IntCon=self.IntCon,LB=self.LB,UB=self.UB,\
initpopulation=self.initpopulation,paretofraction=self.paretofraction,\
crossoverfraction=self.crossoverfraction,mutationrate=self.mutationrate,\
createfunction=self.createfunction,\
crossoverfunction=self.crossoverfunction,\
mutationfunction=self.mutationfunction,\
selectionfunction=self.selectionfunction,\
fitnessscalingfunction=self.fitnessscalingfunction,\
distancefunction=self.distancefunction,\
verbose=self.verbose,options=self.options))
self.candidatestatus[i] = 0
self.stallobjectives.append(None)
if self.maxgeneration is not None:
for i in range(self.maxgeneration):
if self.verbose:
print('{num}th generation:'.format(num=i+1),end=' ')
self.update()
[status,code] = self.check()
# Terminate by the tolerance
if status:
if self.verbose:
print('Optimization terminated: \n{reason}'.format(reason=code))
break
if (i+1)%self.migrationinterval == 0:
self.migrate()
if self.verbose:
print('----Migration----')
# Terminate by the time limit
if self.timelimit is not None:
currenttime = time.time()
if currenttime-starttime > self.timelimit:
if self.verbose:
print('Optimization terminated: Time Limit!')
break
if self.verbose:
print('Optimization terminated: Maximum Generation')
else:
generation = 1
while 1:
if self.verbose:
print('{num}th generation:'.format(num=generation))
self.update()
[status,code] = self.check()
# Terminate by the tolerance
if status:
if self.verbose:
print('Optimization terminated: \n{reason}'.format(reason=code))
break
if generation%self.migrationinterval == 0:
self.migrate()
if self.verbose:
print('----Migration----')
# Terminate by the time limit
if self.timelimit is not None:
currenttime = time.time()
if currenttime-starttime > self.timelimit:
if self.verbose:
print('Optimization terminated: Time Limit!')
break
generation += 1
def check(self):
'''
Check tolerance of populations
'''
activecount = 0
for i in range(self.groupsize):
objectives = self.candidates[i].getallobjectives()
# Objectives Limit
if self.objectiveslimit is not None:
if np.sum(np.min(objectives,axis=0) < self.objectiveslimit) == sself.targetsize:
self.candidatestatus[i] = 1 # Objectives Limit
if self.stallobjectives[i] is None:
self.stallobjectives[i] = objectives
activecount += 1
continue
# Calculate Stall Generation
averagechange = np.sum(np.mean(objectives,axis=0)-np.mean(self.stallobjectives[i],axis=0))
base = np.sum(np.min(self.stallobjectives[i],axis=0))
if (averagechange/base < self.TolFun) and (len(objectives) > 1):
self.stallgeneration[i] += 1
self.stalltime[i] = self.stalltime[i] + time.time() - self.stallstarttime[i]
#if self.verbose:
# print ' -> Start stall: Generation {generation}th '.format(generation=self.stallgeneration)
else:
self.stallgeneration[i] = 0
self.stallstarttime[i] = time.time()
self.stalltime[i] = 0
self.stallobjectives[i] = objectives
# Stall Generation Limit
if self.stallgeneration[i] > self.stallgenlimit:
self.candidatestatus[i] = 2 # Stall Gen Limit
#if self.candidates[i].getdiversity() < self.diversitylimit:
# self.candidatestatus[i] = 3 # Diversity Limit
# Stall Time Limit
if self.stalltimelimit is not None:
if self.stalltime[i] > self.stalltimelimit:
self.candidatestatus[i] = 4 # Stall Time Limit
if self.candidatestatus[i] == 0:
activecount += 1 # Some group still alive
if activecount >= 1:
return False,None
else:
code = str()
for i in range(self.groupsize):
if self.candidatestatus[i] == 1:
code += '{num}th group -> {reason}\n'.format(num=i+1,reason='Objectives Limit')
elif self.candidatestatus[i] == 2:
code += '{num}th group -> {reason}\n'.format(num=i+1,reason='Stall Generation Limit')
elif self.candidatestatus[i] == 3:
code += '{num}th group -> {reason}\n'.format(num=i+1,reason='Diversity Limit')
elif self.candidatestatus[i] == 4:
code += '{num}th group -> {reason}\n'.format(num=i+1,reason='Stall Time Limit')
return True,code
def update(self):
'''
Evolve every generation
'''
for i in range(self.groupsize):
if self.candidatestatus[i] == 0: # Candidate[i] is on Active State
self.candidates[i].update()
def migrate(self):
'''
Migrate subpopulations
'''
popsize = int(self.popsize*self.migrationfraction)
if self.migrateforward:
for i in range(self.groupsize):
(population1,source1) = self.candidates[i].migrateout(popsize)
if i+1<self.groupsize:
(population2,source2) = self.candidates[i+1].migrateout(popsize)
self.candidates[i].migratein(source2,population2)
self.candidates[i+1].migratein(source1,population1)
else:
(population2,source2) = self.candidates[0].migrateout(popsize)
self.candidates[i].migratein(source2,population2)
self.candidates[0].migratein(source1,population1)
else:
for i in range(self.groupsize,-1,-1):
(population1,source1) = self.candidates[i].migrateout(popsize)
if i-1>=0:
(population2,source2) = self.candidates[i-1].migrateout(popsize)
self.candidates[i].migratein(source2,population2)
self.candidates[i-1].migratein(source1,population1)
else:
(population2,source2) = self.candidates[self.groupsize].migrateout(popsize)
self.candidates[i].migratein(source2,population2)
self.candidates[self.groupsize].migratein(source1,population1)
def getcache(self):
solutions = np.zeros((self.popsize*self.groupsize,self.chromesize))
objectives = np.zeros(self.popsize*self.groupsize,self.targetsize)
for i in range(self.groupsize):
solutions[(i)*self.popsize:(i+1)*self.popsize,:] = self.candidates[i].getallcandidates()
objectives[(i)*self.popsize:(i+1)*self.popsize,:] = self.candidates[i].getallobjectives()
return solutions,objectives
def getsolution(self):
solutions = list()
objectives = list()
for i in range(self.groupsize):
(solution,objective) = self.candidates[i].getfrontier()
popsize = np.size(solution,axis=0)
for j in range(popsize):
solutions.append(solution[j])
objectives.append(objective[j])
return np.array(solutions),np.array(objectives)
| longyangking/Husky | Husky/GA/MultiGA.py | Python | lgpl-2.1 | 15,032 | [
"Gaussian"
] | a2c5c202e676591df07d87ba85c278a71f60aed3f73dd20e8099aad514d06caf |
"""
@name: Modules/Core/Config/import_tools.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com>
@copyright: (c) 2019-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on Oct 19, 2019
@Summary: This handles
"""
__updated__ = '2020-01-06'
__version_info__ = (19, 11, 28)
__version__ = '.'.join(map(str, __version_info__))
# Import system type stuff
import importlib
# Import PyMh files
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.ImportTools ')
class Tools:
"""
"""
m_pyhouse_obj = None
def __init__(self, p_pyhouse_obj):
LOG.debug('Setting up Import_Tools.Tools')
self.m_pyhouse_obj = p_pyhouse_obj
def _do_import(self, p_name, p_path):
""" This will import a module.
Used when we discover that the module is needed because:
It is required
Configuration calles for it.
@param p_name: is the name of the module ('pandora')
@param p_path: is the relative path to the module ('Modules.House.Entertainment')
@return: a pointer to the module or None
"""
l_path = p_path + '.' + p_name
# l_package = p_path + '.'
LOG.debug('Importing\n\tModule: {}\n\tPath: {}'.format(p_name, l_path))
try:
l_ret = importlib.import_module(l_path)
except ImportError as e_err:
l_msg = 'PROG ERROR importing module: "{}"\n\tErr:{}.'.format(p_name, e_err)
LOG.error(l_msg)
l_ret = None
# LOG.info('Imported "{}" ({})'.format(p_name, l_path))
return l_ret
def XXXimport_module_get_api(self, p_module, p_path):
""" import a module with a path
@param p_module: is a module name ("Cameras")
@param p_path: is the starting point to look for the module to import.
@return: an initialized Api
"""
l_module_name = p_module
l_ret = self._do_import(l_module_name, p_path)
try:
LOG.debug('Get Api for "{}"'.format(l_module_name))
# LOG.debug(PrettyFormatAny.form(l_ret, 'Module'))
l_api = l_ret.Api(self.m_pyhouse_obj)
except Exception as e_err:
LOG.error('ERROR - Initializing Module: "{}"\n\tError: {}'.format(p_module, e_err))
# LOG.error('Ref: {}'.format(PrettyFormatAny.form(l_ret, 'ModuleRef')))
l_api = None
# LOG.debug('Imported: {}'.format(l_ret))
return l_api
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/Core/Config/import_tools.py | Python | mit | 2,581 | [
"Brian"
] | 08d0943482945ab1ca7042e7aae7611ce4049336b3f6f441f111204f19b775be |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for apache_beam.runners.interactive.pipeline_instrument."""
# pytype: skip-file
import tempfile
import unittest
import apache_beam as beam
from apache_beam import coders
from apache_beam.pipeline import PipelineVisitor
from apache_beam.runners.interactive import cache_manager as cache
from apache_beam.runners.interactive import interactive_beam as ib
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import pipeline_instrument as instr
from apache_beam.runners.interactive import interactive_runner
from apache_beam.runners.interactive.caching.streaming_cache import StreamingCache
from apache_beam.runners.interactive.testing.pipeline_assertion import assert_pipeline_equal
from apache_beam.runners.interactive.testing.pipeline_assertion import assert_pipeline_proto_contain_top_level_transform
from apache_beam.runners.interactive.testing.pipeline_assertion import assert_pipeline_proto_equal
from apache_beam.runners.interactive.testing.pipeline_assertion import \
assert_pipeline_proto_not_contain_top_level_transform
from apache_beam.runners.interactive.testing.test_cache_manager import InMemoryCache
from apache_beam.testing.test_stream import TestStream
class PipelineInstrumentTest(unittest.TestCase):
  """Tests for the interactive pipeline_instrument module.

  Each test builds an interactive pipeline, instruments it via
  instr.build_pipeline_instrument, and compares the instrumented pipeline
  (or its proto) against a manually constructed expected pipeline.
  """
  def setUp(self):
    # Reset the global interactive environment so state (watched variables,
    # cache managers, derived pipelines) cannot leak between tests.
    ie.new_env()
  def cache_key_of(self, name, pcoll):
    """Builds the repr of the CacheKey used for a watched PCollection.

    The key is derived from the variable name and the object identities of
    the PCollection, its producer, and its pipeline.
    """
    return repr(
        instr.CacheKey(
            name,
            str(id(pcoll)),
            str(id(pcoll.producer)),
            str(id(pcoll.pipeline))))
  def test_pcolls_to_pcoll_id(self):
    """PCollections map to their pipeline-proto PCollection ids."""
    p = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(InMemoryCache(), p)
    # pylint: disable=range-builtin-not-iterating
    init_pcoll = p | 'Init Create' >> beam.Impulse()
    _, ctx = p.to_runner_api(return_context=True)
    self.assertEqual(
        instr.pcolls_to_pcoll_id(p, ctx),
        {str(init_pcoll): 'ref_PCollection_PCollection_1'})
  def test_cacheable_key_without_version_map(self):
    """cacheable_key uses id(pcoll) + proto id when no version map is given."""
    p = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(InMemoryCache(), p)
    # pylint: disable=range-builtin-not-iterating
    init_pcoll = p | 'Init Create' >> beam.Create(range(10))
    _, ctx = p.to_runner_api(return_context=True)
    self.assertEqual(
        instr.cacheable_key(init_pcoll, instr.pcolls_to_pcoll_id(p, ctx)),
        str(id(init_pcoll)) + '_ref_PCollection_PCollection_8')
  def test_cacheable_key_with_version_map(self):
    """cacheable_key resolves to the original PCollection via a version map."""
    p = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(InMemoryCache(), p)
    # pylint: disable=range-builtin-not-iterating
    init_pcoll = p | 'Init Create' >> beam.Create(range(10))
    # It's normal that when executing, the pipeline object is a different
    # but equivalent instance from what user has built. The pipeline instrument
    # should be able to identify if the original instance has changed in an
    # interactive env while mutating the other instance for execution. The
    # version map can be used to figure out what the PCollection instances are
    # in the original instance and if the evaluation has changed since last
    # execution.
    p2 = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(InMemoryCache(), p2)
    # pylint: disable=range-builtin-not-iterating
    init_pcoll_2 = p2 | 'Init Create' >> beam.Create(range(10))
    _, ctx = p2.to_runner_api(return_context=True)
    # The cacheable_key should use id(init_pcoll) as prefix even when
    # init_pcoll_2 is supplied as long as the version map is given.
    self.assertEqual(
        instr.cacheable_key(
            init_pcoll_2,
            instr.pcolls_to_pcoll_id(p2, ctx),
            {'ref_PCollection_PCollection_8': str(id(init_pcoll))}),
        str(id(init_pcoll)) + '_ref_PCollection_PCollection_8')
  def test_cache_key(self):
    """PipelineInstrument.cache_key matches the expected CacheKey repr."""
    p = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(InMemoryCache(), p)
    # pylint: disable=range-builtin-not-iterating
    init_pcoll = p | 'Init Create' >> beam.Create(range(10))
    squares = init_pcoll | 'Square' >> beam.Map(lambda x: x * x)
    cubes = init_pcoll | 'Cube' >> beam.Map(lambda x: x**3)
    # Watch the local variables, i.e., the Beam pipeline defined.
    ib.watch(locals())
    pipeline_instrument = instr.build_pipeline_instrument(p)
    self.assertEqual(
        pipeline_instrument.cache_key(init_pcoll),
        self.cache_key_of('init_pcoll', init_pcoll))
    self.assertEqual(
        pipeline_instrument.cache_key(squares),
        self.cache_key_of('squares', squares))
    self.assertEqual(
        pipeline_instrument.cache_key(cubes), self.cache_key_of('cubes', cubes))
  def test_cacheables(self):
    """Watched PCollections are tracked as Cacheable entries."""
    p = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(InMemoryCache(), p)
    # pylint: disable=range-builtin-not-iterating
    init_pcoll = p | 'Init Create' >> beam.Create(range(10))
    squares = init_pcoll | 'Square' >> beam.Map(lambda x: x * x)
    cubes = init_pcoll | 'Cube' >> beam.Map(lambda x: x**3)
    ib.watch(locals())
    pipeline_instrument = instr.build_pipeline_instrument(p)
    # TODO(BEAM-7760): The PipelineInstrument cacheables maintains a global list
    # of cacheable PCollections across all pipelines. Here we take the subset of
    # cacheables that only pertain to this test's pipeline.
    cacheables = {
        k: c
        for k,
        c in pipeline_instrument.cacheables.items() if c.pcoll.pipeline is p
    }
    self.assertEqual(
        cacheables,
        {
            pipeline_instrument._cacheable_key(init_pcoll): instr.Cacheable(
                var='init_pcoll',
                version=str(id(init_pcoll)),
                pcoll_id='ref_PCollection_PCollection_8',
                producer_version=str(id(init_pcoll.producer)),
                pcoll=init_pcoll),
            pipeline_instrument._cacheable_key(squares): instr.Cacheable(
                var='squares',
                version=str(id(squares)),
                pcoll_id='ref_PCollection_PCollection_9',
                producer_version=str(id(squares.producer)),
                pcoll=squares),
            pipeline_instrument._cacheable_key(cubes): instr.Cacheable(
                var='cubes',
                version=str(id(cubes)),
                pcoll_id='ref_PCollection_PCollection_10',
                producer_version=str(id(cubes.producer)),
                pcoll=cubes)
        })
  def test_has_unbounded_source(self):
    """A pipeline reading from Pub/Sub is detected as having unbounded sources."""
    p = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(InMemoryCache(), p)
    _ = p | 'ReadUnboundedSource' >> beam.io.ReadFromPubSub(
        subscription='projects/fake-project/subscriptions/fake_sub')
    self.assertTrue(instr.has_unbounded_sources(p))
  def test_not_has_unbounded_source(self):
    """A pipeline reading only from a file has no unbounded sources."""
    p = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(InMemoryCache(), p)
    with tempfile.NamedTemporaryFile(delete=False) as f:
      f.write(b'test')
    _ = p | 'ReadBoundedSource' >> beam.io.ReadFromText(f.name)
    self.assertFalse(instr.has_unbounded_sources(p))
  def test_background_caching_pipeline_proto(self):
    """The background caching pipeline keeps only unbounded sources + caches."""
    p = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(StreamingCache(cache_dir=None), p)
    # Test that the two ReadFromPubSub are correctly cut out.
    a = p | 'ReadUnboundedSourceA' >> beam.io.ReadFromPubSub(
        subscription='projects/fake-project/subscriptions/fake_sub')
    b = p | 'ReadUnboundedSourceB' >> beam.io.ReadFromPubSub(
        subscription='projects/fake-project/subscriptions/fake_sub')
    # Add some extra PTransform afterwards to make sure that only the unbounded
    # sources remain.
    c = (a, b) | beam.Flatten()
    _ = c | beam.Map(lambda x: x)
    ib.watch(locals())
    instrumenter = instr.build_pipeline_instrument(p)
    actual_pipeline = instrumenter.background_caching_pipeline_proto()
    # Now recreate the expected pipeline, which should only have the unbounded
    # sources.
    p = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(StreamingCache(cache_dir=None), p)
    a = p | 'ReadUnboundedSourceA' >> beam.io.ReadFromPubSub(
        subscription='projects/fake-project/subscriptions/fake_sub')
    _ = (
        a
        | 'reify a' >> beam.Map(lambda _: _)
        | 'a' >> cache.WriteCache(ie.current_env().get_cache_manager(p), ''))
    b = p | 'ReadUnboundedSourceB' >> beam.io.ReadFromPubSub(
        subscription='projects/fake-project/subscriptions/fake_sub')
    _ = (
        b
        | 'reify b' >> beam.Map(lambda _: _)
        | 'b' >> cache.WriteCache(ie.current_env().get_cache_manager(p), ''))
    expected_pipeline = p.to_runner_api(return_context=False)
    assert_pipeline_proto_equal(self, expected_pipeline, actual_pipeline)
  def _example_pipeline(self, watch=True, bounded=True):
    """Builds a small two-transform pipeline used by several tests.

    Args:
      watch: Whether to watch the local variables (register them with the
        interactive environment).
      bounded: Whether to use a bounded (Create) or unbounded (Pub/Sub)
        source.

    Returns:
      Tuple of (pipeline, initial PCollection, second PCollection).
    """
    p = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(InMemoryCache(), p)
    # pylint: disable=range-builtin-not-iterating
    if bounded:
      source = beam.Create(range(10))
    else:
      source = beam.io.ReadFromPubSub(
          subscription='projects/fake-project/subscriptions/fake_sub')
    init_pcoll = p | 'Init Source' >> source
    second_pcoll = init_pcoll | 'Second' >> beam.Map(lambda x: x * x)
    if watch:
      ib.watch(locals())
    return (p, init_pcoll, second_pcoll)
  def _mock_write_cache(self, pipeline, values, cache_key):
    """Cache the PCollection where cache.WriteCache would write to."""
    labels = ['full', cache_key]
    # Usually, the pcoder will be inferred from `pcoll.element_type`
    pcoder = coders.registry.get_coder(object)
    cache_manager = ie.current_env().get_cache_manager(pipeline)
    cache_manager.save_pcoder(pcoder, *labels)
    cache_manager.write(values, *labels)
  def test_instrument_example_pipeline_to_write_cache(self):
    """Instrumentation adds WriteCache transforms for cacheable PCollections."""
    # Original instance defined by user code has all variables handlers.
    p_origin, init_pcoll, second_pcoll = self._example_pipeline()
    # Copied instance when execution has no user defined variables.
    p_copy, _, _ = self._example_pipeline(False)
    # Instrument the copied pipeline.
    pipeline_instrument = instr.build_pipeline_instrument(p_copy)
    # Manually instrument original pipeline with expected pipeline transforms.
    init_pcoll_cache_key = pipeline_instrument.cache_key(init_pcoll)
    _ = (
        init_pcoll
        | 'reify init' >> beam.Map(lambda _: _)
        | '_WriteCache_' + init_pcoll_cache_key >> cache.WriteCache(
            ie.current_env().get_cache_manager(p_origin), init_pcoll_cache_key))
    second_pcoll_cache_key = pipeline_instrument.cache_key(second_pcoll)
    _ = (
        second_pcoll
        | 'reify second' >> beam.Map(lambda _: _)
        | '_WriteCache_' + second_pcoll_cache_key >> cache.WriteCache(
            ie.current_env().get_cache_manager(p_origin),
            second_pcoll_cache_key))
    # The 2 pipelines should be the same now.
    assert_pipeline_equal(self, p_copy, p_origin)
  def test_instrument_example_pipeline_to_read_cache(self):
    """Instrumentation replaces computed PCollections with ReadCache."""
    p_origin, init_pcoll, second_pcoll = self._example_pipeline()
    p_copy, _, _ = self._example_pipeline(False)
    # Mock as if cacheable PCollections are cached.
    init_pcoll_cache_key = self.cache_key_of('init_pcoll', init_pcoll)
    self._mock_write_cache(p_origin, [b'1', b'2', b'3'], init_pcoll_cache_key)
    second_pcoll_cache_key = self.cache_key_of('second_pcoll', second_pcoll)
    self._mock_write_cache(p_origin, [b'1', b'4', b'9'], second_pcoll_cache_key)
    # Mark the completeness of PCollections from the original(user) pipeline.
    ie.current_env().mark_pcollection_computed((init_pcoll, second_pcoll))
    ie.current_env().add_derived_pipeline(p_origin, p_copy)
    instr.build_pipeline_instrument(p_copy)
    cached_init_pcoll = (
        p_origin
        | '_ReadCache_' + init_pcoll_cache_key >> cache.ReadCache(
            ie.current_env().get_cache_manager(p_origin), init_pcoll_cache_key)
        | 'unreify' >> beam.Map(lambda _: _))
    # second_pcoll is never used as input and there is no need to read cache.
    class TestReadCacheWireVisitor(PipelineVisitor):
      """Replace init_pcoll with cached_init_pcoll for all occuring inputs."""
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)
      def visit_transform(self, transform_node):
        if transform_node.inputs:
          input_list = list(transform_node.inputs)
          for i in range(len(input_list)):
            if input_list[i] == init_pcoll:
              input_list[i] = cached_init_pcoll
          transform_node.inputs = tuple(input_list)
    v = TestReadCacheWireVisitor()
    p_origin.visit(v)
    assert_pipeline_equal(self, p_origin, p_copy)
  def test_find_out_correct_user_pipeline(self):
    """The instrument resolves a runner pipeline back to its user pipeline."""
    # This is the user pipeline instance we care in the watched scope.
    user_pipeline, _, _ = self._example_pipeline()
    # This is a new runner pipeline instance with the same pipeline graph to
    # what the user_pipeline represents.
    runner_pipeline = beam.pipeline.Pipeline.from_runner_api(
        user_pipeline.to_runner_api(), user_pipeline.runner, options=None)
    ie.current_env().add_derived_pipeline(user_pipeline, runner_pipeline)
    # This is a totally irrelevant user pipeline in the watched scope.
    irrelevant_user_pipeline = beam.Pipeline(
        interactive_runner.InteractiveRunner())
    ib.watch({'irrelevant_user_pipeline': irrelevant_user_pipeline})
    # Build instrument from the runner pipeline.
    pipeline_instrument = instr.build_pipeline_instrument(runner_pipeline)
    self.assertIs(pipeline_instrument.user_pipeline, user_pipeline)
  def test_instrument_example_unbounded_pipeline_to_read_cache(self):
    """Tests that the instrumenter works for a single unbounded source.
    """
    # Create the pipeline that will be instrumented.
    p_original = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(
        StreamingCache(cache_dir=None), p_original)
    source_1 = p_original | 'source1' >> beam.io.ReadFromPubSub(
        subscription='projects/fake-project/subscriptions/fake_sub')
    # pylint: disable=possibly-unused-variable
    pcoll_1 = source_1 | 'square1' >> beam.Map(lambda x: x * x)
    # Mock as if cacheable PCollections are cached.
    ib.watch(locals())
    for name, pcoll in locals().items():
      if not isinstance(pcoll, beam.pvalue.PCollection):
        continue
      cache_key = self.cache_key_of(name, pcoll)
      self._mock_write_cache(p_original, [], cache_key)
    # Instrument the original pipeline to create the pipeline the user will see.
    instrumenter = instr.build_pipeline_instrument(p_original)
    actual_pipeline = beam.Pipeline.from_runner_api(
        proto=instrumenter.instrumented_pipeline_proto(),
        runner=interactive_runner.InteractiveRunner(),
        options=None)
    # Now, build the expected pipeline which replaces the unbounded source with
    # a TestStream.
    source_1_cache_key = self.cache_key_of('source_1', source_1)
    p_expected = beam.Pipeline()
    test_stream = (p_expected | TestStream(output_tags=[source_1_cache_key]))
    # pylint: disable=expression-not-assigned
    test_stream[source_1_cache_key] | 'square1' >> beam.Map(lambda x: x * x)
    # Test that the TestStream is outputting to the correct PCollection.
    class TestStreamVisitor(PipelineVisitor):
      def __init__(self):
        self.output_tags = set()
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)
      def visit_transform(self, transform_node):
        transform = transform_node.transform
        if isinstance(transform, TestStream):
          self.output_tags = transform.output_tags
    v = TestStreamVisitor()
    actual_pipeline.visit(v)
    expected_output_tags = set([source_1_cache_key])
    actual_output_tags = v.output_tags
    self.assertSetEqual(expected_output_tags, actual_output_tags)
    # Test that the pipeline is as expected.
    assert_pipeline_proto_equal(
        self,
        p_expected.to_runner_api(),
        instrumenter.instrumented_pipeline_proto())
  def test_able_to_cache_intermediate_unbounded_source_pcollection(self):
    """Tests being able to cache an intermediate source PCollection.
    In the following pipeline, the source doesn't have a reference and so is
    not automatically cached in the watch() command. This tests that this case
    is taken care of.
    """
    # Create the pipeline that will be instrumented.
    from apache_beam.options.pipeline_options import StandardOptions
    options = StandardOptions(streaming=True)
    streaming_cache_manager = StreamingCache(cache_dir=None)
    p_original = beam.Pipeline(interactive_runner.InteractiveRunner(), options)
    ie.current_env().set_cache_manager(streaming_cache_manager, p_original)
    # pylint: disable=possibly-unused-variable
    source_1 = (
        p_original
        | 'source1' >> beam.io.ReadFromPubSub(
            subscription='projects/fake-project/subscriptions/fake_sub')
        | beam.Map(lambda e: e))
    # Watch but do not cache the PCollections.
    ib.watch(locals())
    # Make sure that sources without a user reference are still cached.
    instr.watch_sources(p_original)
    intermediate_source_pcoll = None
    for watching in ie.current_env().watching():
      watching = list(watching)
      for var, watchable in watching:
        if 'synthetic' in var:
          intermediate_source_pcoll = watchable
          break
    # Instrument the original pipeline to create the pipeline the user will see.
    p_copy = beam.Pipeline.from_runner_api(
        p_original.to_runner_api(),
        runner=interactive_runner.InteractiveRunner(),
        options=options)
    instrumenter = instr.build_pipeline_instrument(p_copy)
    actual_pipeline = beam.Pipeline.from_runner_api(
        proto=instrumenter.instrumented_pipeline_proto(),
        runner=interactive_runner.InteractiveRunner(),
        options=options)
    # Now, build the expected pipeline which replaces the unbounded source with
    # a TestStream.
    intermediate_source_pcoll_cache_key = \
        self.cache_key_of('synthetic_var_' + str(id(intermediate_source_pcoll)),
                     intermediate_source_pcoll)
    p_expected = beam.Pipeline()
    ie.current_env().set_cache_manager(streaming_cache_manager, p_expected)
    test_stream = (
        p_expected
        | TestStream(output_tags=[intermediate_source_pcoll_cache_key]))
    # pylint: disable=expression-not-assigned
    (
        test_stream[intermediate_source_pcoll_cache_key]
        | 'square1' >> beam.Map(lambda e: e)
        | 'reify' >> beam.Map(lambda _: _)
        | cache.WriteCache(
            ie.current_env().get_cache_manager(p_expected), 'unused'))
    # Test that the TestStream is outputting to the correct PCollection.
    class TestStreamVisitor(PipelineVisitor):
      def __init__(self):
        self.output_tags = set()
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)
      def visit_transform(self, transform_node):
        transform = transform_node.transform
        if isinstance(transform, TestStream):
          self.output_tags = transform.output_tags
    v = TestStreamVisitor()
    actual_pipeline.visit(v)
    expected_output_tags = set([intermediate_source_pcoll_cache_key])
    actual_output_tags = v.output_tags
    self.assertSetEqual(expected_output_tags, actual_output_tags)
    # Test that the pipeline is as expected.
    assert_pipeline_proto_equal(
        self,
        p_expected.to_runner_api(),
        instrumenter.instrumented_pipeline_proto())
  def test_instrument_mixed_streaming_batch(self):
    """Tests caching for both batch and streaming sources in the same pipeline.
    This ensures that cached bounded and unbounded sources are read from the
    TestStream.
    """
    # Create the pipeline that will be instrumented.
    from apache_beam.options.pipeline_options import StandardOptions
    options = StandardOptions(streaming=True)
    p_original = beam.Pipeline(interactive_runner.InteractiveRunner(), options)
    streaming_cache_manager = StreamingCache(cache_dir=None)
    ie.current_env().set_cache_manager(streaming_cache_manager, p_original)
    source_1 = p_original | 'source1' >> beam.io.ReadFromPubSub(
        subscription='projects/fake-project/subscriptions/fake_sub')
    source_2 = p_original | 'source2' >> beam.Create([1, 2, 3, 4, 5])
    # pylint: disable=possibly-unused-variable
    pcoll_1 = ((source_1, source_2)
               | beam.Flatten()
               | 'square1' >> beam.Map(lambda x: x * x))
    # Watch but do not cache the PCollections.
    ib.watch(locals())
    self._mock_write_cache(
        p_original, [], self.cache_key_of('source_2', source_2))
    ie.current_env().mark_pcollection_computed([source_2])
    # Instrument the original pipeline to create the pipeline the user will see.
    p_copy = beam.Pipeline.from_runner_api(
        p_original.to_runner_api(),
        runner=interactive_runner.InteractiveRunner(),
        options=options)
    ie.current_env().add_derived_pipeline(p_original, p_copy)
    instrumenter = instr.build_pipeline_instrument(p_copy)
    actual_pipeline = beam.Pipeline.from_runner_api(
        proto=instrumenter.instrumented_pipeline_proto(),
        runner=interactive_runner.InteractiveRunner(),
        options=options)
    # Now, build the expected pipeline which replaces the unbounded source with
    # a TestStream.
    source_1_cache_key = self.cache_key_of('source_1', source_1)
    source_2_cache_key = self.cache_key_of('source_2', source_2)
    p_expected = beam.Pipeline()
    ie.current_env().set_cache_manager(streaming_cache_manager, p_expected)
    test_stream = (
        p_expected
        | TestStream(output_tags=[source_1_cache_key, source_2_cache_key]))
    # pylint: disable=expression-not-assigned
    ((
        test_stream[self.cache_key_of('source_1', source_1)],
        test_stream[self.cache_key_of('source_2', source_2)])
     | beam.Flatten()
     | 'square1' >> beam.Map(lambda x: x * x)
     | 'reify' >> beam.Map(lambda _: _)
     | cache.WriteCache(
         ie.current_env().get_cache_manager(p_expected), 'unused'))
    # Test that the TestStream is outputting to the correct PCollection.
    class TestStreamVisitor(PipelineVisitor):
      def __init__(self):
        self.output_tags = set()
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)
      def visit_transform(self, transform_node):
        transform = transform_node.transform
        if isinstance(transform, TestStream):
          self.output_tags = transform.output_tags
    v = TestStreamVisitor()
    actual_pipeline.visit(v)
    expected_output_tags = set([source_1_cache_key, source_2_cache_key])
    actual_output_tags = v.output_tags
    self.assertSetEqual(expected_output_tags, actual_output_tags)
    # Test that the pipeline is as expected.
    assert_pipeline_proto_equal(
        self,
        p_expected.to_runner_api(),
        instrumenter.instrumented_pipeline_proto())
  def test_instrument_example_unbounded_pipeline_direct_from_source(self):
    """Tests that the it caches PCollections from a source.
    """
    # Create the pipeline that will be instrumented.
    from apache_beam.options.pipeline_options import StandardOptions
    options = StandardOptions(streaming=True)
    p_original = beam.Pipeline(interactive_runner.InteractiveRunner(), options)
    ie.current_env().set_cache_manager(
        StreamingCache(cache_dir=None), p_original)
    source_1 = p_original | 'source1' >> beam.io.ReadFromPubSub(
        subscription='projects/fake-project/subscriptions/fake_sub')
    # pylint: disable=possibly-unused-variable
    # Watch but do not cache the PCollections.
    ib.watch(locals())
    # Instrument the original pipeline to create the pipeline the user will see.
    p_copy = beam.Pipeline.from_runner_api(
        p_original.to_runner_api(),
        runner=interactive_runner.InteractiveRunner(),
        options=options)
    instrumenter = instr.build_pipeline_instrument(p_copy)
    actual_pipeline = beam.Pipeline.from_runner_api(
        proto=instrumenter.instrumented_pipeline_proto(),
        runner=interactive_runner.InteractiveRunner(),
        options=options)
    # Now, build the expected pipeline which replaces the unbounded source with
    # a TestStream.
    source_1_cache_key = self.cache_key_of('source_1', source_1)
    p_expected = beam.Pipeline()
    # pylint: disable=unused-variable
    test_stream = (
        p_expected
        | TestStream(output_tags=[self.cache_key_of('source_1', source_1)]))
    # Test that the TestStream is outputting to the correct PCollection.
    class TestStreamVisitor(PipelineVisitor):
      def __init__(self):
        self.output_tags = set()
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)
      def visit_transform(self, transform_node):
        transform = transform_node.transform
        if isinstance(transform, TestStream):
          self.output_tags = transform.output_tags
    v = TestStreamVisitor()
    actual_pipeline.visit(v)
    expected_output_tags = set([source_1_cache_key])
    actual_output_tags = v.output_tags
    self.assertSetEqual(expected_output_tags, actual_output_tags)
    # Test that the pipeline is as expected.
    assert_pipeline_proto_equal(
        self,
        p_expected.to_runner_api(),
        instrumenter.instrumented_pipeline_proto())
  def test_instrument_example_unbounded_pipeline_to_read_cache_not_cached(self):
    """Tests that the instrumenter works when the PCollection is not cached.
    """
    # Create the pipeline that will be instrumented.
    from apache_beam.options.pipeline_options import StandardOptions
    options = StandardOptions(streaming=True)
    p_original = beam.Pipeline(interactive_runner.InteractiveRunner(), options)
    ie.current_env().set_cache_manager(
        StreamingCache(cache_dir=None), p_original)
    source_1 = p_original | 'source1' >> beam.io.ReadFromPubSub(
        subscription='projects/fake-project/subscriptions/fake_sub')
    # pylint: disable=possibly-unused-variable
    pcoll_1 = source_1 | 'square1' >> beam.Map(lambda x: x * x)
    # Watch but do not cache the PCollections.
    ib.watch(locals())
    # Instrument the original pipeline to create the pipeline the user will see.
    p_copy = beam.Pipeline.from_runner_api(
        p_original.to_runner_api(),
        runner=interactive_runner.InteractiveRunner(),
        options=options)
    instrumenter = instr.build_pipeline_instrument(p_copy)
    actual_pipeline = beam.Pipeline.from_runner_api(
        proto=instrumenter.instrumented_pipeline_proto(),
        runner=interactive_runner.InteractiveRunner(),
        options=options)
    # Now, build the expected pipeline which replaces the unbounded source with
    # a TestStream.
    source_1_cache_key = self.cache_key_of('source_1', source_1)
    p_expected = beam.Pipeline()
    ie.current_env().set_cache_manager(
        StreamingCache(cache_dir=None), p_expected)
    test_stream = (p_expected | TestStream(output_tags=[source_1_cache_key]))
    # pylint: disable=expression-not-assigned
    (
        test_stream[source_1_cache_key]
        | 'square1' >> beam.Map(lambda x: x * x)
        | 'reify' >> beam.Map(lambda _: _)
        | cache.WriteCache(
            ie.current_env().get_cache_manager(p_expected), 'unused'))
    # Test that the TestStream is outputting to the correct PCollection.
    class TestStreamVisitor(PipelineVisitor):
      def __init__(self):
        self.output_tags = set()
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)
      def visit_transform(self, transform_node):
        transform = transform_node.transform
        if isinstance(transform, TestStream):
          self.output_tags = transform.output_tags
    v = TestStreamVisitor()
    actual_pipeline.visit(v)
    expected_output_tags = set([source_1_cache_key])
    actual_output_tags = v.output_tags
    self.assertSetEqual(expected_output_tags, actual_output_tags)
    # Test that the pipeline is as expected.
    assert_pipeline_proto_equal(
        self,
        p_expected.to_runner_api(),
        instrumenter.instrumented_pipeline_proto())
  def test_instrument_example_unbounded_pipeline_to_multiple_read_cache(self):
    """Tests that the instrumenter works for multiple unbounded sources.
    """
    # Create the pipeline that will be instrumented.
    p_original = beam.Pipeline(interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(
        StreamingCache(cache_dir=None), p_original)
    source_1 = p_original | 'source1' >> beam.io.ReadFromPubSub(
        subscription='projects/fake-project/subscriptions/fake_sub')
    source_2 = p_original | 'source2' >> beam.io.ReadFromPubSub(
        subscription='projects/fake-project/subscriptions/fake_sub')
    # pylint: disable=possibly-unused-variable
    pcoll_1 = source_1 | 'square1' >> beam.Map(lambda x: x * x)
    # pylint: disable=possibly-unused-variable
    pcoll_2 = source_2 | 'square2' >> beam.Map(lambda x: x * x)
    # Mock as if cacheable PCollections are cached.
    ib.watch(locals())
    for name, pcoll in locals().items():
      if not isinstance(pcoll, beam.pvalue.PCollection):
        continue
      cache_key = self.cache_key_of(name, pcoll)
      self._mock_write_cache(p_original, [], cache_key)
    # Instrument the original pipeline to create the pipeline the user will see.
    instrumenter = instr.build_pipeline_instrument(p_original)
    actual_pipeline = beam.Pipeline.from_runner_api(
        proto=instrumenter.instrumented_pipeline_proto(),
        runner=interactive_runner.InteractiveRunner(),
        options=None)
    # Now, build the expected pipeline which replaces the unbounded source with
    # a TestStream.
    source_1_cache_key = self.cache_key_of('source_1', source_1)
    source_2_cache_key = self.cache_key_of('source_2', source_2)
    p_expected = beam.Pipeline()
    test_stream = (
        p_expected
        | TestStream(
            output_tags=[
                self.cache_key_of('source_1', source_1),
                self.cache_key_of('source_2', source_2)
            ]))
    # pylint: disable=expression-not-assigned
    test_stream[source_1_cache_key] | 'square1' >> beam.Map(lambda x: x * x)
    # pylint: disable=expression-not-assigned
    test_stream[source_2_cache_key] | 'square2' >> beam.Map(lambda x: x * x)
    # Test that the TestStream is outputting to the correct PCollection.
    class TestStreamVisitor(PipelineVisitor):
      def __init__(self):
        self.output_tags = set()
      def enter_composite_transform(self, transform_node):
        self.visit_transform(transform_node)
      def visit_transform(self, transform_node):
        transform = transform_node.transform
        if isinstance(transform, TestStream):
          self.output_tags = transform.output_tags
    v = TestStreamVisitor()
    actual_pipeline.visit(v)
    expected_output_tags = set([source_1_cache_key, source_2_cache_key])
    actual_output_tags = v.output_tags
    self.assertSetEqual(expected_output_tags, actual_output_tags)
    # Test that the pipeline is as expected.
    assert_pipeline_proto_equal(
        self,
        p_expected.to_runner_api(),
        instrumenter.instrumented_pipeline_proto())
  def test_pipeline_pruned_when_input_pcoll_is_cached(self):
    """Sub-graphs producing already-cached PCollections are pruned."""
    user_pipeline, init_pcoll, _ = self._example_pipeline()
    runner_pipeline = beam.Pipeline.from_runner_api(
        user_pipeline.to_runner_api(), user_pipeline.runner, None)
    ie.current_env().add_derived_pipeline(user_pipeline, runner_pipeline)
    # Mock as if init_pcoll is cached.
    init_pcoll_cache_key = self.cache_key_of('init_pcoll', init_pcoll)
    self._mock_write_cache(
        user_pipeline, [b'1', b'2', b'3'], init_pcoll_cache_key)
    ie.current_env().mark_pcollection_computed([init_pcoll])
    # Build an instrument from the runner pipeline.
    pipeline_instrument = instr.build_pipeline_instrument(runner_pipeline)
    pruned_proto = pipeline_instrument.instrumented_pipeline_proto()
    # Skip the prune step for comparison, it should contain the sub-graph that
    # produces init_pcoll but not useful anymore.
    full_proto = pipeline_instrument._pipeline.to_runner_api()
    self.assertEqual(
        len(
            pruned_proto.components.transforms[
                'ref_AppliedPTransform_AppliedPTransform_1'].subtransforms),
        5)
    assert_pipeline_proto_not_contain_top_level_transform(
        self, pruned_proto, 'Init Source')
    self.assertEqual(
        len(
            full_proto.components.transforms[
                'ref_AppliedPTransform_AppliedPTransform_1'].subtransforms),
        6)
    assert_pipeline_proto_contain_top_level_transform(
        self, full_proto, 'Init-Source')
  def test_side_effect_pcoll_is_included(self):
    """Unwatched side-effect transforms are still included as targets."""
    pipeline_with_side_effect = beam.Pipeline(
        interactive_runner.InteractiveRunner())
    ie.current_env().set_cache_manager(
        InMemoryCache(), pipeline_with_side_effect)
    # Deliberately not assign the result to a variable to make it a
    # "side effect" transform. Note we never watch anything from
    # the pipeline defined locally either.
    # pylint: disable=range-builtin-not-iterating,expression-not-assigned
    pipeline_with_side_effect | 'Init Create' >> beam.Create(range(10))
    pipeline_instrument = instr.build_pipeline_instrument(
        pipeline_with_side_effect)
    self.assertTrue(pipeline_instrument._extended_targets)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
| lukecwik/incubator-beam | sdks/python/apache_beam/runners/interactive/pipeline_instrument_test.py | Python | apache-2.0 | 35,015 | [
"VisIt"
] | 58a461ed072456d357b3231d8b7a0ed9e1d0c6064157e570a15ba0a6de44585b |
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
import numpy as np
class Preprocessing(object):
"""
Preprocessing class used for spectrum preprocessing.
"""
def __init__(self, spectrum):
"""
Create an Preprocessing object
Args:
spectrum (pymatgen.core.spectrum.Spectrum): Spectrum object used to
initialize preprocessing class.
"""
self.spectrum = spectrum
self.process_tag = []
self.proc_dict = {
'1st_der': 'first_derivative',
'2nd_der': 'second_derivative',
'vecnorm': 'vector_norm_normalize',
'maxnorm': 'maximum_intensity_norm',
'areanorm': 'area_normalize',
'snvnorm': 'snv_norm',
'square': 'square_root_squashing',
'sigmoid': 'sigmoid_squashing',
'1st_wt': 'weighted_first_derivative',
'2nd_wt': 'weighted_second_derivative',
'intnorm': 'intensity_normalize'
}
@property
def preprocessing_method(self):
"""
Returns: a list of available preprocessing methods
"""
return list(self.proc_dict.values())
def first_derivative(self):
"""
Return first derivative as spectrum
"""
deriv_x, deriv_y = self.derivative_spect(self.spectrum, 1)
self.spectrum.x, self.spectrum.y = np.copy(deriv_x), np.copy(deriv_y)
def second_derivative(self):
"""
Return second derivative as spectrum
"""
deriv_x, deriv_y = self.derivative_spect(self.spectrum, 2)
self.spectrum.x, self.spectrum.y = np.copy(deriv_x), np.copy(deriv_y)
def weighted_first_derivative(self):
"""
Return weighted first derivative spectrum as spectrum
"""
deriv_x, deriv_y = self.derivative_spect(self.spectrum, 1)
self.spectrum.x, self.spectrum.y = deriv_x, np.multiply(
self.spectrum.y[:-1], deriv_y)
def weighted_second_derivative(self):
"""
Return weighted second derivative spectrum as spectrum
"""
deriv_x, deriv_y = self.derivative_spect(self.spectrum, 2)
self.spectrum.x, self.spectrum.y = deriv_x, np.multiply(
self.spectrum.y[:-2], deriv_y)
def intensity_normalize(self):
"""
Normalize with respect to the intensity sum
"""
self.spectrum.normalize('sum')
def maximum_intensity_norm(self):
"""
Normalize with respect to the maximum intensity
"""
self.spectrum.normalize('max')
def vector_norm_normalize(self):
"""
Normalize with respect to the norm of the spectrum as a vector
"""
spect_norm = np.linalg.norm(self.spectrum.y)
self.spectrum.y /= spect_norm
def area_normalize(self):
    """
    Normalize the peak intensity using the area under the curve, so that
    the normalized curve's under-curve area equals 1.
    """
    # np.trapz was removed in NumPy 2.0 in favour of np.trapezoid;
    # resolve whichever name is available at call time.
    _trapz = getattr(np, 'trapezoid', None) or getattr(np, 'trapz')
    under_curve_area = _trapz(self.spectrum.y, self.spectrum.x)
    self.spectrum.y /= under_curve_area
def snv_norm(self):
    """
    Standard normal variate (SNV) normalization: normalize with respect to
    the standard deviation of the spectrum intensity and return the
    baseline-shifted (non-negative) spectrum.
    """
    inten_mean = np.mean(self.spectrum.y)
    # BUG FIX: the scale factor must be the standard deviation; the
    # original code computed np.mean a second time here.
    inten_std = np.std(self.spectrum.y)
    normalized_mu = np.divide(np.subtract(self.spectrum.y, inten_mean), inten_std)
    # SNV normalization yields negative values; add the magnitude of the
    # minimum to shift the baseline back to zero.
    min_norm_mu = np.abs(np.min(normalized_mu))
    normalized_mu = np.add(normalized_mu, min_norm_mu)
    self.spectrum.y = normalized_mu
def square_root_squashing(self):
    """Squash the spectrum by taking the square root of the absolute
    intensity values."""
    self.spectrum.y = np.sqrt(np.abs(self.spectrum.y))
def sigmoid_squashing(self):
    """Squash the spectrum with the raised-cosine ("sigmoid") map:

        squashed_y = (1 - cos(pi * spectrum.y)) / 2
    """
    self.spectrum.y = np.divide(
        np.subtract(1, np.cos(np.pi * self.spectrum.y)), 2)
def derivative_spect(self, spect1, order):
    """Differentiate a spectrum ``order`` times with finite differences.

    To keep the returned arrays consistent, endpoints are dropped rather
    than padded, so each differentiation shortens the arrays by one point.

    Args:
        spect1: spectrum whose ``x`` holds energies and ``y`` absorptions.
        order: the number of times the spectrum is differenced.

    Returns: differentiated x and y arrays.
    """
    energies = np.copy(spect1.x)
    values = np.copy(spect1.y)
    remaining = order
    while remaining >= 1:
        # One forward-difference pass; x loses its last point.
        values = np.diff(values) / np.diff(energies)
        energies = energies[:-1]
        remaining -= 1
    return energies, values
def spectrum_process(self, process_seq):
    """Apply the preprocessing steps named in ``process_seq`` to
    ``self.spectrum``, recording each applied tag in ``self.process_tag``.

    Args:
        process_seq (list/tuple/str): short names (keys of
            ``self.proc_dict``) of the preprocessing methods to apply,
            in order.
    """
    if process_seq is None:
        return
    if isinstance(process_seq, (list, tuple)):
        for tag in process_seq:
            getattr(self, self.proc_dict[tag])()
            self.process_tag.append(tag)
    elif isinstance(process_seq, str):
        getattr(self, self.proc_dict[process_seq])()
        self.process_tag.append(process_seq)
| materialsvirtuallab/veidt | veidt/elsie/preprocessing.py | Python | bsd-3-clause | 5,913 | [
"pymatgen"
] | 003c142ee620c8e747479f8f695afeb171640802b4afa32678d6818328830947 |
from setuptools import setup, find_packages

# Long description shown on PyPI is taken verbatim from the README.
with open('README.md') as readme_file:
    readme = readme_file.read()

exec(open('graftm/version.py').read())  # loads __version__

# NOTE: the invalid `description_content_type` keyword was removed; it is
# not a setuptools option (it triggered an "Unknown distribution option"
# warning) and the correct `long_description_content_type` is already set.
setup(name='graftm',
      version=__version__,
      author='Joel Boyd, Ben Woodcroft',
      description='GraftM is a pipeline used for identifying and classifying marker gene reads from metagenomic datasets',
      long_description=readme,
      long_description_content_type="text/markdown",
      license='GPL3+',
      keywords="",
      packages=find_packages(exclude='docs'),
      install_requires=('biopython >=1.64',
                        'biom-format >=2.1.4',
                        'extern >=0.0.4',
                        'taxtastic >=0.5.4',
                        'bird_tool_utils',
                        'DendroPy >= 4.1.0'),
      setup_requires=['nose>=1.0'],
      test_suite='nose.collector',
      url='http://geronimp.github.io/graftM',
      scripts=['bin/graftM'],
      data_files=[
          ('share', ['share/18S.hmm']),
      ],
      )
| geronimp/graftM | setup.py | Python | gpl-3.0 | 1,093 | [
"Biopython"
] | 75331dadf850f34a5aaa2a2ef7bfdc0aa4c2565a51892c96f4a5981782c1170b |
# this module contains functions for doing complex Gaussian math. Right
# now everything is hard coded for adiabatic/diabatic representation, but
# it shouldn't be hard to modify for DGAS
import cmath
import math
import types
import numpy as np
from pyspawn.fmsobj import fmsobj
from pyspawn.traj import traj
# compute the overlap of two vibronic TBFs (electronic part included)
def overlap_nuc_elec(ti, tj, positions_i="positions", positions_j="positions", momenta_i="momenta",
                     momenta_j="momenta"):
    """Overlap of two vibronic TBFs (electronic part included).

    Electronic states are treated as orthonormal: the overlap vanishes
    whenever the two trajectories sit on different electronic states.
    """
    if ti.get_istate() != tj.get_istate():
        return complex(0.0, 0.0)
    return overlap_nuc(ti, tj, positions_i=positions_i, positions_j=positions_j,
                       momenta_i=momenta_i, momenta_j=momenta_j)
# compute the overlap of two nuclear TBFs (electronic part not included)
def overlap_nuc(ti, tj, positions_i="positions", positions_j="positions", momenta_i="momenta", momenta_j="momenta"):
    """Overlap of two nuclear TBFs (electronic part not included).

    The positions_*/momenta_* arguments may either name an accessor suffix
    (e.g. "positions" -> ti.get_positions()) or directly supply the arrays.

    Returns:
        complex: product of the 1-D overlaps over all nuclear dimensions.
    """
    def _resolve(traj_obj, name_or_values):
        # Accept either an attribute-name string or explicit values.
        # getattr() replaces the original eval("ti.get_..."), which was
        # unsafe and broke under Python 3 (types.StringTypes was removed).
        if isinstance(name_or_values, str):
            return getattr(traj_obj, "get_" + name_or_values)()
        return name_or_values

    ri = _resolve(ti, positions_i)
    rj = _resolve(tj, positions_j)
    pi = _resolve(ti, momenta_i)
    pj = _resolve(tj, momenta_j)
    widthsi = ti.get_widths()
    widthsj = tj.get_widths()
    Sij = 1.0
    # Gaussians are separable: the total overlap is the product of 1-D ones.
    for idim in range(ti.get_numdims()):
        Sij *= overlap_nuc_1d(ri[idim], rj[idim], pi[idim], pj[idim],
                              widthsi[idim], widthsj[idim])
    return Sij
# compute the kinetic energy matrix element between two vibronic TBFs
def kinetic_nuc_elec(ti, tj, positions_i="positions", positions_j="positions", momenta_i="momenta",
                     momenta_j="momenta"):
    """Kinetic-energy matrix element between two vibronic TBFs.

    Electronic states are treated as orthonormal: the element vanishes
    whenever the two trajectories sit on different electronic states.
    """
    if ti.get_istate() != tj.get_istate():
        return complex(0.0, 0.0)
    return kinetic_nuc(ti, tj, positions_i=positions_i, positions_j=positions_j,
                       momenta_i=momenta_i, momenta_j=momenta_j)
# compute the kinetic energy matrix element between two nuclear TBFs
def kinetic_nuc(ti, tj, positions_i="positions", positions_j="positions", momenta_i="momenta", momenta_j="momenta"):
    """Kinetic-energy matrix element between two nuclear TBFs.

    The positions_*/momenta_* keywords name which accessors to call on
    ti/tj (e.g. "positions" -> ti.get_positions()).

    Returns:
        complex: <ti| T |tj> summed over all nuclear dimensions.
    """
    # getattr() replaces the original eval() calls: same behaviour, but
    # safe and Python-3 compatible.
    ri = getattr(ti, "get_" + positions_i)()
    rj = getattr(tj, "get_" + positions_j)()
    pi = getattr(ti, "get_" + momenta_i)()
    pj = getattr(tj, "get_" + momenta_j)()
    widthsi = ti.get_widths()
    widthsj = tj.get_widths()
    massesi = ti.get_masses()
    ndim = ti.get_numdims()
    S1D = np.zeros(ndim, dtype=np.complex128)
    T1D = np.zeros(ndim, dtype=np.complex128)
    for idim in range(ndim):
        xi = ri[idim]
        xj = rj[idim]
        di = pi[idim]
        dj = pj[idim]
        xwi = widthsi[idim]
        xwj = widthsj[idim]
        m = massesi[idim]
        T1D[idim] = 0.5 * kinetic_nuc_1d(xi, xj, di, dj, xwi, xwj) / m
        S1D[idim] = overlap_nuc_1d(xi, xj, di, dj, xwi, xwj)
    # Separable product: T = sum_i T_i * prod_{j != i} S_j.
    Tij = 0.0
    for idim in range(ndim):
        Ttmp = T1D[idim]
        for jdim in range(ndim):
            if jdim != idim:
                Ttmp *= S1D[jdim]
        Tij += Ttmp
    return Tij
# compute the Sdot matrix element between two vibronic TBFs
def Sdot_nuc_elec(ti, tj, positions_i="positions", positions_j="positions", momenta_i="momenta", momenta_j="momenta",
                  forces_j="forces"):
    """Sdot (time-derivative overlap) matrix element between two vibronic TBFs.

    Electronic states are treated as orthonormal: the element vanishes
    whenever the two trajectories sit on different electronic states.
    """
    if ti.get_istate() != tj.get_istate():
        return complex(0.0, 0.0)
    return Sdot_nuc(ti, tj, positions_i=positions_i, positions_j=positions_j,
                    momenta_i=momenta_i, momenta_j=momenta_j, forces_j=forces_j)
# compute the Sdot matrix element between two nuclear TBFs
def Sdot_nuc(ti, tj, positions_i="positions", positions_j="positions", momenta_i="momenta", momenta_j="momenta",
             forces_j="forces"):
    """Sdot matrix element <ti | d/dt tj> between two nuclear TBFs.

    The positions_*/momenta_*/forces_* keywords name which accessors to
    call on ti/tj (e.g. "positions" -> ti.get_positions()).

    Returns:
        complex: the Sdot matrix element.
    """
    c1i = (complex(0.0, 1.0))
    # getattr() replaces the original eval() calls: same behaviour, but
    # safe and Python-3 compatible.
    ri = getattr(ti, "get_" + positions_i)()
    rj = getattr(tj, "get_" + positions_j)()
    pi = getattr(ti, "get_" + momenta_i)()
    pj = getattr(tj, "get_" + momenta_j)()
    fj = getattr(tj, "get_" + forces_j)()
    massesi = ti.get_masses()
    widthsj = tj.get_widths()
    Sij = overlap_nuc(ti, tj, positions_i=positions_i, positions_j=positions_j, momenta_i=momenta_i,
                      momenta_j=momenta_j)
    deltar = ri - rj
    psum = pi + pj
    pdiff = pi - pj
    o4wj = 0.25 / widthsj
    # Derivatives of the complex Gaussian exponent w.r.t. rj and pj.
    Cdbydr = widthsj * deltar - (0.5 * c1i) * psum
    Cdbydp = o4wj * pdiff + (0.5 * c1i) * deltar
    # Chain rule: dr/dt = p/m, dp/dt = f.
    Ctemp1 = Cdbydr * pj / massesi + Cdbydp * fj
    Ctemp = np.sum(Ctemp1)
    Sdot_ij = Ctemp * Sij
    return Sdot_ij
# compute 1-dimensional nuclear overlaps
def overlap_nuc_1d(xi, xj, di, dj, xwi, xwj):
    """Overlap of two 1-D Gaussian basis functions.

    Args:
        xi, xj: center positions.
        di, dj: center momenta.
        xwi, xwj: Gaussian width parameters.

    Returns:
        complex overlap; overlaps smaller than exp(-10) are truncated to 0.
    """
    imag_unit = (complex(0.0, 1.0))
    dx = xi - xj
    dp = di - dj
    inv_width_sum = 1.0 / (xwi + xwj)
    exponent = inv_width_sum * (xwi * xwj * dx * dx + 0.25 * dp * dp)
    if not (exponent < 10.0):
        # Negligible overlap: truncate to zero.
        return 0.0
    phase = (di * xi - dj * xj)
    phase = phase - inv_width_sum * (xwi * xi + xwj * xj) * dp
    geo_mean_width = math.sqrt(xwi * xwj)
    prefactor = math.sqrt(2.0 * geo_mean_width * inv_width_sum)
    prefactor = prefactor * math.exp(-1.0 * exponent)
    return prefactor * cmath.exp(phase * imag_unit)
# compute 1-dimensional nuclear kinetic energy matrix elements
def kinetic_nuc_1d(xi, xj, di, dj, xwi, xwj):
    """1-D nuclear kinetic-energy matrix element between two Gaussians.

    Built as (analytic polynomial factor) * (1-D overlap); the mass factor
    is applied by the caller.
    """
    imag_unit = (complex(0.0, 1.0))
    momentum_sum = di + dj
    dx = xi - xj
    real_fac = xwi + 0.25 * momentum_sum * momentum_sum - xwi * xwi * dx * dx
    imag_fac = xwi * dx * momentum_sum
    overlap = overlap_nuc_1d(xi, xj, di, dj, xwi, xwj)
    return (real_fac + imag_unit * imag_fac) * overlap
| blevine37/pySpawn17 | pyspawn/complexgaussian.py | Python | mit | 6,103 | [
"Gaussian"
] | 137e35bc9e42c2b73a638520393d75fe0f0aabfea12d52e9e5b82954d93e086d |
"""
Module to handle gamma matrices expressed as tensor objects.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
>>> from sympy.tensor.tensor import tensor_indices
>>> i = tensor_indices('i', LorentzIndex)
>>> G(i)
GammaMatrix(i)
Note that there is already an instance of GammaMatrixHead in four dimensions:
GammaMatrix, which is simply declared as
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix
>>> from sympy.tensor.tensor import tensor_indices
>>> i = tensor_indices('i', LorentzIndex)
>>> GammaMatrix(i)
GammaMatrix(i)
To access the metric tensor
>>> LorentzIndex.metric
metric(LorentzIndex,LorentzIndex)
"""
from sympy import S, Mul, eye, trace
from sympy.tensor.tensor import TensorIndexType, TensorIndex,\
TensMul, TensAdd, tensor_mul, Tensor, TensorHead, TensorSymmetry
from sympy.core.compatibility import range
# DiracSpinorIndex = TensorIndexType('DiracSpinorIndex', dim=4, dummy_name="S")

# Lorentz index type shared by all gamma matrices in this module (dim=4).
LorentzIndex = TensorIndexType('LorentzIndex', dim=4, dummy_name="L")

# The gamma-matrix tensor head: a single Lorentz index, no index symmetry.
# NOTE(review): comm=None presumably marks a non-commuting tensor head --
# confirm against sympy.tensor.tensor documentation.
GammaMatrix = TensorHead("GammaMatrix", [LorentzIndex],
                         TensorSymmetry.no_symmetry(1), comm=None)
def extract_type_tens(expression, component):
    """
    Extract from a ``TensExpr`` all tensors with `component`.

    Returns two tensor expressions:

    * the first contains all ``Tensor`` of having `component`.
    * the second contains all remaining.
    """
    if isinstance(expression, Tensor):
        factors = [expression]
    elif isinstance(expression, TensMul):
        factors = expression.args
    else:
        raise ValueError('wrong type')

    # Partition the product into factors matching `component` and the rest.
    matched = S.One
    remainder = S.One
    for factor in factors:
        if isinstance(factor, Tensor) and factor.component == component:
            matched *= factor
        else:
            remainder *= factor
    return matched, remainder
def simplify_gamma_expression(expression):
    """Simplify the gamma-matrix factors of `expression`, leaving all
    non-gamma factors untouched."""
    gamma_part, other_part = extract_type_tens(expression, GammaMatrix)
    return _simplify_single_line(gamma_part) * other_part
def simplify_gpgp(ex, sort=True):
    """
    simplify products ``G(i)*p(-i)*G(j)*p(-j) -> p(i)*p(-i)``

    Examples
    ========

    >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
        LorentzIndex, simplify_gpgp
    >>> from sympy.tensor.tensor import tensor_indices, tensor_heads
    >>> p, q = tensor_heads('p, q', [LorentzIndex])
    >>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
    >>> ps = p(i0)*G(-i0)
    >>> qs = q(i0)*G(-i0)
    >>> simplify_gpgp(ps*qs*qs)
    GammaMatrix(-L_0)*p(L_0)*q(L_1)*q(-L_1)
    """
    def _simplify_gpgp(ex):
        components = ex.components
        a = []
        comp_map = []
        # comp_map maps each index slot to the component it belongs to.
        for i, comp in enumerate(components):
            comp_map.extend([i]*comp.rank)
        # dum entries: (pos1, pos2, component of pos1, component of pos2).
        dum = [(i[0], i[1], comp_map[i[0]], comp_map[i[1]]) for i in ex.dum]
        for i in range(len(components)):
            if components[i] != GammaMatrix:
                continue
            # Collect gamma matrices contracted with a commuting rank-1
            # tensor (e.g. a momentum p); pairs are stored as
            # (gamma position, vector position).
            for dx in dum:
                if dx[2] == i:
                    p_pos1 = dx[3]
                elif dx[3] == i:
                    p_pos1 = dx[2]
                else:
                    continue
                comp1 = components[p_pos1]
                if comp1.comm == 0 and comp1.rank == 1:
                    a.append((i, p_pos1))
        if not a:
            return ex
        elim = set()
        tv = []
        hit = True
        coeff = S.One
        ta = None
        # Repeatedly look for two adjacent G*p factors contracted with the
        # same vector head and replace them by p(mu)*p(-mu).
        while hit:
            hit = False
            for i, ai in enumerate(a[:-1]):
                if ai[0] in elim:
                    continue
                if ai[0] != a[i + 1][0] - 1:
                    continue
                if components[ai[1]] != components[a[i + 1][1]]:
                    continue
                elim.add(ai[0])
                elim.add(ai[1])
                elim.add(a[i + 1][0])
                elim.add(a[i + 1][1])
                if not ta:
                    ta = ex.split()
                    mu = TensorIndex('mu', LorentzIndex)
                hit = True
                if i == 0:
                    coeff = ex.coeff
                tx = components[ai[1]](mu)*components[ai[1]](-mu)
                if len(a) == 2:
                    tx *= 4  # eye(4)
                tv.append(tx)
                break

        if tv:
            a = [x for j, x in enumerate(ta) if j not in elim]
            a.extend(tv)
            t = tensor_mul(*a)*coeff
            # t = t.replace(lambda x: x.is_Matrix, lambda x: 1)
            return t
        else:
            return ex

    if sort:
        ex = ex.sorted_components()
    # this would be better off with pattern matching
    while 1:
        t = _simplify_gpgp(ex)
        if t != ex:
            ex = t
        else:
            return t
def gamma_trace(t):
    """
    trace of a single line of gamma matrices

    Examples
    ========

    >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
        gamma_trace, LorentzIndex
    >>> from sympy.tensor.tensor import tensor_indices, tensor_heads
    >>> p, q = tensor_heads('p, q', [LorentzIndex])
    >>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
    >>> ps = p(i0)*G(-i0)
    >>> qs = q(i0)*G(-i0)
    >>> gamma_trace(G(i0)*G(i1))
    4*metric(i0, i1)
    >>> gamma_trace(ps*ps) - 4*p(i0)*p(-i0)
    0
    >>> gamma_trace(ps*qs + ps*ps) - 4*p(i0)*p(-i0) - 4*p(i0)*q(-i0)
    0
    """
    # A sum is traced term by term; a product is first reduced with the
    # Kahane algorithm and then traced.
    if isinstance(t, TensAdd):
        return TensAdd(*[_trace_single_line(term) for term in t.args])
    return _trace_single_line(_simplify_single_line(t))
def _simplify_single_line(expression):
    """
    Simplify single-line product of gamma matrices.

    Examples
    ========

    >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
        LorentzIndex, _simplify_single_line
    >>> from sympy.tensor.tensor import tensor_indices, TensorHead
    >>> p = TensorHead('p', [LorentzIndex])
    >>> i0,i1 = tensor_indices('i0:2', LorentzIndex)
    >>> _simplify_single_line(G(i0)*G(i1)*p(-i1)*G(-i0)) + 2*G(i0)*p(-i0)
    0
    """
    gamma_part, residue = extract_type_tens(expression, GammaMatrix)
    if gamma_part != 1:
        gamma_part = kahane_simplify(gamma_part)
    return gamma_part * residue
def _trace_single_line(t):
    """
    Evaluate the trace of a single gamma matrix line inside a ``TensExpr``.

    Notes
    =====

    If there are ``DiracSpinorIndex.auto_left`` and ``DiracSpinorIndex.auto_right``
    indices trace over them; otherwise traces are not implied (explain)


    Examples
    ========

    >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
        LorentzIndex, _trace_single_line
    >>> from sympy.tensor.tensor import tensor_indices, TensorHead
    >>> p = TensorHead('p', [LorentzIndex])
    >>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
    >>> _trace_single_line(G(i0)*G(i1))
    4*metric(i0, i1)
    >>> _trace_single_line(G(i0)*p(-i0)*G(i1)*p(-i1)) - 4*p(i0)*p(-i0)
    0
    """
    def _trace_single_line1(t):
        t = t.sorted_components()
        components = t.components
        ncomps = len(components)
        g = LorentzIndex.metric
        # gamma matrices are in a[i:j]
        hit = 0
        for i in range(ncomps):
            if components[i] == GammaMatrix:
                hit = 1
                break

        for j in range(i + hit, ncomps):
            if components[j] != GammaMatrix:
                break
        else:
            j = ncomps

        numG = j - i
        if numG == 0:
            # No gamma matrices at all: the trace acts trivially.
            tcoeff = t.coeff
            return t.nocoeff if tcoeff else t
        if numG % 2 == 1:
            # Trace of an odd number of gamma matrices vanishes.
            return TensMul.from_data(S.Zero, [], [], [])
        elif numG > 4:
            # Recursive reduction: expand along the first gamma matrix.
            # find the open matrix indices and connect them:
            a = t.split()
            ind1 = a[i].get_indices()[0]
            ind2 = a[i + 1].get_indices()[0]
            aa = a[:i] + a[i + 2:]
            t1 = tensor_mul(*aa)*g(ind1, ind2)
            t1 = t1.contract_metric(g)
            args = [t1]
            sign = 1
            for k in range(i + 2, j):
                sign = -sign
                ind2 = a[k].get_indices()[0]
                aa = a[:i] + a[i + 1:k] + a[k + 1:]
                t2 = sign*tensor_mul(*aa)*g(ind1, ind2)
                t2 = t2.contract_metric(g)
                t2 = simplify_gpgp(t2, False)
                args.append(t2)
            t3 = TensAdd(*args)
            t3 = _trace_single_line(t3)
            return t3
        else:
            # numG is 2 or 4: use the closed-form trace.
            a = t.split()
            t1 = _gamma_trace1(*a[i:j])
            a2 = a[:i] + a[j:]
            t2 = tensor_mul(*a2)
            t3 = t1*t2
            if not t3:
                return t3
            t3 = t3.contract_metric(g)
            return t3

    t = t.expand()
    if isinstance(t, TensAdd):
        a = [_trace_single_line1(x)*x.coeff for x in t.args]
        return TensAdd(*a)
    elif isinstance(t, (Tensor, TensMul)):
        r = t.coeff*_trace_single_line1(t)
        return r
    else:
        return trace(t)
def _gamma_trace1(*a):
    """Closed-form trace of zero, two, or four gamma matrices (d=4)."""
    gctr = 4  # FIXME specific for d=4
    g = LorentzIndex.metric
    if not a:
        return gctr
    n = len(a)
    if n % 2 == 1:
        # return TensMul.from_data(S.Zero, [], [], [])
        return S.Zero
    if n == 2:
        ind0, ind1 = (x.get_indices()[0] for x in a)
        return gctr*g(ind0, ind1)
    if n == 4:
        ind0, ind1, ind2, ind3 = (x.get_indices()[0] for x in a)
        return gctr*(g(ind0, ind1)*g(ind2, ind3) -
                     g(ind0, ind2)*g(ind1, ind3) +
                     g(ind0, ind3)*g(ind1, ind2))
def kahane_simplify(expression):
    r"""
    This function cancels contracted elements in a product of four
    dimensional gamma matrices, resulting in an expression equal to the given
    one, without the contracted gamma matrices.

    Parameters
    ==========

    `expression` the tensor expression containing the gamma matrices to simplify.

    Notes
    =====

    If spinor indices are given, the matrices must be given in
    the order given in the product.

    Algorithm
    =========

    The idea behind the algorithm is to use some well-known identities,
    i.e., for contractions enclosing an even number of `\gamma` matrices

    `\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N}} \gamma_\mu = 2 (\gamma_{a_{2N}} \gamma_{a_1} \cdots \gamma_{a_{2N-1}} + \gamma_{a_{2N-1}} \cdots \gamma_{a_1} \gamma_{a_{2N}} )`

    for an odd number of `\gamma` matrices

    `\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N+1}} \gamma_\mu = -2 \gamma_{a_{2N+1}} \gamma_{a_{2N}} \cdots \gamma_{a_{1}}`

    Instead of repeatedly applying these identities to cancel out all contracted indices,
    it is possible to recognize the links that would result from such an operation,
    the problem is thus reduced to a simple rearrangement of free gamma matrices.

    Examples
    ========

    When using, always remember that the original expression coefficient
    has to be handled separately

    >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
    >>> from sympy.physics.hep.gamma_matrices import kahane_simplify
    >>> from sympy.tensor.tensor import tensor_indices
    >>> i0, i1, i2 = tensor_indices('i0:3', LorentzIndex)
    >>> ta = G(i0)*G(-i0)
    >>> kahane_simplify(ta)
    Matrix([
    [4, 0, 0, 0],
    [0, 4, 0, 0],
    [0, 0, 4, 0],
    [0, 0, 0, 4]])
    >>> tb = G(i0)*G(i1)*G(-i0)
    >>> kahane_simplify(tb)
    -2*GammaMatrix(i1)
    >>> t = G(i0)*G(-i0)
    >>> kahane_simplify(t)
    Matrix([
    [4, 0, 0, 0],
    [0, 4, 0, 0],
    [0, 0, 4, 0],
    [0, 0, 0, 4]])
    >>> t = G(i0)*G(-i0)
    >>> kahane_simplify(t)
    Matrix([
    [4, 0, 0, 0],
    [0, 4, 0, 0],
    [0, 0, 4, 0],
    [0, 0, 0, 4]])

    If there are no contractions, the same expression is returned

    >>> tc = G(i0)*G(i1)
    >>> kahane_simplify(tc)
    GammaMatrix(i0)*GammaMatrix(i1)

    References
    ==========

    [1] Algorithm for Reducing Contracted Products of gamma Matrices,
    Joseph Kahane, Journal of Mathematical Physics, Vol. 9, No. 10, October 1968.
    """
    # A plain SymPy Mul carries no tensor structure: nothing to cancel.
    if isinstance(expression, Mul):
        return expression
    if isinstance(expression, TensAdd):
        return TensAdd(*[kahane_simplify(arg) for arg in expression.args])

    if isinstance(expression, Tensor):
        return expression

    assert isinstance(expression, TensMul)

    gammas = expression.args

    for gamma in gammas:
        assert gamma.component == GammaMatrix

    free = expression.free
    # spinor_free = [_ for _ in expression.free_in_args if _[1] != 0]

    # if len(spinor_free) == 2:
    #     spinor_free.sort(key=lambda x: x[2])
    #     assert spinor_free[0][1] == 1 and spinor_free[-1][1] == 2
    #     assert spinor_free[0][2] == 0
    # elif spinor_free:
    #     raise ValueError('spinor indices do not match')

    # Keep only Lorentz-index contractions (as (pos1, pos2) pairs).
    dum = []
    for dum_pair in expression.dum:
        if expression.index_types[dum_pair[0]] == LorentzIndex:
            dum.append((dum_pair[0], dum_pair[1]))

    dum = sorted(dum)

    if len(dum) == 0:  # or GammaMatrixHead:
        # no contractions in `expression`, just return it.
        return expression

    # find the `first_dum_pos`, i.e. the position of the first contracted
    # gamma matrix, Kahane's algorithm as described in his paper requires the
    # gamma matrix expression to start with a contracted gamma matrix, this is
    # a workaround which ignores possible initial free indices, and re-adds
    # them later.

    first_dum_pos = min(map(min, dum))

    # for p1, p2, a1, a2 in expression.dum_in_args:
    #     if p1 != 0 or p2 != 0:
    #         # only Lorentz indices, skip Dirac indices:
    #         continue
    #     first_dum_pos = min(p1, p2)
    #     break

    # Every contraction occupies two index slots.
    total_number = len(free) + len(dum)*2
    number_of_contractions = len(dum)

    free_pos = [None]*total_number
    for i in free:
        free_pos[i[1]] = i[0]

    # `index_is_free` is a list of booleans, to identify index position
    # and whether that index is free or dummy.
    index_is_free = [False]*total_number

    for i, indx in enumerate(free):
        index_is_free[indx[1]] = True

    # `links` is a dictionary containing the graph described in Kahane's paper,
    # to every key correspond one or two values, representing the linked indices.
    # All values in `links` are integers, negative numbers are used in the case
    # where it is necessary to insert gamma matrices between free indices, in
    # order to make Kahane's algorithm work (see paper).
    links = dict()
    for i in range(first_dum_pos, total_number):
        links[i] = []

    # `cum_sign` is a step variable to mark the sign of every index, see paper.
    cum_sign = -1
    # `cum_sign_list` keeps storage for all `cum_sign` (every index).
    cum_sign_list = [None]*total_number
    block_free_count = 0

    # multiply `resulting_coeff` by the coefficient parameter, the rest
    # of the algorithm ignores a scalar coefficient.
    resulting_coeff = S.One

    # initialize a list of lists of indices. The outer list will contain all
    # additive tensor expressions, while the inner list will contain the
    # free indices (rearranged according to the algorithm).
    resulting_indices = [[]]

    # start to count the `connected_components`, which together with the number
    # of contractions, determines a -1 or +1 factor to be multiplied.
    connected_components = 1

    # First loop: here we fill `cum_sign_list`, and draw the links
    # among consecutive indices (they are stored in `links`). Links among
    # non-consecutive indices will be drawn later.
    for i, is_free in enumerate(index_is_free):
        # if `expression` starts with free indices, they are ignored here;
        # they are later added as they are to the beginning of all
        # `resulting_indices` list of lists of indices.
        if i < first_dum_pos:
            continue

        if is_free:
            block_free_count += 1
            # if previous index was free as well, draw an arch in `links`.
            if block_free_count > 1:
                links[i - 1].append(i)
                links[i].append(i - 1)
        else:
            # Change the sign of the index (`cum_sign`) if the number of free
            # indices preceding it is even.
            cum_sign *= 1 if (block_free_count % 2) else -1
            if block_free_count == 0 and i != first_dum_pos:
                # check if there are two consecutive dummy indices:
                # in this case create virtual indices with negative position,
                # these "virtual" indices represent the insertion of two
                # gamma^0 matrices to separate consecutive dummy indices, as
                # Kahane's algorithm requires dummy indices to be separated by
                # free indices. The product of two gamma^0 matrices is unity,
                # so the new expression being examined is the same as the
                # original one.
                if cum_sign == -1:
                    links[-1-i] = [-1-i+1]
                    links[-1-i+1] = [-1-i]
            if (i - cum_sign) in links:
                if i != first_dum_pos:
                    links[i].append(i - cum_sign)
                if block_free_count != 0:
                    if i - cum_sign < len(index_is_free):
                        if index_is_free[i - cum_sign]:
                            links[i - cum_sign].append(i)
            block_free_count = 0

        cum_sign_list[i] = cum_sign

    # The previous loop has only created links between consecutive free indices,
    # it is necessary to properly create links among dummy (contracted) indices,
    # according to the rules described in Kahane's paper. There is only one exception
    # to Kahane's rules: the negative indices, which handle the case of some
    # consecutive free indices (Kahane's paper just describes dummy indices
    # separated by free indices, hinting that free indices can be added without
    # altering the expression result).
    for i in dum:
        # get the positions of the two contracted indices:
        pos1 = i[0]
        pos2 = i[1]

        # create Kahane's upper links, i.e. the upper arcs between dummy
        # (i.e. contracted) indices:
        links[pos1].append(pos2)
        links[pos2].append(pos1)

        # create Kahane's lower links, this corresponds to the arcs below
        # the line described in the paper:

        # first we move `pos1` and `pos2` according to the sign of the indices:
        linkpos1 = pos1 + cum_sign_list[pos1]
        linkpos2 = pos2 + cum_sign_list[pos2]

        # otherwise, perform some checks before creating the lower arcs:

        # make sure we are not exceeding the total number of indices:
        if linkpos1 >= total_number:
            continue
        if linkpos2 >= total_number:
            continue

        # make sure we are not below the first dummy index in `expression`:
        if linkpos1 < first_dum_pos:
            continue
        if linkpos2 < first_dum_pos:
            continue

        # check if the previous loop created "virtual" indices between dummy
        # indices, in such a case relink `linkpos1` and `linkpos2`:
        if (-1-linkpos1) in links:
            linkpos1 = -1-linkpos1
        if (-1-linkpos2) in links:
            linkpos2 = -1-linkpos2

        # move only if not next to free index:
        if linkpos1 >= 0 and not index_is_free[linkpos1]:
            linkpos1 = pos1
        if linkpos2 >= 0 and not index_is_free[linkpos2]:
            linkpos2 = pos2

        # create the lower arcs:
        if linkpos2 not in links[linkpos1]:
            links[linkpos1].append(linkpos2)
        if linkpos1 not in links[linkpos2]:
            links[linkpos2].append(linkpos1)

    # This loop starts from the `first_dum_pos` index (first dummy index)
    # walks through the graph deleting the visited indices from `links`,
    # it adds a gamma matrix for every free index in encounters, while it
    # completely ignores dummy indices and virtual indices.
    pointer = first_dum_pos
    previous_pointer = 0
    while True:
        if pointer in links:
            next_ones = links.pop(pointer)
        else:
            break

        if previous_pointer in next_ones:
            next_ones.remove(previous_pointer)

        previous_pointer = pointer

        if next_ones:
            pointer = next_ones[0]
        else:
            break

        if pointer == previous_pointer:
            break

        if pointer >= 0 and free_pos[pointer] is not None:
            for ri in resulting_indices:
                ri.append(free_pos[pointer])

    # The following loop removes the remaining connected components in `links`.
    # If there are free indices inside a connected component, it gives a
    # contribution to the resulting expression given by the factor
    # `gamma_a gamma_b ... gamma_z + gamma_z ... gamma_b gamma_a`, in Kahanes's
    # paper represented as {gamma_a, gamma_b, ... , gamma_z},
    # virtual indices are ignored. The variable `connected_components` is
    # increased by one for every connected component this loop encounters.

    # If the connected component has virtual and dummy indices only
    # (no free indices), it contributes to `resulting_indices` by a factor of two.
    # The multiplication by two is a result of the
    # factor {gamma^0, gamma^0} = 2 I, as it appears in Kahane's paper.
    # Note: curly brackets are meant as in the paper, as a generalized
    # multi-element anticommutator!
    while links:
        connected_components += 1
        pointer = min(links.keys())
        previous_pointer = pointer
        # the inner loop erases the visited indices from `links`, and it adds
        # all free indices to `prepend_indices` list, virtual indices are
        # ignored.
        prepend_indices = []
        while True:
            if pointer in links:
                next_ones = links.pop(pointer)
            else:
                break

            if previous_pointer in next_ones:
                if len(next_ones) > 1:
                    next_ones.remove(previous_pointer)

            previous_pointer = pointer

            if next_ones:
                pointer = next_ones[0]

            if pointer >= first_dum_pos and free_pos[pointer] is not None:
                prepend_indices.insert(0, free_pos[pointer])
        # if `prepend_indices` is void, it means there are no free indices
        # in the loop (and it can be shown that there must be a virtual index),
        # loops of virtual indices only contribute by a factor of two:
        if len(prepend_indices) == 0:
            resulting_coeff *= 2
        # otherwise, add the free indices in `prepend_indices` to
        # the `resulting_indices`:
        else:
            expr1 = prepend_indices
            expr2 = list(reversed(prepend_indices))
            resulting_indices = [expri + ri for ri in resulting_indices for expri in (expr1, expr2)]

    # sign correction, as described in Kahane's paper:
    resulting_coeff *= -1 if (number_of_contractions - connected_components + 1) % 2 else 1
    # power of two factor, as described in Kahane's paper:
    resulting_coeff *= 2**(number_of_contractions)

    # If `first_dum_pos` is not zero, it means that there are trailing free gamma
    # matrices in front of `expression`, so multiply by them:
    for i in range(0, first_dum_pos):
        [ri.insert(0, free_pos[i]) for ri in resulting_indices]

    resulting_expr = S.Zero
    for i in resulting_indices:
        temp_expr = S.One
        for j in i:
            temp_expr *= GammaMatrix(j)
        resulting_expr += temp_expr

    t = resulting_coeff * resulting_expr
    t1 = None
    if isinstance(t, TensAdd):
        t1 = t.args[0]
    elif isinstance(t, TensMul):
        t1 = t
    if t1:
        pass
    else:
        # All gamma matrices cancelled: the result is a scalar times the
        # 4x4 identity matrix.
        t = eye(4)*t
    return t
| kaushik94/sympy | sympy/physics/hep/gamma_matrices.py | Python | bsd-3-clause | 24,227 | [
"DIRAC"
] | c07b9914029210452488b4581fb467d23a72db4d6e4247131ee36a0aff602d75 |
#
# -*- coding: utf-8 -*-
#
# Python-Based Truss Solver
# =============================================================
#
# Author: Robert Grandin
#
# Date: Fall 2007 (Creation of original Fortran solution in AerE 361)
# October 2011 (Python implementation)
# November 2014 (Clean-up and graphical/VTK output)
#
#
# PURPOSE:
# This code solves a truss for the internal load, strain, and stress of each member.
# Being a truss, all members are assumed to be two-force members and no bending
# moments are considered. Both 2-dimensional and 3-dimensional trusses can be
# solved with this code.
#
#
# INSTRUCTIONS & NOTES:
# - Dictionaries are used to define the entity properties. Names for the properties
# should be self-explanatory. Some notes:
# - '_flag' entries identify either displacement ('d') or force ('f') boundary
# conditions (BCs). Applied forces require force BCs to be specified.
# Pin/roller locations require displacement BCs. Free-to-move nodes will
# typically have 0-force BCs.
# - '_bcval' entries specify the BC value for the corresponding flag.
# - If solving a 2-dimensional problem, constrain node motion in the 3rd
# dimension to be 0. Allowing nodal motion in the 3rd dimension (by setting
# the constraint to 0-force) will produce a matrix with non-empty null-space.
# Displacements in the third dimension will reside in this null-space.
# - Input data can be saved in a python data file. Create a module for your
# problem and define a function which returns 'nodes, members'.
# - Examples shown below for 2D, 3D, and file-based input. See data file
# 'em514_problem08.py' for an example of how to write an input file.
#
#
# HOMEWORK DISCLAIMER:
# This tool is intended to be a learning aid. Feel free to use it to check your
# work, but do not use it in place of learning how to find the solution yourself.
#
# When using this tool for statics problems, the member loads calculated by this
# tool will not match the correct answer for the statics problem. This is due
# to the fact that this tool considers displacements whereas displacements are
# not considered in a statics problem (but displacements are considered in
# mechanics problems). Even though the numerical results will not match when
# checking statics results, the discrepancy should be small enough to enable
# you to determine if your statics result is correct.
#
#
#
# ========================
#
# 2D SAMPLE INPUT
#
#nodes = [{'x': 0.0e0, 'y': 0.0e0, 'z': 0.0e0, 'xflag': 'f', 'xbcval': 0.0, 'yflag': 'f', 'ybcval': -800.0e0, 'zflag': 'd', 'zbcval': 0.0e0}]
#nodes.append({'x': 36.0e0, 'y': 0.0e0, 'z': 0.0e0, 'xflag': 'f', 'xbcval': 0.0, 'yflag': 'd', 'ybcval': 0.0e0, 'zflag': 'd', 'zbcval': 0.0e0})
#nodes.append({'x': 72.0e0, 'y': 18.0e0, 'z': 0.0e0, 'xflag': 'd', 'xbcval': 0.0, 'yflag': 'd', 'ybcval': 0.0e0, 'zflag': 'd', 'zbcval': 0.0e0})
#nodes.append({'x': 36.0e0, 'y': 18.0e0, 'z': 0.0e0, 'xflag': 'f', 'xbcval': 0.0, 'yflag': 'f', 'ybcval': -1000.0e0, 'zflag': 'd', 'zbcval': 0.0e0})
#
#members = [{'start': 0, 'end': 1, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6}]
#members.append({'start': 1, 'end': 2, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6})
#members.append({'start': 1, 'end': 3, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6})
#members.append({'start': 2, 'end': 3, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6})
#members.append({'start': 0, 'end': 3, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6})
#
#
#
# ========================
#
# 3D SAMPLE INPUT
#
#nodes = [{'x': 0.0e0, 'y': 0.0e0, 'z': 0.0e0, 'xflag': 'd', 'xbcval': 0.0, 'yflag': 'd', 'ybcval': 0.0e0, 'zflag': 'd', 'zbcval': 0.0e0}]
#nodes.append({'x': 20.0e0, 'y': 0.0e0, 'z': 0.0e0, 'xflag': 'f', 'xbcval': 0.0, 'yflag': 'f', 'ybcval': 0.0e0, 'zflag': 'f', 'zbcval': 1000.0e0})
#nodes.append({'x': 0.0e0, 'y': 25.0e0, 'z': 0.0e0, 'xflag': 'd', 'xbcval': 0.0, 'yflag': 'd', 'ybcval': 0.0e0, 'zflag': 'd', 'zbcval': 0.0e0})
#nodes.append({'x': 0.0e0, 'y': 0.0e0, 'z': 10.0e0, 'xflag': 'd', 'xbcval': 0.0, 'yflag': 'd', 'ybcval': 0.0e0, 'zflag': 'd', 'zbcval': 0.0e0})
#
#members = [{'start': 0, 'end': 1, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6}]
#members.append({'start': 1, 'end': 2, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6})
#members.append({'start': 3, 'end': 1, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6})
#
#
#
# ========================
#
# DATA FILE SAMPLE INPUT
#
# Load the problem definition (pin/member dictionaries) from a user-supplied
# module; see the 2D/3D sample inputs above for the expected dict formats.
# NOTE(review): bare reload() is a Python 2 builtin; under Python 3 this line
# needs importlib.reload() -- confirm the target interpreter before porting.
import em274_assess5_2017                            # Name of python file, no extension
reload(em274_assess5_2017)                           # Force reload to catch any updates/revisions
nodes, members = em274_assess5_2017.DefineInputs()   # Call input-definition function
# Set scale factor to make display more-easily understood.
# Displacements are multiplied by this factor for PLOTTING ONLY; the numeric
# results printed later are unscaled.
displayScaleFactor = 100.0
# =============================================================================================
#
#
#
# NO EDITS REQUIRED BELOW HERE
#
#
#
# =============================================================================================
# ========================
#
# IMPORT PYTHON MODULES REQUIRED FOR SOLUTION
#
import numpy # General linear algebra capability
import scipy # Advanced routines for evaluating solution quality
import matplotlib.pyplot as plt # 2D plotting
# ========================
#
# ECHO INPUT VALUES TO SCREEN
#
# Calculate Member Properties
# nnodes/nmem are used throughout the rest of the script as the system sizes
# (3 degrees of freedom per node).
nnodes = len(nodes)
nmem = len(members)
# Write Input Information
print(' ')
print('==============================================')
print(' ')
print('       INPUT INFORMATION')
print(' ')
print('==============================================')
print(' ')
print('Pin Input Information')
print('--------------------------------------')
for i in range(nnodes):
    print('Node % 3d' % (i))
    print('  Position:  ( % 12.3g, % 12.3g, % 12.3g )' % (nodes[i]['x'], nodes[i]['y'], nodes[i]['z']))
    # '%*.*s' consumes a (width, precision, string) triple: each flag is
    # printed right-justified in a 12-character field.
    print('  BC Type:   ( %*.*s, %*.*s, %*.*s )' % (12,12,nodes[i]['xflag'], 12,12,nodes[i]['yflag'], 12,12,nodes[i]['zflag']))
    print('  BC Value:  ( % 12.3g, % 12.3g, % 12.3g )' % (nodes[i]['xbcval'], nodes[i]['ybcval'], nodes[i]['zbcval']))
    print(' ')
print(' ')
print(' ')
print(' ')
print('Member Input Information')
print('--------------------------------------')
for i in range(nmem):
    print('Member % 3d' % (i))
    print('  Start, end nodes:     ( % 3d, % 3d )' % (members[i]['start'], members[i]['end']))
    print('  Young\'s Modulus:      % 12.3g' % (members[i]['E']))
    print('  Cross-sectional Area: % 12.3g' % (members[i]['A']))
    print('  Yield Strength:       % 12.3g' % (members[i]['sigma_yield']))
    print('  Ultimate Strength:    % 12.3g' % (members[i]['sigma_ult']))
    print(' ')
print(' ')
print(' ')
print(' ')
# ========================
#
# SETUP MATRIX EQUATION AND SOLVE
#
# Calculate member properties: length and direction cosines of each member,
# measured from its start node to its end node.
for i in range(nmem):
    dx = nodes[members[i]['end']]['x'] - nodes[members[i]['start']]['x']
    dy = nodes[members[i]['end']]['y'] - nodes[members[i]['start']]['y']
    dz = nodes[members[i]['end']]['z'] - nodes[members[i]['start']]['z']
    members[i]['L'] = numpy.sqrt(dx*dx + dy*dy + dz*dz)
    members[i]['costheta_x'] = dx/members[i]['L']
    members[i]['costheta_y'] = dy/members[i]['L']
    members[i]['costheta_z'] = dz/members[i]['L']
# Build global stiffness matrix (3 DOF per node: x, y, z).
#
# Each axial member contributes k * (c c^T) to the four 3x3 blocks coupling
# its start and end nodes, where k = AE/L and c is the unit vector of
# direction cosines.  The outer product below assembles all 36 scalar terms
# that were previously written out long-hand.
stiffness = numpy.zeros((3*nnodes,3*nnodes), dtype='float64')
for i in range(nmem):
    k = members[i]['A']*members[i]['E']/members[i]['L']
    c = numpy.array([members[i]['costheta_x'],
                     members[i]['costheta_y'],
                     members[i]['costheta_z']])
    kblock = k*numpy.outer(c, c)
    b0 = 3*members[i]['start']    # first DOF index of the start node
    e0 = 3*members[i]['end']      # first DOF index of the end node
    stiffness[b0:b0+3, b0:b0+3] += kblock
    stiffness[b0:b0+3, e0:e0+3] -= kblock
    stiffness[e0:e0+3, b0:b0+3] -= kblock
    stiffness[e0:e0+3, e0:e0+3] += kblock
# Calculate average of main diagonal for numerical stability; used below as
# the scaling placed on the diagonal of constrained (reaction) DOFs.
average = stiffness.trace()/float(3*nnodes)
# Create and fill arrays to be used when solving matrix equation
#
# For each DOF (3 per node; columns 3*i, 3*i+1, 3*i+2):
#   'd' (displacement prescribed): the displacement is known, so its
#       stiffness column is moved to the right-hand side; the unknown at
#       this DOF becomes the support reaction, and its diagonal entry in A
#       is set to -average (the diagonal average computed above, used
#       instead of -1 to keep A well scaled).
#   'f' (force prescribed): the displacement is unknown; the stiffness
#       column is copied into A and the applied load is added to b.
# NOTE(review): the reaction at 'd' DOFs is later read directly from x in
# the nodal-results loop -- confirm the -average scaling is consistent with
# that interpretation.
A = numpy.zeros(stiffness.shape, dtype='float64')
b = numpy.zeros((3*nnodes,1), dtype='float64')
for i in range(nnodes):
    icol = 3*i
    if(nodes[i]['xflag'] == 'd'):
        for j in range(3*nnodes):
            b[j] -= stiffness[j][icol]*nodes[i]['xbcval']
        A[icol][icol] = -average
    if(nodes[i]['xflag'] == 'f'):
        b[icol] += nodes[i]['xbcval']
        for j in range(3*nnodes):
            A[j][icol] = stiffness[j][icol]
    icol = 3*i + 1
    if(nodes[i]['yflag'] == 'd'):
        for j in range(3*nnodes):
            b[j] -= stiffness[j][icol]*nodes[i]['ybcval']
        A[icol][icol] = -average
    if(nodes[i]['yflag'] == 'f'):
        b[icol] += nodes[i]['ybcval']
        for j in range(3*nnodes):
            A[j][icol] = stiffness[j][icol]
    icol = 3*i + 2
    if(nodes[i]['zflag'] == 'd'):
        for j in range(3*nnodes):
            b[j] -= stiffness[j][icol]*nodes[i]['zbcval']
        A[icol][icol] = -average
    if(nodes[i]['zflag'] == 'f'):
        b[icol] += nodes[i]['zbcval']
        for j in range(3*nnodes):
            A[j][icol] = stiffness[j][icol]
# Solve the system in the least-squares sense; rank and singular values are
# reported later as solution-quality indicators.
# NOTE(review): newer numpy releases warn unless rcond= is passed to lstsq.
x,res,rank,singularvals = numpy.linalg.lstsq(A,b)
# Calculate nodal results.
# At 'f' DOFs the solver unknown is the displacement and the force is the
# prescribed load; at 'd' DOFs the displacement is the prescribed value and
# the solver unknown is interpreted as the support reaction.
for i in range(nnodes):
    if(nodes[i]['xflag'] == 'f'):
        nodes[i]['xdisp'] = x[3*i+0][0]
        nodes[i]['xforce'] = nodes[i]['xbcval']
    if(nodes[i]['xflag'] == 'd'):
        nodes[i]['xdisp'] = nodes[i]['xbcval']
        nodes[i]['xforce'] = x[3*i+0][0]
    if(nodes[i]['yflag'] == 'f'):
        nodes[i]['ydisp'] = x[3*i+1][0]
        nodes[i]['yforce'] = nodes[i]['ybcval']
    if(nodes[i]['yflag'] == 'd'):
        nodes[i]['ydisp'] = nodes[i]['ybcval']
        nodes[i]['yforce'] = x[3*i+1][0]
    if(nodes[i]['zflag'] == 'f'):
        nodes[i]['zdisp'] = x[3*i+2][0]
        nodes[i]['zforce'] = nodes[i]['zbcval']
    if(nodes[i]['zflag'] == 'd'):
        nodes[i]['zdisp'] = nodes[i]['zbcval']
        nodes[i]['zforce'] = x[3*i+2][0]
    # Deformed (unscaled) nodal positions.
    nodes[i]['xnew'] = nodes[i]['x'] + nodes[i]['xdisp']
    nodes[i]['ynew'] = nodes[i]['y'] + nodes[i]['ydisp']
    nodes[i]['znew'] = nodes[i]['z'] + nodes[i]['zdisp']
# Calculate member results: engineering strain from the change in length,
# then stress (Hooke's law) and internal axial load.
for i in range(nmem):
    dx = nodes[members[i]['end']]['xnew'] - nodes[members[i]['start']]['xnew']
    dy = nodes[members[i]['end']]['ynew'] - nodes[members[i]['start']]['ynew']
    dz = nodes[members[i]['end']]['znew'] - nodes[members[i]['start']]['znew']
    members[i]['Lnew'] = numpy.sqrt(dx*dx + dy*dy + dz*dz)
    members[i]['epsilon'] = (members[i]['Lnew'] - members[i]['L'])/members[i]['L']
    members[i]['stress'] = members[i]['epsilon']*members[i]['E']
    members[i]['load'] = members[i]['stress']*members[i]['A']
# Calculate null space of A (http://stackoverflow.com/questions/2992947/calculating-the-null-space-of-a-matrix)
# Rows of vh whose singular value is (numerically) zero span the null space.
# scipy.compress/scipy.transpose were deprecated NumPy aliases (removed in
# modern SciPy); the numpy functions below are byte-for-byte equivalent.
u, s, vh = numpy.linalg.svd(A)
null_mask = (s <= 1.0e-15)
null_space = numpy.compress(null_mask, vh, axis=0)
nullspace = numpy.transpose(null_space)
# ========================
#
# OUTPUT RESULTS TO TERMINAL
#
print(' ')
print('==============================================')
print(' ')
print('       RESULTS')
print(' ')
print('==============================================')
print(' ')
print('Pin Displacements (x,y,z)')
print('--------------------------------------')
for i in range(nnodes):
    print('Node % 3d:  % 10.5e % 10.5e % 10.5e' % (i,nodes[i]['xdisp'],nodes[i]['ydisp'],nodes[i]['zdisp']))
print(' ')
print(' ')
print('Member Results')
print('--------------------------------------')
for i in range(nmem):
    print('Member % 3d:' % (i))
    print('  Internal Load:  % 10.5e' % (members[i]['load']))
    print('  Axial Strain:   % 10.5e' % (members[i]['epsilon']))
    print('  Axial Stress:   % 10.5e' % (members[i]['stress']))
    # Warn when the (tensile) stress crosses material limits.
    if(members[i]['stress'] > members[i]['sigma_yield']):
        if(members[i]['stress'] < members[i]['sigma_ult']):
            print('  --> YIELD STRESS SURPASSED')
    if(members[i]['stress'] > members[i]['sigma_ult']):
        print('  --> ULTIMATE STRESS SURPASSED')
    print(' ')
print(' ')
print(' ')
print(' ')
print('==============================================')
print(' ')
print('       SOLUTION QUALITY INDICATORS')
print(' ')
print('==============================================')
print(' ')
print('Rank of A matrix: %d' % (rank))
print(' ')
print('Size of A:        %d' % (3*nnodes))
print(' ')
print('Condition Number: % 10.3e (smaller is better)' % (singularvals.max()/singularvals.min()))
print('   General rule: If condition number is O(10^n), discard last n digits')
print('                 from the results.')
print(' ')
print('Singular values: ')
for i in range(len(singularvals)):
    print('  % 12.10g' % (singularvals[i]))
print(' ')
print('Nullspace of A:')
# FIX: was the Python-2-only statement "print nullspace", which is a
# SyntaxError under Python 3 and inconsistent with the rest of this file.
print(nullspace)
# ========================
#
# GENERATE PLOTS
#
# Original and displaced nodal coordinates; displacements are exaggerated by
# displayScaleFactor for visualization only.
xOriginal = numpy.zeros((nnodes))
yOriginal = numpy.zeros((nnodes))
zOriginal = numpy.zeros((nnodes))
xNew = numpy.zeros((nnodes))
yNew = numpy.zeros((nnodes))
zNew = numpy.zeros((nnodes))
for i in range(nnodes):
    xOriginal[i] = nodes[i]['x']
    xNew[i] = xOriginal[i] + nodes[i]['xdisp']*displayScaleFactor
    yOriginal[i] = nodes[i]['y']
    yNew[i] = yOriginal[i] + nodes[i]['ydisp']*displayScaleFactor
    zOriginal[i] = nodes[i]['z']
    zNew[i] = zOriginal[i] + nodes[i]['zdisp']*displayScaleFactor
# Axis bounds covering both the original and displaced configurations.
xmin1 = numpy.min(xOriginal)
xmin2 = numpy.min(xNew)
xmin = min(xmin1,xmin2)
ymin1 = numpy.min(yOriginal)
ymin2 = numpy.min(yNew)
ymin = min(ymin1,ymin2)
zmin1 = numpy.min(zOriginal)
zmin2 = numpy.min(zNew)
zmin = min(zmin1,zmin2)
xmax1 = numpy.max(xOriginal)
xmax2 = numpy.max(xNew)
# BUG FIX: the three maxima below were previously computed with min(),
# which could shrink the plot limits, make the ranges negative, and wrongly
# suppress the XZ/YZ views (zRange test below).
xmax = max(xmax1,xmax2)
ymax1 = numpy.max(yOriginal)
ymax2 = numpy.max(yNew)
ymax = max(ymax1,ymax2)
zmax1 = numpy.max(zOriginal)
zmax2 = numpy.max(zNew)
zmax = max(zmax1,zmax2)
xRange = xmax - xmin
yRange = ymax - ymin
zRange = zmax - zmin
# Fractional padding added around the plotted truss.
factor = 0.02
# Generate XY view.
# Deformed members are color-coded: red if ultimate strength is exceeded,
# orange if yield strength is exceeded, green otherwise.
# (plt.hold() call removed: "hold" has been the default since matplotlib 2.0
# and the function itself was removed in matplotlib 3.0.)
plt.figure()
plt.plot(xOriginal, yOriginal, 'ob', label='Original Position')
plt.plot(xNew, yNew, 'or', label='New Position')
for i in range(nmem):
    # Undeformed member in blue.
    xx = [xOriginal[members[i]['start']], xOriginal[members[i]['end']]]
    yy = [yOriginal[members[i]['start']], yOriginal[members[i]['end']]]
    plt.plot(xx, yy, '-b')
    xx2 = [xNew[members[i]['start']], xNew[members[i]['end']]]
    yy2 = [yNew[members[i]['start']], yNew[members[i]['end']]]
    # FIX: previously the green "else" branch was bound to the ultimate-
    # stress if, so yielded members had green drawn over their orange line.
    # The chain below draws exactly one line per member.
    if(members[i]['stress'] > members[i]['sigma_ult']):
        plt.plot(xx2, yy2, color="#ff2500")
    elif(members[i]['stress'] > members[i]['sigma_yield']):
        plt.plot(xx2, yy2, color="#ffa500")
    else:
        plt.plot(xx2, yy2, color="#006600")
plt.xlim([xmin - xRange*factor, xmax + xRange*factor])
plt.ylim([ymin - yRange*factor, ymax + yRange*factor])
plt.xlabel('X Position')
plt.ylabel('Y Position')
plt.title('Truss - XY View -- Displacements Scaled ' + str(displayScaleFactor) + 'x')
plt.grid(True)
plt.legend()
plt.savefig('Truss_XY_View.png')
# If displacement in the Z-direction exists, plot XZ and YZ views. Note that
# the zRange cannot be compared to precisely '0' due to floating-point errors,
# so it is compared to a very small value instead. Also note that 'x' and 'y'
# refer to the 2D plot and therefore do not necessarily correspond directly
# to the 'x' and 'y' coordinates of the nodes.
# (plt.hold() calls removed -- default since matplotlib 2.0, deleted in 3.0;
# member color coding fixed as in the XY view: one line per member.)
if(zRange > 1.0e-5):
    plt.figure()
    plt.plot(xOriginal, zOriginal, 'ob', label='Original Position')
    plt.plot(xNew, zNew, 'or', label='New Position')
    for i in range(nmem):
        xx = [xOriginal[members[i]['start']], xOriginal[members[i]['end']]]
        yy = [zOriginal[members[i]['start']], zOriginal[members[i]['end']]]
        plt.plot(xx, yy, '-b')
        xx2 = [xNew[members[i]['start']], xNew[members[i]['end']]]
        yy2 = [zNew[members[i]['start']], zNew[members[i]['end']]]
        if(members[i]['stress'] > members[i]['sigma_ult']):
            plt.plot(xx2, yy2, color="#ff2500")
        elif(members[i]['stress'] > members[i]['sigma_yield']):
            plt.plot(xx2, yy2, color="#ffa500")
        else:
            plt.plot(xx2, yy2, color="#006600")
    plt.xlim([xmin - xRange*factor, xmax + xRange*factor])
    plt.ylim([zmin - zRange*factor, zmax + zRange*factor])
    plt.xlabel('X Position')
    plt.ylabel('Z Position')
    plt.title('Truss - XZ View -- Displacements Scaled ' + str(displayScaleFactor) + 'x')
    plt.grid(True)
    plt.legend()
    plt.savefig('Truss_XZ_View.png')
    plt.figure()
    plt.plot(yOriginal, zOriginal, 'ob', label='Original Position')
    plt.plot(yNew, zNew, 'or', label='New Position')
    for i in range(nmem):
        xx = [yOriginal[members[i]['start']], yOriginal[members[i]['end']]]
        yy = [zOriginal[members[i]['start']], zOriginal[members[i]['end']]]
        plt.plot(xx, yy, '-b')
        xx2 = [yNew[members[i]['start']], yNew[members[i]['end']]]
        yy2 = [zNew[members[i]['start']], zNew[members[i]['end']]]
        if(members[i]['stress'] > members[i]['sigma_ult']):
            plt.plot(xx2, yy2, color="#ff2500")
        elif(members[i]['stress'] > members[i]['sigma_yield']):
            plt.plot(xx2, yy2, color="#ffa500")
        else:
            plt.plot(xx2, yy2, color="#006600")
    plt.xlim([ymin - yRange*factor, ymax + yRange*factor])
    plt.ylim([zmin - zRange*factor, zmax + zRange*factor])
    plt.xlabel('Y Position')
    plt.ylabel('Z Position')
    plt.title('Truss - YZ View -- Displacements Scaled ' + str(displayScaleFactor) + 'x')
    plt.grid(True)
    plt.legend()
    plt.savefig('Truss_YZ_View.png')
# Write results to VTK files to enable more-flexible visualization via ParaView
# (or any other VTK-supporting viewer).  Context managers guarantee the files
# are closed even if a write fails.
# NOTE(review): legacy-VTK keywords are conventionally upper-case
# (POINTS/CELLS/CELL_TYPES); VTK's reader lower-cases lines before matching,
# but confirm the target viewer accepts this capitalization.
with open('TrussOriginal.vtk', 'w') as f:
    f.write("# vtk DataFile Version 2.0 \n")
    f.write("Truss - Original Configuration \n")
    f.write("ASCII \n")
    f.write("DATASET UNSTRUCTURED_GRID \n")
    f.write("Points " + str(nnodes) + " float \n")
    for i in range(nnodes):
        f.write(str(nodes[i]['x']) + " " + str(nodes[i]['y']) + " " + str(nodes[i]['z']) + " \n")
    f.write("Cells " + str(nmem) + " " + str(nmem*3) + " \n")
    for i in range(nmem):
        f.write("2 " + str(members[i]['start']) + " " + str(members[i]['end']) + " \n")
    f.write("Cell_Types " + str(nmem) + " \n")
    for i in range(nmem):
        f.write("3 \n")    # All "cells" are of type VTK_LINE
# Deformed configuration uses the display-scaled coordinates, matching the
# matplotlib views above.
with open('TrussNew.vtk', 'w') as f:
    f.write("# vtk DataFile Version 2.0 \n")
    f.write("Truss - Deformed Configuration - Deformation scaled by " + str(displayScaleFactor) + "x \n")
    f.write("ASCII \n")
    f.write("DATASET UNSTRUCTURED_GRID \n")
    f.write("Points " + str(nnodes) + " float \n")
    for i in range(nnodes):
        f.write(str(xNew[i]) + " " + str(yNew[i]) + " " + str(zNew[i]) + " \n")
    f.write("Cells " + str(nmem) + " " + str(nmem*3) + " \n")
    for i in range(nmem):
        f.write("2 " + str(members[i]['start']) + " " + str(members[i]['end']) + " \n")
    f.write("Cell_Types " + str(nmem) + " \n")
    for i in range(nmem):
        f.write("3 \n")    # All "cells" are of type VTK_LINE
| rgrandin/MechanicsTools | truss/truss_solver.py | Python | bsd-3-clause | 22,478 | [
"ParaView",
"VTK"
] | 37c20a8b534a6a3778f5fed949bee30063572d9f9d4f86f89bb5eac1e5a6150d |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated Django schema migration.  Creates two models:
    #   RTW      -- one recorded trace of a named variable on a cenet.Neuron,
    #               with start/end times and sampling interval.
    #   RTW_CONF -- a named, optionally public JSON configuration owned by a
    #               user and bound to a cenet.CENetwork, with audit timestamps.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('cenet', '0005_auto_20160801_1639'),
    ]
    operations = [
        migrations.CreateModel(
            name='RTW',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('variable', models.CharField(max_length=256)),
                ('startTime', models.FloatField()),
                ('endTime', models.FloatField()),
                ('samplingInterval', models.FloatField()),
                ('neuron', models.ForeignKey(to='cenet.Neuron')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='RTW_CONF',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=256)),
                ('public', models.BooleanField(default=False)),
                ('json', models.TextField()),
                # created/updated are maintained automatically by Django.
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('network', models.ForeignKey(to='cenet.CENetwork')),
                ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| Si-elegans/Web-based_GUI_Tools | rtw_ui/migrations/0001_initial.py | Python | apache-2.0 | 1,674 | [
"NEURON"
] | d76c41f1560705b1d9c41595e277f6b57ce47a8278714424477527a1ab4dd4d9 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Classes for reading/manipulating/writing FEFF files.
http://leonardo.phys.washington.edu/feff/
XANES and EXAFS input files, and the xmu.dat, ldos.dat output files are
available, for non-spin case at this time. FEFF input file has parameter tags,
potential definitions and atomic coordinates all in the feff.inp file. These
are each developed separately with the Header, FeffAtoms, FeffPot, and
FeffTags classes, then combined to produce the full feff.inp.
"""
from six.moves import map
__author__ = "Alan Dozier"
__credits__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0.3"
__maintainer__ = "Alan Dozier"
__email__ = "adozier@uky.edu"
__status__ = "Beta"
__date__ = "April 7, 2013"
import re
import itertools
import warnings
import numpy as np
from collections import defaultdict, OrderedDict
from operator import itemgetter
from tabulate import tabulate
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.io.cif import CifParser
from pymatgen.util.string_utils import str_delimited
from monty.io import zopen
from pymatgen.util.io_utils import clean_lines
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.electronic_structure.dos import CompleteDos, Dos
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from monty.json import MSONable
from six import string_types
class Header(MSONable):
    """
    Creates Header for feff.inp file generated by pymatgen.
    Has the following format::
        * This feff.inp file generated by pymatgen, www.materialsproject.org
        TITLE comment:
        TITLE Source:  CoO19128.cif
        TITLE Structure Summary:  (Co2 O2)
        TITLE Reduced formula:  CoO
        TITLE space group: P1,   space number: 1
        TITLE abc: 3.297078 3.297078 5.254213
        TITLE angles: 90.0 90.0 120.0
        TITLE sites: 4
        * 1 Co     0.666666     0.333332     0.496324
        * 2 Co     0.333333     0.666667     0.996324
        * 3 O     0.666666     0.333332     0.878676
        * 4 O     0.333333     0.666667     0.378675
    Args:
        struct: Structure object, See pymatgen.core.structure.Structure.
        source: User supplied identifier, i.e. for Materials Project this
            would be the material ID number
        comment: Comment for first header line
    """
    def __init__(self, struct, source='', comment=''):
        # Only fully-ordered structures can be written as discrete atomic
        # coordinates; partially-occupied sites are rejected below.
        if struct.is_ordered:
            self._struct = struct
            self._source = source
            self._site_symbols = []
            self._natoms = []
            sym = SpacegroupAnalyzer(struct)
            data = sym.get_symmetry_dataset()
            self._space_number = data["number"]
            self._space_group = data["international"]
            syms = [site.specie.symbol for site in struct]
            # Run-length encode the per-site symbols: one (symbol, count)
            # entry per contiguous run of identical elements.
            # NOTE(review): 'data' is reused as the loop variable here,
            # shadowing the symmetry dataset above.
            for (s, data) in itertools.groupby(syms):
                self._site_symbols.append(s)
                self._natoms.append(len(tuple(data)))
            if comment == '':
                self._comment = 'None Given'
            else:
                self._comment = comment
        else:
            raise ValueError("Structure with partial occupancies cannot be "
                             "converted into atomic coordinates!")
    @staticmethod
    def from_cif_file(cif_file, source='', comment=''):
        """
        Static method to create Header object from cif_file
        Args:
            cif_file: cif_file path and name
            source: User supplied identifier, i.e. for Materials Project this
                would be the material ID number
            comment: User comment that goes in header
        Returns:
            Header Object
        """
        # Only the first structure in the CIF is used.
        r = CifParser(cif_file)
        structure = r.get_structures()[0]
        return Header(structure, source, comment)
    @property
    def structure_symmetry(self):
        """
        Returns space number and space group
        Returns:
            Space number and space group list
        """
        return self._space_group, self._space_number
    @property
    def source(self):
        """
        Property method to return source string.
        """
        return self._source
    @property
    def site_symbols(self):
        """
        Symbols for each site in unit cell.
        """
        return self._site_symbols
    @property
    def formula(self):
        """
        Formula of structure
        """
        return self._struct.composition.formula
    @property
    def struct(self):
        """
        Structure associated with the atomic coordinates.
        """
        return self._struct
    @property
    def space_group(self):
        """
        Returns Space Group symbol
        """
        return self._space_group
    @property
    def space_number(self):
        """
        Returns Space group number
        """
        return self._space_number
    def as_dict(self):
        """
        Returns Dictionary representation of Header Object
        """
        return {'@module': self.__class__.__module__,
                '@class': self.__class__.__name__,
                'comment': self._comment, 'source': self.source,
                'structure': self._struct.as_dict()}
    @staticmethod
    def from_dict(hdict):
        """
        Returns header object from a dictionary representation
        """
        comment = hdict['comment']
        source = hdict['source']
        structure = Structure.from_dict(hdict['structure'])
        return Header(structure, source, comment)
    @staticmethod
    def from_file(filename):
        """
        Returns Header object from file
        """
        hs = Header.header_string_from_file(filename)
        return Header.from_string(hs)
    @staticmethod
    def header_string_from_file(filename='feff.inp'):
        """
        Reads Header string from either a HEADER file or feff.inp file
        Will also read a header from a non-pymatgen generated feff.inp file
        Args:
            filename: File name containing the Header data.
        Returns:
            Reads header string.
        """
        with zopen(filename, "r") as fobject:
            f = fobject.readlines()
            feff_header_str = []
            ln = 0
            #Checks to see if generated by pymatgen
            # NOTE(review): detection requires "pymatgen" at index > 0 of the
            # first line; a file starting exactly with "pymatgen" would be
            # treated as non-pymatgen (find() returns 0).
            try:
                feffpmg = f[0].find("pymatgen")
            except IndexError:
                feffpmg = 0
            #Reads pymatgen generated header or feff.inp file
            if feffpmg > 0:
                # Pymatgen headers have a fixed layout: the site count is the
                # third token of line 9, followed by one "* i El x y z" line
                # per site.  This parsing breaks if the layout changes.
                nsites = int(f[8].split()[2])
                for line in f:
                    ln += 1
                    if ln <= nsites + 9:
                        feff_header_str.append(line)
            else:
                # Reads header from header from feff.inp file from unknown
                # source: keep leading "*"/"T" (comment/TITLE) lines until the
                # first line that is neither.
                end = 0
                for line in f:
                    if (line[0] == "*" or line[0] == "T") and end == 0:
                        feff_header_str.append(line.replace("\r", ""))
                    else:
                        end = 1
        return ''.join(feff_header_str)
    @staticmethod
    def from_string(header_str):
        """
        Reads Header string and returns Header object if header was
        generated by pymatgen.
        Args:
            header_str: pymatgen generated feff.inp header
        Returns:
            Structure object.
        """
        # Checks to see if generated by pymatgen, if not it is impossible to
        # generate structure object so it is not possible to generate header
        # object and routine ends
        lines = tuple(clean_lines(header_str.split("\n"), False))
        comment1 = lines[0]
        feffpmg = comment1.find("pymatgen")
        if feffpmg > 0:
            comment2 = ' '.join(lines[1].split()[2:])
            #This sec section gets information to create structure object
            # Fixed line indices mirror the writer in __str__: line 2 is the
            # source, line 6/7 the lattice lengths/angles, line 8 the site
            # count, lines 9.. the fractional coordinates.
            source = ' '.join(lines[2].split()[2:])
            natoms = int(lines[8].split()[2])
            basis_vec = lines[6].split()
            a = float(basis_vec[2])
            b = float(basis_vec[3])
            c = float(basis_vec[4])
            lengths = [a, b, c]
            basis_ang = lines[7].split()
            alpha = float(basis_ang[2])
            beta = float(basis_ang[3])
            gamma = float(basis_ang[4])
            angles = [alpha, beta, gamma]
            lattice = Lattice.from_lengths_and_angles(lengths, angles)
            atomic_symbols = []
            for i in range(9, 9 + natoms):
                atomic_symbols.append(lines[i].split()[2])
            # read the atomic coordinates
            coords = []
            for i in range(natoms):
                toks = lines[i + 9].split()
                coords.append([float(s) for s in toks[3:]])
            #Structure object is now generated and Header object returned
            struct_fromfile = Structure(lattice, atomic_symbols, coords, False,
                                        False, False)
            h = Header(struct_fromfile, source, comment2)
            return h
        else:
            # NOTE(review): returns an error *string* instead of raising;
            # callers must check the return type.
            return "Header not generated by pymatgen, " \
                   "cannot return header object"
    def __str__(self):
        """
        String representation of Header.
        """
        # Fixed 6-decimal formatting; from_string() above relies on this
        # exact line layout.
        to_s = lambda x: "%0.6f" % x
        output = ["* This FEFF.inp file generated by pymatgen",
                  ''.join(["TITLE comment: ", self._comment]),
                  ''.join(["TITLE Source: ", self.source]),
                  "TITLE Structure Summary: {}"
                  .format(self.struct.composition.formula),
                  "TITLE Reduced formula: {}"
                  .format(self.struct.composition.reduced_formula),
                  "TITLE space group: ({}), space number: ({})"
                  .format(self.space_group, self.space_number), "TITLE abc:{}"
                  .format(" ".join([to_s(i).rjust(10)
                                    for i in self.struct.lattice.abc])),
                  "TITLE angles:{}"
                  .format(" ".join([to_s(i).rjust(10)
                                    for i in self.struct.lattice.angles])),
                  "TITLE sites: {}".format(self.struct.num_sites)]
        for i, site in enumerate(self.struct):
            output.append(" ".join(["*", str(i + 1), site.species_string,
                                    " ".join([to_s(j).rjust(12)
                                              for j in site.frac_coords])]))
        return "\n".join(output)
    def write_file(self, filename='HEADER'):
        """
        Writes Header into filename on disk.
        Args:
            filename: Filename and path for file to be written to disk
        """
        with open(filename, "w") as f:
            f.write(str(self) + "\n")
class FeffAtoms(MSONable):
"""
Object for representing atomic positions, placed in feff.inp file
These are oredered as expanding shells.
"""
def __init__(self, struct, central_atom):
"""
Args:
struct: Structure object. See pymatgen.core.structure.Structure.
central_atom: Symbol for absorbing atom
"""
self._central_atom = central_atom
if struct.is_ordered:
self._struct = struct
self._site_symbols = []
self._natoms = []
syms = [site.specie.symbol for site in struct]
for (s, data) in itertools.groupby(syms):
self._site_symbols.append(s)
self._natoms.append(len(tuple(data)))
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
unique_pot_atoms = []
[unique_pot_atoms.append(i) for i in syms
if not unique_pot_atoms.count(i)]
self._pot_dict = {}
for i, atom in enumerate(unique_pot_atoms):
self._pot_dict[atom] = i + 1
@property
def central_atom(self):
"""
Returns central atom
"""
return self._central_atom
@property
def pot_dict(self):
"""
returns dictionary for potential indexes
"""
return self._pot_dict
@property
def site_symbols(self):
"""
Symbols for each site atomic coordinate for Feff list.
"""
return self._site_symbols
@property
def struct(self):
"""
Structure associated with the atomic coordinates.
"""
return self._struct
@staticmethod
def atoms_string_from_file(filename):
"""
Reads atomic shells from file such as feff.inp or ATOMS file
The lines are arranged as follows:
x y z ipot Atom Symbol Distance Number
with distance being the shell radius and ipot an integer identifying
the potential used.
Args:
filename: File name containing atomic coord data.
Returns:
Atoms string.
"""
with zopen(filename, "r") as fobject:
f = fobject.readlines()
coords = 0
atoms_str = []
for line in f:
if coords == 0:
find_atoms = line.find("ATOMS")
if find_atoms >= 0:
coords = 1
if coords == 1:
atoms_str.append(line.replace("\r", ""))
return ''.join(atoms_str)
@staticmethod
def from_dict(d):
"""
Returns feffAtoms object from dictionary
"""
return FeffAtoms(Structure.from_dict(d['structure']),
d['central_atom'])
def as_dict(self):
"""
Return Dictionary representation of atoms oject
"""
return {'@module': self.__class__.__module__,
'@class': self.__class__.__name__,
'structure': self._struct.as_dict(),
'central_atom': self._central_atom}
@staticmethod
def from_string(data):
"""
At the moment does nothing.
From atoms string data generates atoms object
"""
return data
def get_string(self, radius=10.):
"""
Returns a string representation of atomic shell coordinates to be used
in the feff.inp file.
Args:
radius: Maximum atomic shell radius to include in atoms list
Returns:
String representation of Atomic Coordinate Shells.
"""
#Internal variables:
#
#nopts = number of potentials in unit cell used
#ptatoms = list of atom potential atom symbols in unit cell
#index = index number of absorbing atom in list
#pt = coordinates of absorbing atom
#sphere = sites around absorbing atom within radius
#x,y,zshift = coordinate shift to place absorbing atom at (0,0,0)
#atom = site in sphere
#atm = atomic symbol string for atom at atom site
#ipot = index for that atom symbol in potential dictionary
#distance = distance of that atom site from absorbing atom
nopts = len(self.struct.species)
ptatoms = [self.struct.species[i].symbol for i in range(nopts)]
index = ptatoms.index(self.central_atom)
pt = self.struct.cart_coords[index]
sphere = Structure.get_sites_in_sphere(self.struct, pt, radius)
xshift = pt[0]
yshift = pt[1]
zshift = pt[2]
end = len(sphere)
row = []
for i in range(end):
atom = sphere[i][0]
atm = re.sub(r"[^aA-zZ]+", "", atom.species_string)
ipot = self.pot_dict[atm]
x = atom.coords[0] - xshift
y = atom.coords[1] - yshift
z = atom.coords[2] - zshift
distance = sphere[i][1]
row.append(["{:f}".format(x), "{:f}".format(y), "{:f}".format(z),
ipot, atm, "{:f}".format(distance), i])
#after this point table is built
row_sorted = sorted(row, key=itemgetter(5))
row_sorted[0][3] = 0
for i in range(end):
row_sorted[i][6] = i
row_sorted = str(tabulate(row_sorted,
headers=["* x", "y", "z", "ipot",
"Atom", "Distance", "Number"]))
atom_list = row_sorted.replace("--", "**")
return ''.join(["ATOMS\n", atom_list, "\nEND\n"])
def __str__(self):
"""
String representation of Atoms file.
"""
return self.get_string()
def write_file(self, filename='ATOMS'):
    """
    Write Atoms list to filename

    Args:
        filename: path for file to be written
    """
    content = "{}\n".format(self)
    with open(filename, "w") as out:
        out.write(content)
# **Non-exhaustive** list of valid Feff.inp tags
# BUGFIX: a missing comma previously fused "FPRIME" and "MDFF" into the
# single invalid entry "FPRIMEMDFF", so the MDFF tag was never recognized.
# Some tags ("PRINT", "DANES", "FPRIME") appear more than once; the
# duplicates are harmless because this tuple is only used for membership
# tests, and are kept so the tuple's contents stay backward-compatible.
VALID_FEFF_TAGS = ("CONTROL", "PRINT", "ATOMS", "POTENTIALS", "RECIPROCAL",
                   "REAL", "MARKER", "LATTICE", "TITLE", "RMULTIPLIER",
                   "SGROUP", "COORDINATES", "EQUIVALENCE", "CIF", "CGRID",
                   "CFAVERAGE", "OVERLAP", "EXAFS", "XANES", "ELNES", "EXELFS",
                   "LDOS", "ELLIPTICITY", "MULTIPOLE", "POLARIZATION",
                   "RHOZZP", "DANES", "FPRIME", "NRIXS", "XES", "XNCD",
                   "XMCD", "XNCDCONTROL", "END", "KMESH", "PRINT", "EGRID",
                   "DIMS", "AFLOP", "EDGE", "COMPTON", "DANES",
                   "FPRIME", "MDFF", "HOLE", "COREHOLE", "S02", "CHBROAD",
                   "EXCHANGE", "FOLP", "NOHOLE", "RGRID", "SCF",
                   "UNFREEZEF", "CHSHIFT", "DEBYE",
                   "INTERSTITIAL", "CHWIDTH", "EGAP", "EPS0", "EXTPOT",
                   "ION", "JUMPRM", "EXPOT", "SPIN", "LJMAX", "LDEC", "MPSE",
                   "PLASMON", "RPHASES", "RSIGMA", "PMBSE", "TDLDA", "FMS",
                   "DEBYA", "OPCONS", "PREP", "RESTART", "SCREEN", "SETE",
                   "STRFACTORS", "BANDSTRUCTURE", "RPATH", "NLEG", "PCRITERIA",
                   "SYMMETRY", "SS", "CRITERIA", "IORDER", "NSTAR", "ABSOLUTE",
                   "CORRECTIONS", "SIG2", "SIG3", "MBCONV", "SFCONV", "RCONV",
                   "SELF", "SFSE", "MAGIC")
class FeffTags(dict):
    """
    feff_tag object for reading and writing PARAMETER files.

    Behaves like a plain dict mapping Feff input-tag -> value, with key
    validation and string-to-typed-value conversion on assignment.
    """

    def __init__(self, params=None):
        """
        Creates a Feff_tag object.

        Args:
            params: A set of input parameters as a dictionary.
        """
        super(FeffTags, self).__init__()
        if params:
            self.update(params)

    def __setitem__(self, key, val):
        """
        Add parameter-val pair to Feff_tag file. Warns if parameter is not in
        list of valid Feff tags. Also cleans the parameter and val by stripping
        leading and trailing white spaces.

        Arg:
            key: dict key value
            value: value associated with key in dictionary
        """
        if key.strip().upper() not in VALID_FEFF_TAGS:
            warnings.warn(key.strip() + " not in VALID_FEFF_TAGS list")
        # String values are parsed into typed values via proc_val;
        # non-string values are stored unchanged.
        super(FeffTags, self).__setitem__(key.strip(),
                                          FeffTags.proc_val(key.strip(),
                                                            val.strip())
                                          if isinstance(val, string_types)
                                          else val)

    def as_dict(self):
        """
        Dict representation.

        Returns:
            Dictionary of parameters from fefftags object
        """
        tags_dict = {k: v for k, v in self.items()}
        tags_dict['@module'] = self.__class__.__module__
        tags_dict['@class'] = self.__class__.__name__
        return tags_dict

    @staticmethod
    def from_dict(d):
        """
        Creates FeffTags object from a dictionary.

        Args:
            d: Dict of feff parameters and values.

        Returns:
            FeffTags object
        """
        i = FeffTags()
        for k, v in d.items():
            if k not in ("@module", "@class"):
                i[k] = v
        return i

    def get_string(self, sort_keys=True, pretty=True):
        """
        Returns a string representation of the Feff_tag file. The reason why
        this method is different from the __str__ method is to provide options
        for pretty printing.

        Args:
            sort_keys: Set to True to sort the Feff parameters alphabetically.
                Defaults to True.
            pretty: Set to True for pretty aligned output, False for no.

        Returns:
            String representation of FeffTags.
        """
        keys = self.keys()
        if sort_keys:
            keys = sorted(keys)
        lines = []
        for k in keys:
            # List values are rendered space-separated on a single line.
            if isinstance(self[k], list):
                lines.append([k, " ".join([str(i) for i in self[k]])])
            else:
                lines.append([k, self[k]])
        if pretty:
            return tabulate(lines)
        else:
            return str_delimited(lines, None, " ")

    def __str__(self):
        return self.get_string(sort_keys=False, pretty=True)

    def write_file(self, filename='PARAMETERS'):
        """
        Write FeffTags to a Feff parameter tag file.

        Args:
            filename: filename and path to write to.
        """
        with open(filename, "w") as f:
            f.write(self.__str__() + "\n")

    @staticmethod
    def from_file(filename="feff.inp"):
        """
        Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.

        Args:
            filename: Filename for either PARAMETER or feff.inp file

        Returns:
            Feff_tag object
        """
        with zopen(filename, "r") as f:
            lines = list(clean_lines(f.readlines()))
        params = {}
        for line in lines:
            # Tag lines start with an upper-case keyword, optionally
            # followed by digits and a value string.
            m = re.match("([A-Z]+\d*\d*)\s*(.*)", line)
            if m:
                key = m.group(1).strip()
                val = m.group(2).strip()
                val = FeffTags.proc_val(key, val)
                # Structural sections are parsed by other classes; skip.
                if key not in ("ATOMS", "POTENTIALS", "END", "TITLE"):
                    params[key] = val
        return FeffTags(params)

    @staticmethod
    def proc_val(key, val):
        """
        Static helper method to convert Feff parameters to proper types, e.g.
        integers, floats, lists, etc.

        Args:
            key: Feff parameter key
            val: Actual value of Feff parameter.
        """
        # NOTE(review): list_type_keys covers *all* valid tags, so every key
        # in VALID_FEFF_TAGS takes the list branch below; the boolean/float/
        # int branches are only reachable for keys outside that tuple or via
        # the ValueError fallback. Confirm this is intentional.
        list_type_keys = VALID_FEFF_TAGS
        boolean_type_keys = ()
        float_type_keys = ("SCF", "EXCHANGE", "S02", "FMS", "XANES", "EXAFS",
                           "RPATH", "LDOS")
        int_type_keys = ("PRINT", "CONTROL")

        def smart_int_or_float(numstr):
            # Strings containing '.' or an exponent marker become floats.
            if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
                return float(numstr)
            else:
                return int(numstr)
        try:
            if key in list_type_keys:
                output = list()
                toks = re.split("\s+", val)
                for tok in toks:
                    # "N*x" expands to N repetitions of the value x.
                    m = re.match("(\d+)\*([\d\.\-\+]+)", tok)
                    if m:
                        output.extend([smart_int_or_float(m.group(2))] *
                                      int(m.group(1)))
                    else:
                        output.append(smart_int_or_float(tok))
                return output
            if key in boolean_type_keys:
                m = re.search("^\W+([TtFf])", val)
                if m:
                    if m.group(1) == "T" or m.group(1) == "t":
                        return True
                    else:
                        return False
                raise ValueError(key + " should be a boolean type!")
            if key in float_type_keys:
                return float(val)
            if key in int_type_keys:
                return int(val)
        except ValueError:
            # Non-numeric values fall back to a capitalized string.
            return val.capitalize()
        return val.capitalize()

    def diff(self, other):
        """
        Diff function. Compares two PARAMETER files and indicates which
        parameters are the same and which are not. Useful for checking whether
        two runs were done using the same parameters.

        Args:
            other: The other PARAMETER dictionary to compare to.

        Returns:
            Dict of the format {"Same" : parameters_that_are_the_same,
            "Different": parameters_that_are_different} Note that the
            parameters are return as full dictionaries of values.
        """
        similar_param = {}
        different_param = {}
        for k1, v1 in self.items():
            if k1 not in other:
                different_param[k1] = {"FEFF_TAGS1": v1,
                                       "FEFF_TAGS2": "Default"}
            elif v1 != other[k1]:
                different_param[k1] = {"FEFF_TAGS1": v1,
                                       "FEFF_TAGS2": other[k1]}
            else:
                similar_param[k1] = v1
        # Second pass: keys present only in `other`.
        for k2, v2 in other.items():
            if k2 not in similar_param and k2 not in different_param:
                if k2 not in self:
                    different_param[k2] = {"FEFF_TAGS1": "Default",
                                           "FEFF_TAGS2": v2}
        return {"Same": similar_param, "Different": different_param}

    def __add__(self, other):
        """
        Add all the values of another FeffTags object to this object.
        Facilitates the use of "standard" FeffTags.

        Raises:
            ValueError: if the two objects disagree on a shared key.
        """
        params = {k: v for k, v in self.items()}
        for k, v in other.items():
            if k in self and v != self[k]:
                raise ValueError("FeffTags have conflicting values!")
            else:
                params[k] = v
        return FeffTags(params)
class FeffPot(MSONable):
    """
    Object for representing Atomic Potentials, placed in feff.inp file
    """

    def __init__(self, struct, central_atom):
        """
        Args:
            struct: Structure object. See pymatgen.core.structure.Structure.
            central_atom: Absorbing atom symbol

        Raises:
            ValueError: if the structure has partial occupancies.
        """
        self._central_atom = central_atom
        if struct.is_ordered:
            self._struct = struct
            self._site_symbols = []
            self._natoms = []
            syms = [site.specie.symbol for site in struct]
            # Group *consecutive* identical symbols; sites must therefore be
            # ordered by species for the counts to be meaningful.
            for (s, data) in itertools.groupby(syms):
                self._site_symbols.append(s)
                self._natoms.append(len(tuple(data)))
            # Unique symbols in first-seen order.
            unique_pot_atoms = []
            [unique_pot_atoms.append(i) for i in syms
             if not unique_pot_atoms.count(i)]
            self._pot_dict = {}
            # Potential indices are 1-based; index 0 is reserved for the
            # absorbing atom.
            for i, atom in enumerate(unique_pot_atoms):
                self._pot_dict[atom] = i + 1
        else:
            raise ValueError("Structure with partial occupancies cannot be "
                             "converted into atomic coordinates!")

    @property
    def central_atom(self):
        """
        Returns Central absorbing atom
        """
        return self._central_atom

    @property
    def pot_dict(self):
        """
        Returns dictionary of potential indexes
        """
        return self._pot_dict

    @property
    def site_symbols(self):
        """
        Symbols for each site.
        """
        return self._site_symbols

    @property
    def struct(self):
        """
        Structure associated with the atomic coordinates.
        """
        return self._struct

    def as_dict(self):
        """
        Return Dictionary representation of FeffPot object
        """
        return {'@module': self.__class__.__module__,
                '@class': self.__class__.__name__,
                'structure': self._struct.as_dict(),
                'central_atom': self._central_atom}

    @staticmethod
    def from_dict(d):
        """
        Returns FeffPot object from dictionary

        Args:
            d: dictionary of FeffPot input parameters
        """
        return FeffPot(Structure.from_dict(d['structure']),
                       d['central_atom'])

    @staticmethod
    def pot_string_from_file(filename='feff.inp'):
        """
        Reads Potential parameters from a feff.inp or FEFFPOT file.
        The lines are arranged as follows:

            ipot Z element lmax1 lmax2 stoichometry spinph

        Args:
            filename: file name containing potential data.

        Returns:
            FEFFPOT string.
        """
        with zopen(filename, "r") as f_object:
            f = f_object.readlines()
            # Scan state:
            #   ln            -- line counter relative to POTENTIALS header
            #   pot_tag       -- >= 0 once the POTENTIALS header was found
            #   pot_data      -- next expected sequential potential index
            #   pot_data_over -- 1 while still inside the potential block
            ln = -1
            pot_str = ["POTENTIALS\n"]
            pot_tag = -1
            pot_data = 0
            pot_data_over = 1
            for line in f:
                if pot_data_over == 1:
                    ln += 1
                    if pot_tag == -1:
                        pot_tag = line.find("POTENTIALS")
                        ln = 0
                    if pot_tag >= 0 and ln > 0 and pot_data_over > 0:
                        try:
                            # Accept lines whose first token continues the
                            # 0,1,2,... index sequence.
                            if int(line.split()[0]) == pot_data:
                                pot_data += 1
                                pot_str.append(line.replace("\r", ""))
                        except (ValueError, IndexError):
                            # First non-data line after data ends the scan.
                            if pot_data > 0:
                                pot_data_over = 0
        return ''.join(pot_str)

    @staticmethod
    def pot_dict_from_string(pot_data):
        """
        Creates atomic symbol/potential number dictionary
        forward and reverse

        Arg:
            pot_data: potential data in string format

        Returns:
            forward and reverse atom symbol and potential number dictionaries.
        """
        pot_dict = {}
        pot_dict_reverse = {}
        begin = 0
        ln = -1
        for line in pot_data.split("\n"):
            try:
                # Data starts at the first line whose first token is "0"
                # (the absorbing-atom potential).
                if begin == 0 and line.split()[0] == "0":
                    begin += 1
                    ln = 0
                if begin == 1:
                    ln += 1
                if ln > 0:
                    atom = line.split()[2]
                    index = int(line.split()[0])
                    pot_dict[atom] = index
                    pot_dict_reverse[index] = atom
            except (ValueError, IndexError):
                # Skip malformed or blank lines.
                pass
        return pot_dict, pot_dict_reverse

    def __str__(self):
        """
        Returns a string representation of potential parameters to be used in
        the feff.inp file,
        determined from structure object.
        The lines are arranged as follows:

            ipot Z element lmax1 lmax2 stoichometry spinph

        Returns:
            String representation of Atomic Coordinate Shells.
        """
        noelements = len(self.struct.composition.items())
        nopts = len(self.struct.species)
        ptatoms = []
        for i in range(0, nopts):
            ptatoms.append(self.struct.species[i].symbol)
        index = ptatoms.index(self.central_atom)
        center = self.struct.species[index]
        cs = center.symbol
        cz = center.Z
        # Row 0 is the absorbing atom (ipot 0, near-zero stoichiometry).
        ipotrow = [[0, cz, cs, -1, -1, .0001, 0]]
        for i in range(0, noelements):
            center = list(self.struct.composition.items())[i][0]
            cs = center.symbol
            cz = center.Z
            ipot = self.pot_dict[cs]
            stoic = list(self.struct.composition.items())[i][1]
            ipotrow.append([ipot, cz, cs, -1, -1, stoic, 0])
        ipot_sorted = sorted(ipotrow, key=itemgetter(0))
        ipotrow = str(tabulate(ipot_sorted,
                               headers=["*ipot", "Z", "tag", "lmax1",
                                        "lmax2", "xnatph(stoichometry)",
                                        "spinph"]))
        # tabulate's "--" separators become Feff "**" comment markers.
        ipotlist = ipotrow.replace("--", "**")
        ipotlist = ''.join(["POTENTIALS\n", ipotlist])
        return ipotlist

    def write_file(self, filename='POTENTIALS'):
        """
        Write to a filename.

        Args:
            filename: filename and path to write potential file to.
        """
        with open(filename, "w") as f:
            f.write(str(self) + "\n")
class FeffLdos(MSONable):
    """
    Parser for ldos files ldos01, ldos02, .....

    Args:
        complete_dos: complete_dos dictionary as defined in pymatgen.dos
            .CompleteDos
        charge_transfer: computed charge transfer between atoms dictionary
    """

    def __init__(self, complete_dos, charge_transfer):
        self._complete_dos = complete_dos
        self._cht = charge_transfer

    @staticmethod
    def from_file(filename1='feff.inp', filename2='ldos'):
        """
        Creates FeffLdos object from raw Feff ldos files by
        assuming they are numbered consecutively, i.e. ldos01.dat
        ldos02.dat...

        Args:
            filename1: input file of run to obtain structure
            filename2: output ldos file of run to obtain dos info, etc.
        """
        ldos_filename = filename2
        header_str = Header.header_string_from_file(filename1)
        header = Header.from_string(header_str)
        structure = header.struct
        nsites = structure.num_sites
        pot_string = FeffPot.pot_string_from_file(filename1)
        dicts = FeffPot.pot_dict_from_string(pot_string)
        pot_dict = dicts[0]
        # The Fermi energy is read from the first line of ldos00.dat.
        with zopen(ldos_filename + "00.dat", "r") as fobject:
            f = fobject.readlines()
            efermi = float(f[0].split()[4])
        dos_energies = []
        ldos = {}
        # Load one ldosNN.dat array per potential (NN zero-padded to 2).
        for i in range(1, len(pot_dict) + 1):
            if len(str(i)) == 1:
                ldos[i] = np.loadtxt("{}0{}.dat".format(ldos_filename, i))
            else:
                ldos[i] = np.loadtxt("{}{}.dat".format(ldos_filename, i))
        # Energy grid: first column of the first ldos file.
        for i in range(0, len(ldos[1])):
            dos_energies.append(ldos[1][i][0])
        all_pdos = []
        vorb = {"s": Orbital.s, "p": Orbital.py, "d": Orbital.dxy,
                "f": Orbital.f0}
        forb = {"s": 0, "p": 1, "d": 2, "f": 3}
        dlength = len(ldos[1])
        for i in range(nsites):
            pot_index = pot_dict[structure.species[i].symbol]
            all_pdos.append(defaultdict(dict))
            for k, v in vorb.items():
                density = [ldos[pot_index][j][forb[k] + 1]
                           for j in range(dlength)]
                updos = density
                # Spin-polarised output is not parsed here; downdos is
                # always None, so only Spin.up is ever populated.
                downdos = None
                if downdos:
                    all_pdos[-1][v] = {Spin.up: updos, Spin.down: downdos}
                else:
                    all_pdos[-1][v] = {Spin.up: updos}
        pdos = all_pdos
        vorb2 = {0: Orbital.s, 1: Orbital.py, 2: Orbital.dxy, 3: Orbital.f0}
        pdoss = {structure[i]: {v: pdos[i][v]
                                for v in vorb2.values()}
                 for i in range(len(pdos))}
        forb = {"s": 0, "p": 1, "d": 2, "f": 3}
        # Total DOS = sum of all orbital densities over all sites.
        tdos = [0] * dlength
        for i in range(nsites):
            pot_index = pot_dict[structure.species[i].symbol]
            for v in forb.values():
                density = [ldos[pot_index][j][v + 1] for j in range(dlength)]
                for j in range(dlength):
                    tdos[j] = tdos[j] + density[j]
        tdos = {Spin.up: tdos}
        dos = Dos(efermi, dos_energies, tdos)
        complete_dos = CompleteDos(structure, dos, pdoss)
        charge_transfer = FeffLdos.charge_transfer_from_file(filename1,
                                                            filename2)
        return FeffLdos(complete_dos, charge_transfer)

    @property
    def complete_dos(self):
        """returns Complete Dos"""
        return self._complete_dos

    @property
    def charge_transfer(self):
        """returns charge transfer between atoms dictionary"""
        return self._cht

    def as_dict(self):
        """
        returns Json-serializable dict representation of complete dos
        """
        return {'@module': self.__class__.__module__,
                '@class': self.__class__.__name__,
                'complete_dos': self._complete_dos.as_dict(),
                'charge_transfer': self.charge_transfer}

    @staticmethod
    def from_dict(d):
        """
        Returns FeffLdos object from dict representation

        Args:
            d: dict with 'complete_dos' and 'charge_transfer' entries.
        """
        complete_dos = CompleteDos.from_dict(d['complete_dos'])
        charge_transfer = d['charge_transfer']
        return FeffLdos(complete_dos, charge_transfer)

    @staticmethod
    def charge_transfer_from_file(filename1, filename2):
        """
        Get charge transfer from file.

        Args:
            filename1: name of feff.inp file for run
            filename2: ldos filename for run, assume consecutive order, i.e.,
                ldos01.dat, ldos02.dat....

        Returns:
            dictionary of dictionaries in order of potential sites
            ({"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232}, ...)
        """
        cht = OrderedDict()
        pot_string = FeffPot.pot_string_from_file(filename1)
        dicts = FeffPot.pot_dict_from_string(pot_string)
        pot_dict = dicts[1]
        # Potential 0 is the absorber, hence the "+ 1" upper bound.
        for i in range(0, len(dicts[0]) + 1):
            if len(str(i)) == 1:
                with zopen("{}0{}.dat".format(filename2, i), "r") \
                        as fobject:
                    f = fobject.readlines()
                    # Per-orbital charges are read from fixed header lines.
                    s = float(f[3].split()[2])
                    p = float(f[4].split()[2])
                    d = float(f[5].split()[2])
                    f1 = float(f[6].split()[2])
                    tot = float(f[1].split()[4])
                    cht[str(i)] = {pot_dict[i]: {'s': s, 'p': p, 'd': d,
                                                 'f': f1,
                                                 'tot': tot}}
            else:
                with zopen(filename2 + str(i) + ".dat", "r") as fid:
                    f = fid.readlines()
                    s = float(f[3].split()[2])
                    p = float(f[4].split()[2])
                    d = float(f[5].split()[2])
                    f1 = float(f[6].split()[2])
                    tot = float(f[1].split()[4])
                    cht[str(i)] = {pot_dict[i]: {'s': s, 'p': p, 'd': d,
                                                 'f': f1,
                                                 'tot': tot}}
        return cht

    def charge_transfer_to_string(self):
        """returns charge transfer as string"""
        ch = self.charge_transfer
        chts = ['\nCharge Transfer\n\nCentral atom']
        for i in range(len(ch)):
            for atom, v2 in ch[str(i)].items():
                a = ['\n', atom, '\n', 's ', str(v2['s']), '\n',
                     'p ', str(v2['p']), '\n',
                     'd ', str(v2['d']), '\n',
                     'f ', str(v2['f']), '\n',
                     'tot ', str(v2['tot']), '\n']
                chts.extend(a)
        return ''.join(chts)
class Xmu(MSONable):
    """
    Parser for data in xmu.dat

    Reads in data from xmu Feff file for plotting.
    This file contains the absorption cross-sections
    for the single absorber and absorber in solid.

    Args:
        header: Header object
        parameters: FeffTags object
        pots: FeffPot string
        data: numpy data array of cross_sections

    Default attributes:
        xmu:
            Photon absorption cross section of absorber atom in material
        mu:
            Photon absorption cross section of single absorber atom
        Energies:
            Energies of data point
        Edge:
            Absorption Edge
        Absorbing atom:
            Species of absorbing atom
        Material:
            Formula of material
        Source:
            Source of structure
        Calculation:
            Type of Feff calculation performed
        as_dict: creates a dictionary representation of attributes and data
    """

    def __init__(self, header, parameters, central_atom, data):
        self._header = header
        self._parameters = parameters
        self._central_atom = central_atom
        self._data = data

    @staticmethod
    def from_file(filename="xmu.dat", input_filename="feff.inp"):
        """
        Get Xmu from file.

        Args:
            filename: filename and path for xmu.dat
            input_filename: filename and path of feff.inp input file

        Returns:
            Xmu object
        """
        data = np.loadtxt(filename)
        header = Header.from_file(input_filename)
        parameters = FeffTags.from_file(input_filename)
        pots = FeffPot.pot_string_from_file(input_filename)
        # The absorbing atom symbol is the third column of the first
        # potential row (ipot 0) in the POTENTIALS block.
        central_atom = pots.splitlines()[1].split()[2]
        return Xmu(header, parameters, central_atom, data)

    @property
    def data(self):
        """Returns the raw numpy data array."""
        return self._data

    @property
    def energies(self):
        """Returns energies for cross-section plots (first data column)."""
        energies = []
        for i in range(len(self._data)):
            energy = self._data[i][0]
            energies[len(energies):] = [energy]
        return energies

    @property
    def across_section(self):
        """Returns absorption cross-section of absorbing atom in solid
        (fourth data column)."""
        across = []
        for i in range(len(self._data)):
            a = self._data[i][3]
            across[len(across):] = [a]
        return across

    @property
    def scross_section(self):
        """Returns absorption cross-section for the single absorbing atom
        (fifth data column)."""
        scross = []
        for i in range(len(self._data)):
            s = self._data[i][4]
            scross[len(scross):] = [s]
        return scross

    @property
    def source(self):
        """
        Returns source identification from Header file
        """
        return self._header.source

    @property
    def calc(self):
        """
        Returns type of Feff calculation, XANES or EXAFS from feff.inp file
        """
        if "XANES" in self._parameters:
            calc = "XANES"
        else:
            calc = "EXAFS"
        return calc

    @property
    def material_formula(self):
        """Returns chemical formula of material from feff.inp file"""
        try:
            form = self._header.formula
        except IndexError:
            form = 'No formula provided'
        return "".join(map(str, form))

    @property
    def absorbing_atom(self):
        """Returns absorbing atom symbol from feff.inp file"""
        return self._central_atom

    @property
    def edge(self):
        """Returns excitation edge from feff.inp file"""
        return self._parameters["EDGE"]

    def as_dict(self):
        """Returns Dictionary of attributes and data, suitable to
        reproduce the object using the from_dict staticmethod"""
        data_list = self._data.tolist()
        return {'@module': self.__class__.__module__,
                '@class': self.__class__.__name__,
                'energies': self.energies, 'across': self.across_section,
                'scross': self.scross_section, 'atom': self.absorbing_atom,
                'edge': self.edge, 'source': self.source, 'calc': self.calc,
                'formula': self.material_formula,
                'HEADER': self._header.as_dict(), 'TAGS': self._parameters,
                'c_atom': self._central_atom, 'xmu': data_list}

    @staticmethod
    def from_dict(xdict):
        """
        Returns Xmu object from dictionary
        """
        header = Header.from_dict(xdict['HEADER'])
        return Xmu(header, xdict['TAGS'], xdict['c_atom'],
                   np.array(xdict['xmu']))
class FeffParserError(Exception):
    """
    Exception raised when a Feff structure/file has problems, e.g. atoms
    that are too close together.

    The original message is kept on the ``msg`` attribute.
    """

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return "".join(["FeffParserError : ", self.msg])
=======
from __future__ import unicode_literals
"""
This package provides the modules to perform FEFF IO.
FEFF: http://feffproject.org/feffproject-feff.html
"""
from .inputs import *
from .outputs import *
>>>>>>> a41cc069c865a5d0f35d0731f92c547467395b1b
| Bismarrck/pymatgen | pymatgen/io/feff/__init__.py | Python | mit | 44,572 | [
"FEFF",
"pymatgen"
] | 0ba15de29cd944329fd3a4e3bd043ea3f0c923b28be89f7c33070507304f230a |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script computes the permanence and the number of switches of a discrete trait on the tree
trunk.
## Example
$ python3 trunktraitevolution.py -i input_tree.tree --input-format nexus --feature location
## Arguments:
#### mandatory arguments:
1. -i INPUT_FILE Input filepath of the tree file
2. --input-format INPUT_FORMAT String indicating tree file format (i.e. nexus, newick, phy, ...)
#### optional arguments:
1. -o/--out OUTPUT_FILE Output tree file name
2. -l/--label DATA_LABEL Additional label for the data contained in the tree file
3. --feature FEATURE_ANNOTATION Discrete trait to monitor for trunk switches
4. --trunk-threshold TRUNK_THRESHOLD Integer value indicating the lower bound to define the tree
trunk. When not defined, all the tree will be considered
4. --log LOG_LEVEL Log level for the routine (--log=INFO)
5. --log_to_file LOG_TO_FILE Log filename for the routine
## Compatibility
`trunktraitevolution` has been tested on Python 3.4
## Contributing
`trunktraitevolution` [is on GitHub](https://github.com/gattil/phylo-tools). Pull requests and bug
reports are welcome.
## Licence
`trunktraitevolution` is in the public domain under MIT licence
> The MIT License (MIT)
> Copyright (c) 2016 Lorenzo Gatti
> Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
> The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Import section (built-in modules|third-party modules)
import sys
import argparse
import dendropy
import os.path
import logging
import csv
# Authorship information
# (standard module-metadata dunders, used by introspection/packaging tools)
# NOTE(review): the module docstring above declares an MIT licence while
# __license__ says "GPL" -- reconcile the two.
__project__ = 'phylo-tools'
__product__ = 'trunktraitevolution'
__editor__ = 'PyCharm'
__author__ = 'lorenzogatti'
__copyright__ = "Copyright 2016, Applied Computational Genomics Team (ACGT)"
__credits__ = ["Lorenzo Gatti"]
__license__ = "GPL"
__date__ = '16/06/16'
__version__ = "1.0"
__maintainer__ = "Lorenzo Gatti"
__email__ = "lorenzo.gatti@zhaw.ch"
__status__ = "Development"
# Routines
def arg_parser():
    """
    Parse the arguments passed from the command line to the script.

    Returns:
        argparse.Namespace containing all the recognised arguments,
        properly formatted.
    """
    parser = argparse.ArgumentParser(prog='findtrunk.py',
                                     description='Retrieve the phylogenetic trunk from the tree '
                                                 'topology using a reverse-tree-traversal '
                                                 'approach.')
    parser.add_argument("-i", "--in", type=str, dest="input_file", required=True,
                        help='Input tree file')
    parser.add_argument("--input-format", type=str, dest="input_format", required=True,
                        help='Input tree file format')
    parser.add_argument("-o", "--out", dest="output_file", type=str, required=False,
                        default='', help='Output tree file')
    parser.add_argument("--feature", dest="feature_annotation", type=str, required=True,
                        default='', help='Discrete trait')
    parser.add_argument("--trunk-threshold", dest="trunk_threshold", type=int, required=False,
                        default=0, help='Trunk value threshold')
    parser.add_argument("-l", "--label", dest="data_label", type=str, required=False,
                        default='', help='Label for the data contained in the tree')
    parser.add_argument("--log", dest="log_level", type=str, required=False,
                        default='INFO', help='Log level for the routine (--log=INFO)')
    # BUGFIX: was ``type=bool``, an argparse trap -- bool() on any non-empty
    # command-line string (even "False") is True. The help text says this
    # option carries a log *filename*, so store it as a string. The default
    # ('') and the dest are unchanged, keeping truthiness-based callers
    # backward compatible.
    parser.add_argument("--log_to_file", dest="log_to_file", type=str, required=False,
                        default='', help='Log filename for the routine')
    return parser.parse_args()
def main(args):
    """
    Identify the number of switches and the permanence of a discrete trait
    on the tree trunk.

    Args:
        args: the namespace returned by `arg_parser`.

    Side effects:
        Writes ``<output>_switches.csv`` (one row per trunk edge) and
        ``<output>_summary.csv`` (total permanence per trait value), and
        configures the root logger.
    """
    # Prepare output variables: default output prefix next to the input.
    if not args.output_file:
        filename = os.path.dirname(args.input_file) + '/' + args.data_label + '_' \
                   + args.feature_annotation
        args.output_file = filename
    # Prepare logging device
    numeric_level = getattr(logging, args.log_level.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.log_level)
    logging.getLogger(__product__)
    # Default log path (kept on ``args`` for backward compatibility).
    default_log_file = os.path.dirname(args.input_file) + '/' + args.data_label + '_out.log'
    if args.log_to_file:
        # BUGFIX: the original referenced ``args.log_file`` here, an
        # attribute that ``arg_parser`` never defines, so enabling file
        # logging always raised AttributeError. Use the value of
        # --log_to_file when it is a filename string, otherwise fall back
        # to the default path.
        log_file = args.log_to_file \
            if isinstance(args.log_to_file, str) and args.log_to_file \
            else default_log_file
        args.log_file = log_file
        logging.basicConfig(filename=log_file,
                            level=numeric_level,
                            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S %p')
    else:
        args.log_file = default_log_file
        logging.basicConfig(level=numeric_level,
                            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S %p')
    logging.debug(__project__ + ":" + __product__ + " - Execution started")
    # Read the tree file
    tree = dendropy.Tree.get(path=args.input_file,
                             schema=args.input_format,
                             extract_comment_metadata=True)
    feature_permanence = dict()
    with open(args.output_file + '_switches.csv', 'w') as csvfile:
        fieldnames = ['FROM-ID', 'TO-ID', 'F-AGE', 'T-AGE', 'DURATION', 'VFROM', 'VTO', 'C']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        sc = 0  # total number of trait switches observed on the trunk
        # Walk the topology root-first; per node, follow each child edge on
        # the trunk and accumulate branch lengths per (from, to) trait pair.
        for node in tree.preorder_node_iter():
            logging.debug(__project__ + ":" + __product__ + " - Visiting node " + str(node.label))
            # Skip nodes already handled as children of an earlier node.
            # NOTE(review): relies on dendropy's private ``_annotations``
            # attribute -- confirm against the installed dendropy version.
            if not node._annotations.get_value('visited'):
                parent_annotation = node._annotations.get_value(args.feature_annotation)
                parent_id = node.label
                parent_height = node._annotations.get_value('height')
                for child in node.child_nodes():
                    # Only labelled children carry usable annotations.
                    if child.label:
                        # Restrict the analysis to the tree trunk.
                        if int(child._annotations.get_value('trunk')) > args.trunk_threshold:
                            child_annotation = child._annotations.get_value(args.feature_annotation)
                            # Accumulate permanence (branch length) for the
                            # (parent trait, child trait) pair.
                            if parent_annotation not in feature_permanence:
                                feature_permanence[parent_annotation] = {}
                            if child_annotation not in feature_permanence[parent_annotation]:
                                feature_permanence[parent_annotation][child_annotation] = \
                                    child.edge.length
                            else:
                                feature_permanence[parent_annotation][child_annotation] += \
                                    child.edge.length
                            # Call switches: c flags a trait change on this
                            # edge; sc totals them.
                            if parent_annotation != child_annotation:
                                c = 1
                                sc += 1
                            else:
                                c = 0
                            # Store one row per trunk edge.
                            writer.writerow({'FROM-ID': parent_id,
                                             'TO-ID': child.label,
                                             'F-AGE': parent_height,
                                             'T-AGE': child._annotations.get_value('height'),
                                             'DURATION': child.edge.length,
                                             'VFROM': parent_annotation,
                                             'VTO': child_annotation,
                                             'C': c})
                            # Re-assigning internal values: the child becomes
                            # the reference point for the next trunk edge.
                            parent_annotation = child_annotation
                            parent_id = child.label
                            parent_height = child._annotations.get_value('height')
                        # Mark the child visited so the outer traversal does
                        # not process it again.
                        child.annotations.add_new(name="visited", value=1)
    logging.info(__project__ + ":" + __product__ + " - The discrete trait [" +
                 args.feature_annotation + '] shows ' + str(sc) + ' switches on the trunk')
    with open(args.output_file + '_summary.csv', 'w') as csvfile:
        fieldnames = ['VFROM', 'VTO', 'DURATION']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        # Only self-transitions (no switch) count as permanence time.
        for vfrom in feature_permanence:
            for vto in feature_permanence[vfrom]:
                if vfrom == vto:
                    writer.writerow({'VFROM': vfrom,
                                     'VTO': vto,
                                     'DURATION': feature_permanence[vfrom][vto]})
# Script entry point: parse CLI arguments, run the analysis, exit cleanly.
if __name__ == "__main__":
    main(arg_parser())
    sys.exit(0)
| gattil/phylo-tools | trunktraitevolution.py | Python | mit | 11,842 | [
"VisIt"
] | 7e0c648856aee4798bd8db747dd4a51e226d9ca0f8cd1e75063aa2b30c07feb1 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
VTKBot is an IRC bot
Copyright (C) 2010 Mathias De Maré
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2
of the License, no other.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import settings
from twisted.protocols.basic import LineOnlyReceiver
from twisted.internet.protocol import ClientFactory
from twisted.internet import reactor
from sqlalchemy import create_engine
from sqlalchemy.orm import clear_mappers
import re
import os
import imp
import logging
from plugin import Plugin
# mIRC colour-code lookup: colour name -> control-code digit (as a string).
TEXT_COLOURS = dict(WHITE="0",
                    BLACK="1",
                    BLUE="2",
                    GREEN="3",
                    RED="4")
class VTKBot(LineOnlyReceiver):
#========================
# CONNECTION SETUP #
#========================
def connectionMade(self):
    """Twisted callback fired once the TCP connection is established.

    Registers with the IRC server (USER/NICK) and schedules the channel
    joins and plugin start-up a few seconds later.
    """
    #Authenticate
    self.send_message("USER %s %s %s %s" % (self.factory.username, self.factory.host, self.factory.server, self.factory.realname))
    self.send_message("NICK %s" % self.factory.nickname)
    #Join channels (delayed to give registration time to complete)
    reactor.callLater(10, self.joinChannels)
    #Execute 'connectionMade plugin commands'
    # NOTE(review): ``on_connected`` is not defined in this chunk;
    # presumably provided further down the class or by a plugin -- confirm.
    reactor.callLater(11, self.on_connected)
def joinChannels(self):
    """Join every channel listed in the factory configuration."""
    for chan in self.factory.channels:
        self.send_join(chan)
#========================
# SENDING MESSAGES #
#========================
#Send a raw IRC message
def send_message(self, message):
    """Append a newline, encode as UTF-8 and write to the transport.

    Characters that cannot be encoded are silently dropped ('ignore').
    """
    message = message + '\n'
    message = message.encode('utf-8', 'ignore')
    # Python 2 print statement: echoes outgoing traffic for debugging.
    print message
    self.transport.write(message)
#Send PONG back after receiving ping
def send_pong(self, target):
    """Reply to a server PING with the matching PONG."""
    self.send_message("PONG %s" % target)

#Send JOIN message to enter a channel
def send_join(self, channel):
    """Ask the server to join *channel*."""
    self.send_message("JOIN %s" % channel)

#Send PART message to leave a channel
def send_leave(self, channel, reason):
    """Leave *channel*, giving *reason* as the part message."""
    self.send_message("PART %s :%s" % (channel, reason))

def send_kick(self, channel, user, reason):
    """Kick *user* from *channel* with the given *reason*."""
    self.send_message("KICK %s %s :%s" % (channel, user, reason))

#Kill a nickname
def send_kill(self, nick, comment):
    """Issue a server KILL for *nick* (requires operator privileges)."""
    self.send_message("KILL %s : %s" % (nick, comment))

#Set user modes
def send_user_mode(self, target, mode):
    """Set user *mode* on *target* (a nickname)."""
    self.send_message("MODE %s %s" % (target, mode))
def send_channel_message(self, channel, message, colour=None, bold=False):
    """Send a PRIVMSG to *channel*, optionally coloured and/or bold.

    Args:
        channel: target channel name (e.g. "#channel").
        message: text to send.
        colour: optional colour passed to self.coloured_message
            (presumably a TEXT_COLOURS key -- confirm with that helper).
        bold: when truthy, the message is wrapped by self.bold_message.
    """
    # Idiom fixes: compare against None with ``is not None`` instead of
    # ``!= None``, and treat ``bold`` as a plain boolean flag instead of
    # the fragile ``bold == True`` comparison.
    if colour is not None:
        message = self.coloured_message(message, colour)
    if bold:
        message = self.bold_message(message)
    self.send_message("PRIVMSG %s :%s" % (channel, message))
#Set channel modes
def send_channel_mode(self, target, mode, user=None):
if user == None:
self.send_message("MODE %s %s" % (target, mode))
else:
self.send_message("MODE %s %s %s" % (target, mode, user))
#Identify as operator
def send_oper(self, name="root", password="12345"):
self.send_message("OPER %s %s" % (name, password))
#Send private message to user
def send_private_message(self, nick, message):
self.send_message("PRIVMSG %s :%s" % (nick, message))
#========================
# RECEIVING MESSAGES #
#========================
#Received a raw IRC message
def lineReceived(self, message):
print message
#Try to decode the message -- http://en.wikipedia.org/wiki/Internet_Relay_Chat#Character_encoding
try:
message = message.decode('utf-8')
except:
try:
message = message.decode('iso-8859-1')
except:
message = message.decode('utf-8', 'ignore')
message = message.replace('\r', '').replace('\n', '')
#INVITE message
match = re.match(":([^ ]*?)!([^ ]*?)@([^ ]*?) INVITE ([^ ]*?) :(.*)", message)
if match:
self.on_invite(match.group(1), match.group(2), match.group(3), match.group(4), match.group(5))
return
#JOIN message
match = re.match(":([^ ]*?)!([^ ]*?)@([^ ]*?) JOIN :(.*)", message)
if match:
self.on_user_join(match.group(1), match.group(2), match.group(3), match.group(4))
return
#KICK message
match = re.match(":([^ ]*?)!([^ ]*?)@([^ ]*?) KICK ([^ ]*?) ([^ ]*?) :", message)
if match:
print 'KICKIII'
self.on_kick(match.group(1), match.group(2), match.group(3), match.group(4), match.group(5))
#NOTICE message
match = re.match("[^ ]* NOTICE ([^ ]*) :(.*)", message)
if match:
self.on_notice(match.group(1), match.group(2))
return
#PART message
match = re.match(":([^ ]*?)!([^ ]*?)@([^ ]*?) PART (.*?) :(.*)", message)
if match:
self.on_user_leave(match.group(1), match.group(2), match.group(3), match.group(4), match.group(5))
return
#PING message
match = re.match("PING ([^ ]*)", message)
if match:
self.on_ping(match.group(1))
return
#PRIVATE message
match = re.match(":([^ ]*?)!([^ ]*?)@([^ ]*?) PRIVMSG ([^ ]*?) :([^\n\r]*)", message)
if match:
if match.group(4) == self.factory.nickname:
self.on_private_message(match.group(1), match.group(2), match.group(3), match.group(5))
else:
self.on_channel_message(match.group(1), match.group(2), match.group(3), match.group(4), match.group(5))
return
#CODE 'You are banned'
match = re.match(":[^ ]*? 474 ([^ ]*?) ([^ ]*?) :(.*)", message)
if match:
self.on_banned_code(match.group(1), match.group(2))
return
#CODE 'Banlist
match = re.match(":[^ ]*? 367 (.*?) (.*?) (.*?) (.*?) (.*)", message)
if match:
self.banlist += [(match.group(2), match.group(3), match.group(4), match.group(5))]
return
#CODE 'End-of-banlist'
match = re.match(":[^ ]*? 368 (.*?) (.*?) :", message)
if match:
oldbanlist = self.banlist
self.banlist = []
self.on_banlist(oldbanlist)
return
#Server closes link
match = re.match("ERROR :Closing Link: ", message)
if match:
self.on_link_close()
return
#Received a banlist
def on_banlist(self, banlist):
pass
#Received a channel ban
def on_banned_code(self, nick, channel):
pass
#Received a channel creation message
def on_channel_create(self, channel, nick, nickmask, hostmask):
pass
#Received a channel message
def on_channel_message(self, nick, nickmask, hostmask, channel, message):
for plugin in self.factory.plugins:
if plugin.channel_message_rule != "":
match = re.match(plugin.channel_message_rule, message)
if match:
plugin.on_channel_message(self, nick, nickmask, hostmask, channel, message, match)
#Received a channel invitation
def on_invite(self, nick, nickmask, hostmask, invitee, channel):
pass
#Received a channel kick
def on_kick(self, nick, nickmask, hostmask, channel, target):
for plugin in self.factory.plugins:
if plugin.channel_kick_rule != "":
plugin.on_kick(self, nick, nickmask, hostmask, channel, target)
#Received a notice (careful when overriding, there are a lot of subnotices!)
def on_notice(self, nick, text):
#Channel creation
match = re.match("\*\*\* CHANCREATE: Channel ([^ ]*?) created by ([^ ]*?)\!([^ ]*?)@([^ ]*)", text)
if match:
self.on_channel_create(match.group(1), match.group(2), match.group(3), match.group(4))
return
#User quit
match = re.match("\*\*\* QUIT: Client exiting: ([^ ]*?)\!([^ ]*?)@([^ ]*?) ", text)
if match:
self.on_user_quit(match.group(1), match.group(2), match.group(3))
return
#User connect
match = re.match("\*\*\* CONNECT: Client connecting on port [0-9]*?: ([^ ]*?)\!([^ ]*?)@([^ ]*?) ", text)
if match:
self.on_user_connect(match.group(1), match.group(2), match.group(3))
return
#Nickname changed
match = re.match("\*\*\* NICK: User ([^ ]*?) changed their nickname to ([^ ]*)\s+", text)
if match:
self.on_user_changed_nickname(match.group(1), match.group(2))
return
#Received a PING message (automatically answering with a PONG message)
def on_ping(self, target):
self.send_pong(target)
#Received a private message
def on_private_message(self, nick, nickmask, hostmask, message):
pass
#A user changed his nickname
def on_user_changed_nickname(self, old_nick, new_nick):
pass
#A user connected to the server
def on_user_connect(self, nick, nickmask, hostmask):
pass
#A user joined a channel
def on_user_join(self, nick, nickmask, hostmask, channel):
for plugin in self.factory.plugins:
if plugin.channel_message_rule != "":
plugin.on_user_join(self, nick, nickmask, hostmask, channel)
def on_user_leave(self, nick, nickmask, hostmask, channel, reason):
pass
#A user quit
def on_user_quit(self, nick, nickmask, hostmask):
pass
def on_connected(self):
for plugin in self.factory.plugins:
if plugin.connected_rule != "":
plugin.on_connected(self)
def on_link_close(self):
self.transport.loseConnection() #Lose the connection, let the factory reconnect
#=======================
# MODIFYING MESSAGES #
#=======================
def coloured_message(self, message, colour):
return chr(3) + TEXT_COLOURS[colour] + message + chr(3)
def bold_message(self, message):
return chr(2) + message + chr(2)
class VTKBotFactory(ClientFactory):
    """
    Twisted client factory for VTKBot: holds the connection configuration,
    the SQLAlchemy engine and the loaded plugin instances, and reconnects
    the protocol when the link fails or drops.
    """
    protocol = VTKBot
    # NOTE(review): 'settings' is not imported anywhere in this module;
    # presumably a global configuration module is expected -- confirm.
    # NOTE(review): the mutable default for 'channels' is shared between
    # calls; harmless for a singleton factory but worth knowing.
    def __init__(self, realname="RealName", host="localhost", server="localhost", port=9999, nickname="NickName", username="UserName",
                 databasefile="sqlite:///vtk.db", channels=["#test", "#test2"]):
        logging.basicConfig(filename=settings.core_logfile,level=settings.core_loglevel)
        self.logger = logging.getLogger('Factory')
        self.nickname = nickname
        self.username = username
        self.realname = realname
        self.host = host
        self.server = server
        self.port = port
        self.databasefile = databasefile
        self.engine = create_engine(self.databasefile, echo=True)
        self.channels = channels
        self.load_plugins()
    def clientConnectionFailed(self, connector, reason):
        "We didn't manage to establish a connection to the server. Wait some time before trying again."
        self.logger.warning('Failed to establish a connection to the server. Trying again later...')
        reactor.callLater(180, connector.connect)
    def clientConnectionLost(self, connector, reason):
        "We lost the connection to the server. Try again."
        self.logger.warning('Lost connection to the server. Trying again...')
        reactor.callLater(60, connector.connect)
    #Import every module in the plugin directory (optionally filtered by
    #settings.plugin_list) and instantiate each Plugin subclass found.
    def load_plugins(self):
        #Clear sqlalchemy data from old plugins
        clear_mappers()
        #Load source code
        if hasattr(settings, 'plugin_dir'):
            plugin_dir = settings.plugin_dir
        else:
            plugin_dir = 'plugins'
        for candidate_file in os.listdir(plugin_dir):
            if hasattr(settings, 'plugin_list') and not (candidate_file in settings.plugin_list):
                continue #There's a list of allowed plugins, and ours is not in it
            try:
                # [:-3] strips the '.py' suffix (Python 2 except syntax below)
                module = __import__(plugin_dir + '.' + candidate_file[:-3])
                reload(module)
            except Exception, (instance):
                print instance
        #See what classes we managed to load
        pluginclasses = Plugin.__subclasses__()
        self.logger.info('Plugins: ' + str(pluginclasses))
        self.plugins = []
        for pluginclass in pluginclasses:
            # __new__ + explicit __init__ keeps instantiation working even
            # for classes loaded via reload()
            plugin = object.__new__(pluginclass)
            plugin.__init__(self)
            plugin.create_database_tables()
            self.plugins.append(plugin)
    #Return True if 'candidate' ends with any entry of 'plugin_list'
    def endswith(self, candidate, plugin_list):
        for plugin in plugin_list:
            if candidate.endswith(plugin):
                return True
        return False
| Mathiasdm/VTKBot | core.py | Python | gpl-2.0 | 13,180 | [
"VTK"
] | 733ae752c693fa661b550396e217ce2a39f0af50ac77376acf22e7e4a248788f |
# verification of the implementation
# ------------------------------------
# for AGNs
# reproduce the plots from Bongiorno et al. 2010
# writes plots in data/eRoMok/
python reproduce_bongiorno_2010.py
BOX = MD10 or NUGC
#writes a summary file containing all information for each snapshot
#---------------------------
python $BOX_box-get-header.py
# rewrite rockstar ascii catalogs in smaller fits files with 20e6 lines each + halo mass cut.
python $BOX-write-clusterFiles.py # for the cluster calculations
python $BOX-write-smallFile.py # for the AGN calculations
# outputs in /work_agn or work_cluster
python2.7 MD10-check-small-file-1pt-fun.py
python2.7 MD10-check-small-file-1pt-fun-plots.py
# outputs in wwwDir/eRoMok/
# ALL look OK
# writes in the catalog dir
#---------------------------
# add stellar masses according to Moster et al. 2013
# to be updated to the Moster et al. 2017 model EMERGE
python2.7 MD10_add_Ms.py # (and all other scripts Ms_?)
python2.7 MD10-check-MS-file-1pt-fun.py
python2.7 MD10-check-MS-file-1pt-fun-plots.py
python2.7 plot_SMHMR.py
# measures the stellar mass function.
# Is now done in the tabulate duty cycle step
python2.7 measure_SMF.py
# outputs in $BOX_DIR/duty_cycle
python2.7 plot_SMF.py
# outputs in os.path.join(os.environ['MD10'],"results","stellar_mass_function", "images")
#########################################
# tabulates the duty cyle as a function of stellar mass
# forces the snapshot to reproduce the luminosity function from Bongiorno 2016
python2.7 MD10_tabulate_duty_cycle.py
# outputs in $BOX_DIR/duty_cycle
# add a flag for the activity to reproduce the host galaxy stellar mass function
# output in $MD10/work_agn/*_DC.fits
python2.7 MD10_add_AGN_activity.py
# add LSAR for AGNs using Bongiorno et al. 2016
# output in $MD10/work_agn/*_LSAR.fits
python2.7 MD10_add_LSAR.py
python2.7 MD10_add_LSAR_1.py
python2.7 MD10_add_LSAR_2.py
python2.7 MD10_add_LSAR_3.py
python2.7 MD10_add_LSAR_4.py
# create a single AGN file per snapshot
# output in $MD10/catalogs/*_LSAR.fits
python2.7 MD10_create_AGN_summary_file.py
# add obscuration following Buchner et al. 2016
# output in $MD10/work_agn/*_NH.fits
python2.7 MD10_add_AGN_obscuration.py
# add Xray luminosities for AGNs using Bongiorno et al. 2016 and Xray for clusters using Mantz et al. 2016
python MD10_add_Xray.py
python MD10_add_Xray_1.py
python MD10_add_Xray_2.py
python MD10_add_Xray_3.py
python MD10_add_Xray_4.py
python MD10_add_Xray_5.py
# outputs in $BOX_DIR/work_agn
#selects active AGNS and write the AGN snapshot in the catalog dir
python MD10_create_AGN_summary_file.py
# outputs in $BOX_DIR/catalogs/
# add 4MOST targets on top
python MD10_add_4MOST_AGN.py
python MD10_add_4MOST_CLUSTERS_bcg.py
python MD10_add_4MOST_CLUSTERS_members.py
python MD10_add_4MOST_COSMO.py
# 4MOST light cone
# eRosita light cone
python measure_SMF.py
python plot_SMF.py
python measure_HMF_tracer.py
python plot_HMF.py
python MD10-pie-plot.py
# AFTER LC is created
python MD10_select_AGN_lightcone_eRositaFlux_per_file.py
# read and writes here $MD10/light-cone/
# TB UPDATE FROM HERE ON
# plots and results
python plot_AGN_HGMF_duty_cycle.py
python plot_slice_simulation.py
#-------------------------------------------------------------------------
python plot_cluster_scaling_relations.py
python plot_LFX.py
python plot_Lxcut_Mhalo.py
python plot-Ms-xray.py
python test.py
| JohanComparat/nbody-npt-functions | bin/bin_SMHMr/run_analysis.py | Python | cc0-1.0 | 3,414 | [
"Galaxy"
] | 2377285a8ca4b5ff0bcc5d212becf0d00535ce063382de9920fca115eff087ae |
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
# -------- GLOBAL SCALAR DEFINITIONS --------------
# excitation - initial conditions
ex_type = 'plane'
alambda = 0.1 # wavelength
t_ex_sig = 1.0*alambda # width in time (pulse only)
x_ex_sig = 1.0*alambda # width in the x-direction (pulse)
toff_ex = 0.0 # offset in time
xoff_ex = 0.02 # offset in the x-direction
yoff_ex = 0.5 # offset in the y -direction
omega = 2.0*np.pi/alambda # frequency
# NOTE(review): omega/k should equal the phase speed v = 1/sqrt(epso*muo);
# with k = 2*pi*alambda the ratio is 1/alambda**2, so k = 2*pi/alambda may
# have been intended -- confirm before relying on kx_ex below.
k = 2.0*np.pi*alambda
amp_Ex = 1.
amp_Ey = 1.
amp_Hz = 1.
# refractive index n = sqrt(epso*muo)
rip_shape = 'interlayered'
# set background refractive index
epsilon_def = 1
epso = 1.0
muo = 1.0
# set moving refractive index perturbation ("RIP") parameters:
# velocities, amplitude and Gaussian widths for the electric/magnetic maps
x_vrip_e = 0.6
y_vrip_e = 0.0
x_vrip_m = x_vrip_e
y_vrip_m = y_vrip_e
prip = 0.1
xoff_rip_e = 0.2
yoff_rip_e = 0.0
xoff_rip_m = xoff_rip_e
yoff_rip_m = yoff_rip_e
sig_rip_e = .1
sig_rip_m = sig_rip_e
deltan = prip*(np.sqrt(epso*muo))
atampe = deltan*(2.0*1.5+deltan)
atampu = deltan*(2.0*1.5+deltan)
# multilayered definition: N_la layers of (la_e, la_m) of thickness t_la
# separated by N_lb spacers of (lb_e, lb_m) of thickness t_lb
N_la = 10
N_lb = N_la -1
t_la = 0.0015
t_lb = 0.0050
la_e = 2.5
la_m = 2.5
lb_e = 1.0
lb_m = 1.0
# pre-calculations for wave propagation
v = 1/np.sqrt(epso*muo)
vx_ex = v
vy_ex = 0.0
kx_ex = k
ky_ex = 0.0
# Grid - mesh settings
x_lower=0.; x_upper=10.; mx = np.floor(50*(x_upper-x_lower)/alambda)
y_lower=0
if rip_shape=='interlayered':
    # domain height is exactly the stack thickness; 10000 cells per unit length
    y_upper = N_la*t_la + N_lb*t_lb
    y_ex_sig = 0.95*y_upper
    my = np.floor((y_upper-y_lower)*10000)
    # NOTE(review): mlp is a float and is later used as a slice index in
    # refind(); that only works on old numpy versions -- confirm.
    mlp = (t_la+t_lb)*10000
else:
    y_upper = 1
    y_ex_sig = x_ex_sig
    my = np.floor(50*(y_upper-y_lower)/alambda)
# -------- GLOBAL FUNCTION DEFINITIONS --------------
def refind(t,X,Y):
    """
    deps = refind(t,x,y)
    Return the (epsilon, mu) material map at time t based on the module-level
    rip_shape setting; Gaussian cases support moving RIPs.
    x,y are the coordinates of the grid centers state.grid.e_j.centers, e_j = x,y.
    The result has shape [2, len(X), len(Y)] (Fortran order): index 0 is the
    electric map (epsilon-like), index 1 the magnetic map (mu-like).
    """
    y,x = np.meshgrid(Y,X)
    deps = np.empty( [2,len(X),len(Y)], order='F')
    if rip_shape=='gaussian2d':
        # moving 2-D Gaussian bump on a constant background
        deps[0,:,:] = atampe*np.exp(-((x-x_vrip_e*t-xoff_rip_e)**2 + (y-y_vrip_e*t-yoff_rip_e)**2)/sig_rip_e**2) + epso
        deps[1,:,:] = atampu*np.exp(-((x-x_vrip_m*t-xoff_rip_m)**2 + (y-y_vrip_m*t-yoff_rip_m)**2)/sig_rip_m**2) + muo
    elif rip_shape=='gaussian1dx':
        deps[0,:,:] = atampe*np.exp(-((x-x_vrip_e*t-xoff_rip_e)**2)/sig_rip_e**2) + epso
        deps[1,:,:] = atampu*np.exp(-((x-x_vrip_m*t-xoff_rip_m)**2)/sig_rip_m**2) + muo
    elif rip_shape=='gaussian1dy':
        deps[0,:,:] = atampe*np.exp(-((y-y_vrip_e*t-yoff_rip_e)**2)/sig_rip_e**2) + epso
        deps[1,:,:] = atampu*np.exp(-((y-y_vrip_m*t-yoff_rip_m)**2)/sig_rip_m**2) + muo
    elif rip_shape=='homogeneous':
        deps[0,:,:] = epso
        deps[1,:,:] = muo
    elif rip_shape=='interface':
        # NOTE(review): the electric map switches at x_upper/2 but the
        # magnetic map at the hard-coded 0.45 -- confirm this asymmetry.
        deps[0,:,:] = 1*(x<x_upper/2) + 4*(x>=x_upper/2)
        deps[1,:,:] = 1*(x<0.45) + 4*(x>=0.45)
    elif rip_shape=='interlayered':
        # background spacer values, then stamp in each high-index layer
        deps[0,:,:] = lb_e
        deps[1,:,:] = lb_m
        for m in range(0,N_la):
            # NOTE(review): mlp and t_la*10000 are floats used as slice
            # bounds; this fails on modern numpy -- confirm/intify.
            deps[0,:,m*mlp:m*mlp+(t_la*10000)] = la_e
            deps[1,:,m*mlp:m*mlp+(t_la*10000)] = la_m
    return deps
def update_aux(solver,state):
    """
    before_step hook: recompute the time-dependent auxiliary (epsilon, mu)
    arrays on the solution grid at the current simulation time.

    :param solver: the pyclaw solver invoking the hook (unused here)
    :param state: pyclaw State; state.aux is replaced in place
    """
    # Fix: the previous version also made an unused Fortran-ordered copy of
    # state.aux ('oldaux') every step -- dead code, removed.
    x = state.grid.x.centers
    y = state.grid.y.centers
    state.aux = setaux(state.t, x, y)
    # Reassign q through its property; kept because petclaw exposes q as a
    # property and the self-assignment may sync backing storage -- confirm.
    state.q = state.q
# Thin wrapper kept for API symmetry with setaux_lower/setaux_upper.
def setaux(t,x,y):
    """
    Return the auxiliary (epsilon, mu) material map at time t on grid centers
    x, y -- simply delegates to refind().

    Fix: the previous version first allocated an np.empty array (with the
    x/y dimensions transposed, even) and immediately threw it away.
    """
    return refind(t, x, y)
def setaux_lower(state,dim,t,auxbc,num_ghost):
    """
    Custom aux boundary condition: fill the num_ghost ghost columns at the
    lower x edge with the material map evaluated at the ghost-cell centers.
    Returns the (modified) auxbc array, as pyclaw expects.
    """
    grid = state.grid
    X = grid.x.centers_with_ghost(num_ghost)[:num_ghost]
    Y = grid.y.centers_with_ghost(num_ghost)
    # NOTE(review): uses state.t rather than the 't' argument -- confirm.
    tl = state.t
    auxbc[:,:num_ghost,:] = refind(tl,X,Y)
    return auxbc
def setaux_upper(state,dim,t,auxbc,num_ghost):
    """
    Custom aux boundary condition: fill the num_ghost ghost columns at the
    upper x edge with the material map evaluated at the ghost-cell centers.
    Mirror image of setaux_lower.
    """
    grid = state.grid
    X = grid.x.centers_with_ghost(num_ghost)[-num_ghost:]
    Y = grid.y.centers_with_ghost(num_ghost)
    # NOTE(review): uses state.t rather than the 't' argument -- confirm.
    tu = state.t
    auxbc[:,-num_ghost:,:] = refind(tu,X,Y)
    return auxbc
def scattering_bc(state,dim,t,qbc,num_ghost):
    """
    EM scattering boundary conditions with three components in TM-mode Ex, Ey, Hz.

    Fills the ghost cells at the lower x edge with the source field selected
    by the module-level ex_type (plane wave, Gaussian beam, or one of several
    pulse envelopes), scaled by the local material map so the injected field
    is consistent with refind() at the boundary.
    """
    grid = state.grid
    X = grid.x.centers_with_ghost(num_ghost)[:num_ghost]
    Y = grid.y.centers_with_ghost(num_ghost)
    ts = state.t
    y,x = np.meshgrid(Y,X)
    aux_left_bc = refind(t,X,Y)
    t0 = 0.05
    # envelope (pulseshape) x carrier (harmonic); defaults cover ex_type
    # values not listed below (both stay zero)
    pulseshape = np.zeros( [len(X),len(Y)], order='F')
    harmonic = np.zeros( [len(X),len(Y)], order='F')
    if ex_type=='plane':
        pulseshape = 1.0
        harmonic = np.sin(kx_ex*x + ky_ex*y - omega*ts)
    elif ex_type=='gauss-beam':
        pulseshape = np.exp(-(y - yoff_ex)**2/y_ex_sig**2)
        harmonic = np.sin(kx_ex*x + ky_ex*y - omega*ts)
    elif ex_type=='gauss_pulse':
        pulseshape = np.exp(-(x - xoff_ex - vx_ex*(ts-t0))**2/x_ex_sig**2)*np.exp(-(y - yoff_ex - vy_ex*(ts-t0))**2/y_ex_sig**2)
        harmonic = np.sin(kx_ex*x + ky_ex*y - omega*ts)
    elif ex_type=='plane_pulse':
        pulseshape = np.exp(-(x - xoff_ex - vx_ex*(ts-t0))**2/x_ex_sig**2)
        harmonic = np.sin(kx_ex*x + ky_ex*y - omega*ts)
    elif ex_type=='simple_pulse2D':
        pulseshape = np.exp(-(x - xoff_ex - vx_ex*(ts-t0))**2/x_ex_sig**2)*np.exp(-(y - yoff_ex - vy_ex*(ts-t0))**2/y_ex_sig**2)
        harmonic = 1.0
    elif ex_type=='simple_pulse2D_x':
        pulseshape = np.exp(-(x - xoff_ex - vx_ex*(ts-t0))**2/x_ex_sig**2)
        harmonic = 1.0
    # E components scaled by the electric map, Hz by the magnetic map
    qbc[0,:num_ghost,:] = amp_Ex*pulseshape*harmonic*aux_left_bc[0,:,:]
    qbc[1,:num_ghost,:] = amp_Ey*pulseshape*harmonic*aux_left_bc[0,:,:]
    qbc[2,:num_ghost,:] = amp_Hz*pulseshape*harmonic*aux_left_bc[1,:,:]
    return qbc
def qinit(state):
    """
    Initial conditions in simulation grid for electromagnetic components q.

    Zeroes all three TM-mode field components (Ex, Ey, Hz); the excitation
    is injected later through the scattering boundary condition.

    Fix: removed dead code -- the grid centers, meshgrid and the radial
    distance r2 were computed but never used (leftover from a Gaussian
    initial condition).
    """
    state.q[0,:,:] = 0.0
    state.q[1,:,:] = 0.0
    state.q[2,:,:] = 0.0
# -------- MAIN SCRIPT --------------
def em2D(kernel_language='Fortran',iplot=False,htmlplot=False,use_petsc=True,save_outdir='./_output',solver_type='sharpclaw'):
    """
    Set up and run the 2-D TM-mode Maxwell simulation (Python 2 script:
    note the print statement below).  Returns the pyclaw Controller after
    the run so callers can inspect the solution frames.
    """
    # petclaw is the MPI-parallel drop-in for pyclaw
    if use_petsc:
        import clawpack.petclaw as pyclaw
    else:
        from clawpack import pyclaw
    print v,y_upper,mx,my
    # Solver settings
    if solver_type=='classic':
        solver=pyclaw.ClawSolver2D()
        solver.dimensional_split=False
        solver.limiters = pyclaw.limiters.tvd.MC
    elif solver_type=='sharpclaw':
        solver=pyclaw.SharpClawSolver2D()
        solver.num_waves = 2
        solver.weno_order = 5
    # solver.dt_initial=0.005
    # solver.max_steps = 1000000
    # Riemann solver shared by both solver types
    import maxwell_2d
    solver.rp = maxwell_2d
    solver.fwave = True
    solver.cfl_max = 2.45
    solver.cfl_desired = 2.4
    # solver.before_step = update_aux
    # define number of waves (eqn) and aux (eps,mu)
    num_eqn = 3
    num_aux = 2
    # abstract domain and state setup
    x_dime = pyclaw.Dimension('x',x_lower,x_upper,mx)
    y_dime = pyclaw.Dimension('y',y_lower,y_upper,my)
    domain = pyclaw.Domain([x_dime,y_dime])
    state = pyclaw.State(domain,num_eqn,num_aux)
    grid = state.grid
    X = grid.x.centers
    Y = grid.y.centers
    tini = state.t
    state.aux = refind(tini,X,Y)
    # Boundary conditions: custom source at lower x, zero-order
    # extrapolation elsewhere; aux uses custom fills in x and walls in y
    solver.user_bc_lower = scattering_bc
    solver.bc_lower[0] = pyclaw.BC.custom
    solver.bc_upper[0] = pyclaw.BC.extrap
    solver.bc_lower[1] = pyclaw.BC.extrap
    solver.bc_upper[1] = pyclaw.BC.extrap
    solver.user_aux_bc_lower = setaux_lower
    solver.user_aux_bc_upper = setaux_upper
    solver.aux_bc_lower[0] = pyclaw.BC.custom
    solver.aux_bc_upper[0] = pyclaw.BC.custom
    solver.aux_bc_lower[1] = pyclaw.BC.wall
    solver.aux_bc_upper[1] = pyclaw.BC.wall
    # Initial solution
    qinit(state)
    # controller
    claw = pyclaw.Controller()
    claw.keep_copy = True
    claw.tfinal = 2
    claw.num_output_times = 10
    claw.solver = solver
    claw.solution = pyclaw.Solution(state,domain)
    claw.outdir = save_outdir
    claw.write_aux_always = True
    status = claw.run()
    if htmlplot: pyclaw.plot.html_plot(outdir=save_outdir,file_format=claw.output_format)
    if iplot: pyclaw.plot.interactive_plot(outdir=save_outdir,file_format=claw.output_format)
    return claw
# Script entry point: let pyclaw parse command-line overrides and run em2D.
if __name__=="__main__":
    import sys
    from clawpack.pyclaw.util import run_app_from_main
    output = run_app_from_main(em2D)
| nthakkar/emclaw | deprecated/maxwell_2d_homogeneous/maxwell_interlayer.py | Python | gpl-2.0 | 8,040 | [
"Gaussian"
] | fc12e604f48fdbcd2bac24e9d8d312acf68d939948efc2a64b948c967524fcc3 |
import os, sys
import re
import math
import numpy
from collections import defaultdict
import pysam
from grit.files.reads import RNAseqReads
import multiprocessing
import queue
from grit.lib.multiprocessing_utils import ProcessSafeOPStream, fork_and_wait
import gzip, io
import pickle
# Repository-relative data directory holding annotations and mapping files.
DATA_BASE_DIR = os.path.abspath(os.path.dirname(__file__) + "/../data/")
# Column permutation applied to every RNA-seq expression row/header
# (reorders the raw file columns into the presentation order used below).
RNASEQ_SORT_INDICES = numpy.array((3,4,1,2,6,7))
class TFs(pysam.TabixFile):
    """Tabix-indexed BED file of TF binding sites (thin alias of
    pysam.TabixFile, used with ENCODE_TFS.bed.gz below)."""
    pass
def load_GENCODE_names(fname):
    """
    Map upper-cased gene symbols to versionless ENSEMBL ids from a gzipped
    GENCODE GFF3 annotation.

    :param fname: path to a gzipped GFF3 file
    :return: defaultdict mapping GENE_NAME.upper() -> [ensembl ids]

    Fix: removed the unused contig/start/stop locals, which additionally
    read the wrong GFF3 columns (data[1] is the source column, not seqid).
    """
    gene_name_map = defaultdict(list)
    with io.TextIOWrapper(gzip.open(fname, 'rb')) as fp:
        for line in fp:
            if line.startswith("#"): continue
            data = line.split()
            # GFF3 column 3 is the feature type; keep gene records only.
            if data[2] != 'gene': continue
            # Strip the ".version" suffix from the ENSEMBL id.
            ensemble_id = re.findall(r"ID=(.*?)\.\d+;", line)[0]
            gene_name = re.findall(r"gene_name=(.*?);", line)[0]
            gene_name_map[gene_name.upper()].append(ensemble_id)
    return gene_name_map
def load_GENCODE_genes(fname):
    """
    Read gene records from a gzipped GENCODE GFF3 annotation.

    :param fname: path to a gzipped GFF3 file
    :return: list of [contig, start, stop, gene_name, ensembl_id_sans_version]
    """
    records = []
    with io.TextIOWrapper(gzip.open(fname, 'rb')) as handle:
        for raw_line in handle:
            # skip header/comment lines
            if raw_line.startswith("#"):
                continue
            fields = raw_line.split()
            # GFF3 column 3 is the feature type; keep gene records only
            if fields[2] != 'gene':
                continue
            gene_id = re.findall("ID=(.*?)\.\d+;", raw_line)[0]
            symbol = re.findall("gene_name=(.*?);", raw_line)[0]
            records.append(
                [fields[0], int(fields[3]), int(fields[4]), symbol, gene_id])
    return records
def group_overlapping_intervals(intervals, max_size=10000):
    """
    Cluster (start, stop, ...) intervals into groups of mutually
    overlapping/touching members.

    A new group is started whenever the next interval begins past the
    current group's end, or once the current group's span exceeds max_size.

    :return: list of ([group_start, group_stop], [member intervals]) pairs,
             in sorted order; empty input yields [].
    """
    ordered = sorted(intervals)
    if not ordered:
        return []
    first = ordered[0]
    group_start, group_stop = first[0], first[1]
    groups = [([group_start, group_stop], [first])]
    for iv in ordered[1:]:
        oversized = group_stop - group_start > max_size
        if oversized or iv[0] > group_stop:
            # open a fresh group anchored at this interval
            group_start, group_stop = iv[0], iv[1]
            groups.append(([group_start, group_stop], [iv]))
        else:
            # extend the current group and record the member
            group_stop = max(iv[1], group_stop)
            bounds, members = groups[-1]
            bounds[1] = group_stop
            members.append(iv)
    return groups
def load_enhancers(tfs):
    """
    Build a per-contig list of merged (start, stop) candidate-enhancer
    intervals from a tabix-indexed TF binding-site file.

    :param tfs: TFs/pysam.TabixFile whose fetch() yields per-record fields
                (contig, start, stop, tf) -- presumably opened with a tuple
                parser; confirm against the callers.
    :return: dict mapping contig -> sorted list of merged [start, stop]

    Fix: the record's contig used to rebind the outer loop variable
    'contig', so each result list was keyed by the LAST record's contig
    rather than the contig being iterated.
    """
    enhancers = {}
    for contig in tfs.contigs:
        intervals = []
        for rec_contig, start, stop, tf in tfs.fetch(contig):
            intervals.append((int(start), int(stop)))
        merged_intervals = sorted(
            x[0] for x in group_overlapping_intervals(intervals))
        enhancers[contig] = merged_intervals
    return enhancers
def load_tf_gene_mapping(fname=os.path.join(
        DATA_BASE_DIR, "ENCODE_TFS.target.gene.map.txt")):
    """
    Parse the TF -> target-gene mapping file.

    Each data line is "TF gene1,gene2,...".  Lines with a single field are
    printed and skipped.

    :return: (tf_gene_map, gene_tf_map) -- a defaultdict of TF -> [genes]
             and its inverse plain-dict gene -> TF.
    """
    tf_gene_map = defaultdict(list)
    with open(fname) as handle:
        for raw_line in handle:
            fields = raw_line.split()
            if len(fields) == 1:
                # malformed line: no gene list present
                print( fields )
                continue
            tf_gene_map[fields[0]].extend(fields[1].split(","))
    # invert the mapping; later TFs win on duplicate genes
    gene_tf_map = {
        gene: tf
        for tf, genes in tf_gene_map.items()
        for gene in genes
    }
    return tf_gene_map, gene_tf_map
def load_tf_sites(fname):
    """
    Collect binding-site regions per TF from a tabix-indexed BED-like file
    whose last column is a comma-separated TF list.

    :return: dict mapping tf name -> list of (chrm, start, stop) tuples
    """
    def extract_pos_and_tfs(line):
        # split one record into its region and the TF list in the last column
        data = line.strip().split()
        chrm, start, stop = data[0], int(data[1]), int(data[2])
        return (chrm, start, stop), data[-1].split(",")
    tf_positions = defaultdict(list)
    fp = pysam.TabixFile(fname)
    for contig in fp.contigs:
        for i, record in enumerate(fp.fetch(contig)):
            region, tfs = extract_pos_and_tfs(record)
            for tf in tfs:
                tf_positions[tf].append(region)
            # NOTE(review): only the first ~1000 records per contig are
            # read -- looks like a debugging limit left in; confirm.
            if i > 1000: break
    fp.close()
    return dict(tf_positions)
def load_expression(fname=os.path.join(
        DATA_BASE_DIR, "Het_Project.hg19_mm9_RSEM_gene_expression.txt")):
    """
    Parse the RSEM gene-expression table.

    :return: (header, expression) where header holds the sample labels and
             expression maps each versionless gene id to a float array,
             both reordered by RNASEQ_SORT_INDICES.
    """
    header = None
    expression = {}
    with open(fname) as handle:
        for row_idx, line in enumerate(handle):
            fields = line.split()
            if row_idx == 0:
                # first row: sample labels (drop the gene-id column)
                header = fields[1:]
            else:
                gene_id = fields[0].split('.')[0]
                values = numpy.array(fields[1:], dtype=float)
                expression[gene_id] = values[RNASEQ_SORT_INDICES]
    header = [header[i] for i in RNASEQ_SORT_INDICES]
    return header, expression
def load_tads(mouse_fname=os.path.join(
        DATA_BASE_DIR, "./called_TADS/MouseES.HIC.combined.domain.bed"),
              human_fname=os.path.join(
        DATA_BASE_DIR, "./called_TADS/IMR90.HIC.combined.domain.bed")):
    """
    Load TAD boundary coordinates from the mouse and human domain BED files.

    :return: dict mapping genome-prefixed contig ("mm9_chrN"/"hg19_chrN")
             to a sorted numpy array of unique boundary positions.
    """
    boundaries = defaultdict(set)
    # (prefix, path) pairs processed in the same order as before: mouse
    # first, then human
    for prefix, fname in (("mm9_", mouse_fname), ("hg19_", human_fname)):
        with open(fname) as handle:
            for line in handle:
                contig, start, stop = line.split()
                contig_bndries = boundaries[prefix + contig]
                contig_bndries.add(int(start))
                contig_bndries.add(int(stop))
    return {key: numpy.array(sorted(vals))
            for key, vals in boundaries.items()}
def load_tf_genes():
    """
    Return the sorted list of TF-coding gene records
    [contig, start, stop, gene_name, ensembl_id, tf] across hg19 and mm9,
    with genome-prefixed contigs.  Results are cached in ./pickled_genes.obj
    and reloaded from there on subsequent calls.
    """
    # fast path: reuse the pickled result if present
    try:
        with open('pickled_genes.obj', 'rb') as fp:
            return pickle.load(fp)
    except FileNotFoundError:
        pass
    m4_ann_fname = os.path.join(
        DATA_BASE_DIR, "gencode.vM4.annotation.gff3.gz")
    hg19_ann_fname = os.path.join(
        DATA_BASE_DIR, "gencode.v19.annotation.gff3.gz")
    tf_gene_map, gene_tf_map = load_tf_gene_mapping()
    all_genes = []
    # keep only genes that map to a known TF; the 'NONE' assignment is dead
    # code because of the continue that follows it (left as-is)
    for data in load_GENCODE_genes( hg19_ann_fname ):
        data[0] = 'hg19_' + data[0]
        try:
            tf = gene_tf_map[data[-1]]
        except KeyError:
            tf = 'NONE'
            continue
        all_genes.append(data + [tf,])
    for data in load_GENCODE_genes( m4_ann_fname ):
        data[0] = 'mm9_' + data[0]
        try:
            tf = gene_tf_map[data[-1]]
        except KeyError:
            tf = 'NONE'
            continue
        all_genes.append(data + [tf,])
    all_genes = sorted(all_genes)
    # cache for next time
    with open('pickled_genes.obj', 'wb') as ofp:
        pickle.dump(all_genes, ofp)
    return all_genes
class ATACSeq():
    """
    Accessor for the tabix-indexed ATAC-seq bedgraph coverage tracks of the
    six timepoint replicates listed in sample_prefixes (hard-coded paths).
    """
    def __init__(self):
        base = "/data/heterokaryon/ATAC-Seq/wigs/hg19_mm9/"
        # NOTE(review): all_samples is never used (note the missing comma in
        # "CC_rep2 MRC5_rep2"); only sample_prefixes drives the loading.
        all_samples = ["16hr_A", "16hr_rep2", "16hr_rep3",
                       "3hr_A", "3hr_rep3",
                       "48hr_A", "48hr_rep2",
                       "48hr_rep3", "CC_rep2 MRC5_rep2"]
        sample_prefixes = [
            '3hr_rep1', '3hr_rep3',
            '16hr_rep2', '16hr_rep3',
            '48hr_rep2', '48hr_rep3']
        self.all_signal_coverage = []
        for sample_prefix in sample_prefixes:
            fname = os.path.join(base, sample_prefix) + ".bedgraph.gz"
            self.all_signal_coverage.append(pysam.TabixFile(fname))
    def extract_signal_in_region(self, contig, start, stop):
        # Total signal per sample over [start, stop): sums the bedgraph
        # value column of every overlapping record.
        rv = []
        for signal_cov in self.all_signal_coverage:
            res = signal_cov.fetch(contig, start, stop)
            rv.append( sum( float(x.split()[-1]) for x in res ) )
        return numpy.array(rv)
    def build_signal_coverage_array(self, contig, start, stop):
        # Per-base coverage matrix of shape (n_samples, stop-start).
        rv = numpy.zeros(
            (len(self.all_signal_coverage), stop-start), dtype=float)
        for i, signal_cov in enumerate(self.all_signal_coverage):
            res = signal_cov.fetch(contig, start, stop)
            # NOTE(review): the record unpacking rebinds 'contig'; harmless
            # only because every fetched record shares the queried contig.
            for contig, r_start, r_stop, signal in (x.split() for x in res):
                rv[i, int(r_start)-start:int(r_stop)-start] = float(signal)
        return rv
def tf_bs_parser(line):
    """
    Parse one whitespace-separated TF binding-site record (echoing the raw
    line) into a (contig, start, stop, tf_name) tuple with int coordinates.
    """
    print( line )
    fields = line.split()
    contig = fields[0]
    start = int(fields[1])
    stop = int(fields[2])
    tf_name = fields[3]
    return (contig, start, stop, tf_name)
def cov_change(exp):
    """
    Two regularized z-scores of coverage change across a six-sample series
    (three timepoints, two replicates each).

    :param exp: sequence of six values, replicate pairs per timepoint
    :return: (z1, z2) -- change from timepoint 1->2 and from 2->3
    """
    def _zscore(before, after):
        # population means and variances of the two replicate pairs
        mu_b = sum(before) / len(before)
        mu_a = sum(after) / len(after)
        var_b = sum((v - mu_b) ** 2 for v in before) / len(before)
        var_a = sum((v - mu_a) ** 2 for v in after) / len(after)
        # +1 in the denominator regularizes near-zero variances
        return (mu_a - mu_b) / math.sqrt(
            var_b / len(before) + var_a / len(after) + 1)
    early_shift = _zscore((exp[0], exp[1]), (exp[2], exp[3]))
    late_shift = _zscore((exp[2], exp[3]), (exp[4], exp[5]))
    return early_shift, late_shift
def find_active_enhancers_in_tad(contig, tad_start, tad_stop,
                                 tfs, tf_genes, all_atacseq,
                                 hg19_enhancers_ofp, mm9_enhancers_ofp):
    """
    Scan one TAD for candidate enhancers (clusters of TF binding sites with
    ATAC-seq signal) and write them, plus their dynamic member sites, to the
    BED output stream matching the contig's genome prefix.

    Scores are the max |z| from cov_change, scaled by 50 and capped at 1000
    for the BED score column.
    """
    # NOTE(review): local_genes is only referenced by the commented-out
    # early return below; currently dead code.
    local_genes = [gene[4] for gene in tf_genes
                   if gene[0] == contig
                   and not( gene[2] < tad_start or gene[1] > tad_stop )]
    #if len(local_genes) == 0: return
    #print( contig, tad_start, tad_stop, file=sys.stderr )
    local_tfbs = [x.split()for x in tfs.fetch(
        contig, tad_start, tad_stop) ]
    # keep only sites narrower than 1kb, as (start, stop, name) tuples
    local_tfbs = sorted((int(x[1]), int(x[2]), x[3])
                        for x in local_tfbs
                        if int(x[2]) - int(x[1]) < 1000)
    enhancers = group_overlapping_intervals(local_tfbs)
    filtered_enhancers = []
    for enhancer in enhancers:
        e_length = enhancer[0][1]-enhancer[0][0]+1
        cov = all_atacseq.build_signal_coverage_array(
            contig, enhancer[0][0], enhancer[0][1])
        # drop clusters with essentially no ATAC signal in any sample
        if cov.sum(1).max()/e_length < 1e-3: continue
        #print(cov.sum(1)/e_length)
        #noisy_cov = numpy.random.random(6)/10 + cov.sum(1)/e_length
        z1, z2 = cov_change(cov.sum(1)) # /e_length
        score = max(abs(z1), abs(z2))
        #print( z1, z2, cov.sum(1) ) # /e_length
        #if score < 1: continue
        #assert False
        filtered_enhancers.append((enhancer, cov, score))
    if len(filtered_enhancers) == 0: return
    # route output to the genome-matched stream, stripping the genome prefix
    if contig.startswith('hg19'):
        new_contig = contig[5:]
        ofp = hg19_enhancers_ofp
    else:
        new_contig = contig[4:]
        ofp = mm9_enhancers_ofp
    for enh, cov, score in filtered_enhancers:
        ofp.write("%s\t%i\t%i\t%s\t%i\t.\n" % (
            new_contig, enh[0][0], enh[0][1],
            'enhancer', min(1000, int(score*50))))
        # also emit each member TF site whose own signal changes (|z| >= 1)
        for tf_start, tf_stop, name in enh[1]:
            rel_start = tf_start - enh[0][0]
            rel_stop = tf_stop - enh[0][0]
            if cov[:,rel_start:rel_stop].sum() == 0: continue
            z1, z2 = cov_change(cov[:,rel_start:rel_stop].sum(1)) # /e_length
            #print( rel_start, rel_stop, cov.shape, cov[:,rel_start:rel_stop].sum(1) )
            score = max(abs(z1), abs(z2))
            if score < 1: continue
            ofp.write("%s\t%i\t%i\t%s\t%i\t.\n" % (
                new_contig, tf_start, tf_stop,
                name, min(1000, int(score*50))))
    return
def worker( tads_queue, hg19_enhancers_ofp, mm9_enhancers_ofp):
    """
    Worker process body: drain TAD regions from tads_queue and write the
    active enhancers found in each to the per-genome output streams.

    Terminates its process with os._exit(0) once the queue is exhausted.
    """
    tfs = TFs(os.path.join(DATA_BASE_DIR, "ENCODE_TFS.bed.gz"))
    exp_header, expression = load_expression()
    tf_genes = load_tf_genes()
    all_atacseq = ATACSeq()
    initial_size = tads_queue.qsize()
    while tads_queue.qsize() > 0:
        try:
            # BUG FIX: Queue.get's first positional parameter is 'block',
            # so the old get(0.1) meant block=0.1 (truthy) with NO timeout
            # and could wait forever on a raced-empty queue; pass the
            # intended timeout by keyword so queue.Empty fires.
            contig, tad_start, tad_stop = tads_queue.get(timeout=0.1)
        except queue.Empty:
            break
        # progress: remaining / initial queue size
        print( tads_queue.qsize(), initial_size, file=sys.stderr )
        find_active_enhancers_in_tad(
            contig, tad_start, tad_stop,
            tfs, tf_genes, all_atacseq,
            hg19_enhancers_ofp, mm9_enhancers_ofp)
    os._exit(0)
def main():
    """
    Load all inputs, open the two BED output tracks, enqueue every TAD of
    every contig and process them with 24 forked workers.
    """
    tads = load_tads()
    tfs = TFs(os.path.join(DATA_BASE_DIR, "ENCODE_TFS.bed.gz"))
    exp_header, expression = load_expression()
    tf_genes = load_tf_genes()
    all_atacseq = ATACSeq()
    # process-safe output streams shared by the forked workers
    hg19_enhancers_ofp = ProcessSafeOPStream(open("enhancers.hg19.bed", "w"))
    hg19_enhancers_ofp.write("track type=bed name=hg19_active_enhancers useScore=1\n")
    mm9_enhancers_ofp = ProcessSafeOPStream(open("enhancers.mm9.bed", "w"))
    mm9_enhancers_ofp.write("track type=bed name=mm9_active_enhancers useScore=1\n")
    tads_queue = multiprocessing.Queue()
    # consecutive boundary pairs define the TADs of each contig
    for contig, tad_bndrys in sorted(tads.items()):
        for tad_start, tad_stop in zip(tad_bndrys[:-1], tad_bndrys[1:]):
            tads_queue.put((contig, tad_start, tad_stop))
    args = [tads_queue, hg19_enhancers_ofp, mm9_enhancers_ofp]
    fork_and_wait(24, worker, args)
    hg19_enhancers_ofp.close()
    mm9_enhancers_ofp.close()
    #print( tads )
    # NOTE(review): everything below this assert is unreachable dead code
    # and references names (tf_positions, gene_name_map) never defined in
    # this function -- candidate for deletion.
    assert False
    print("type\tname\tgene_ens_id\t%s" % "\t".join(exp_header))
    for tf_symbol, positions in list(tf_positions.items()):
        if tf_symbol not in gene_name_map:
            print(tf_symbol, file=sys.stderr) #, gene_name_map[tf]
        else:
            for gene_id in gene_name_map[tf_symbol]:
                if gene_id not in expression:
                    print(("EXP", tf_symbol, gene_id,
                           gene_name_map[tf_symbol] ), file=sys.stderr)
                else:
                    print("%s\t%s\t%s\t%s" % (
                        "GENE_EXP", gene_id, tf_symbol, expression[gene_id]))
# Script entry point (the trailing Python-2-style prints are leftovers from
# an earlier bigbed-based implementation).
if __name__ == '__main__':
    main()
#print bigbed_to_bed("chipseq_track_proximal.bb")
#CS = BigBedFile(open("chipseq_track_proximal.bb"))
#print dir(CS)
#print CS.get("chr1",0,1000000)
| nboley/regulatory_network_tools | src/build_labeled_graph.py | Python | gpl-3.0 | 13,124 | [
"pysam"
] | 0142c59235653a92fd4b046465783330cfbeb8a7e4ca45630939965d78f6f248 |
import warnings
import torch
from gpytorch import settings
from gpytorch.distributions import MultitaskMultivariateNormal
from gpytorch.lazy import BlockDiagLazyTensor
from gpytorch.likelihoods import Likelihood
from ..approximate_gp import ApproximateGP
from ..gp import GP
class _DeepGPVariationalStrategy(object):
    """Aggregates the variational strategies of every ApproximateGP
    sub-module of a deep GP so their KL terms can be summed for the ELBO."""
    def __init__(self, model):
        # The deep GP whose sub-modules are scanned lazily on first access.
        self.model = model
    @property
    def sub_variational_strategies(self):
        # Memoized list of the variational strategies of all ApproximateGP
        # sub-modules (computed once and cached on the instance).
        if not hasattr(self, "_sub_variational_strategies_memo"):
            self._sub_variational_strategies_memo = [
                module.variational_strategy for module in self.model.modules() if isinstance(module, ApproximateGP)
            ]
        return self._sub_variational_strategies_memo
    def kl_divergence(self):
        # Total KL divergence summed over every sub-strategy.
        return sum(strategy.kl_divergence().sum() for strategy in self.sub_variational_strategies)
class DeepGPLayer(ApproximateGP):
    """
    Represents a layer in a deep GP where inference is performed via the doubly stochastic method of
    Salimbeni et al., 2017. Upon calling, instead of returning a variational distribution q(f), returns
    samples from the variational distribution.

    See the documentation for __call__ below for more details below. Note that the behavior of __call__
    will change to be much more elegant with multiple batch dimensions; however, the interface doesn't
    really change.

    :param ~gpytorch.variational.VariationalStrategy variational_strategy: Strategy for
        changing q(u) -> q(f) (see other VI docs)
    :param int input_dims: Dimensionality of input data expected by each GP
    :param int output_dims: (default None) Number of GPs in this layer, equivalent to
        output dimensionality. If set to `None`, then the output dimension will be squashed.

    Forward data through this hidden GP layer. The output is a MultitaskMultivariateNormal distribution
    (or MultivariateNormal distribution if output_dims=None).

    If the input is >=2 dimensional Tensor (e.g. `n x d`), we pass the input through each hidden GP,
    resulting in a `n x h` multitask Gaussian distribution (where all of the `h` tasks represent an
    output dimension and are independent from one another). We then draw `s` samples from these Gaussians,
    resulting in a `s x n x h` MultitaskMultivariateNormal distribution.

    If the input is a >=3 dimensional Tensor, and the `are_samples=True` kwarg is set, then we assume that
    the outermost batch dimension is a samples dimension. The output will have the same number of samples.
    For example, a `s x b x n x d` input will result in a `s x b x n x h` MultitaskMultivariateNormal
    distribution.

    The goal of these last two points is that if you have a tensor `x` that is `n x d`, then

        >>> hidden_gp2(hidden_gp(x))

    will just work, and return a tensor of size `s x n x h2`, where `h2` is the output dimensionality of
    hidden_gp2. In this way, hidden GP layers are easily composable.
    """

    def __init__(self, variational_strategy, input_dims, output_dims):
        super(DeepGPLayer, self).__init__(variational_strategy)
        self.input_dims = input_dims
        self.output_dims = output_dims

    def forward(self, x):
        # Subclasses define the GP prior (mean/covariance) here.
        raise NotImplementedError

    def __call__(self, inputs, are_samples=False, **kwargs):
        deterministic_inputs = not are_samples
        if isinstance(inputs, MultitaskMultivariateNormal):
            # The previous layer produced a distribution; draw (reparameterized)
            # samples from it to use as this layer's inputs.
            inputs = torch.distributions.Normal(loc=inputs.mean, scale=inputs.variance.sqrt()).rsample()
            deterministic_inputs = False

        if settings.debug.on():
            if not torch.is_tensor(inputs):
                raise ValueError(
                    "`inputs` should either be a MultitaskMultivariateNormal or a Tensor, got "
                    f"{inputs.__class__.__name__}"  # BUG FIX: was `__Name__`, which raised AttributeError
                )

            if inputs.size(-1) != self.input_dims:
                raise RuntimeError(
                    f"Input shape did not match self.input_dims. Got total feature dims [{inputs.size(-1)}],"
                    f" expected [{self.input_dims}]"
                )

        # Repeat the input for all possible outputs
        if self.output_dims is not None:
            inputs = inputs.unsqueeze(-3)
            inputs = inputs.expand(*inputs.shape[:-3], self.output_dims, *inputs.shape[-2:])

        # Now run samples through the GP
        output = ApproximateGP.__call__(self, inputs)
        if self.output_dims is not None:
            # Fold the per-output batch dimension into an (independent) multitask distribution.
            mean = output.loc.transpose(-1, -2)
            covar = BlockDiagLazyTensor(output.lazy_covariance_matrix, block_dim=-3)
            output = MultitaskMultivariateNormal(mean, covar, interleaved=False)

        # Maybe expand inputs?
        if deterministic_inputs:
            # Deterministic inputs get an explicit leading samples dimension.
            output = output.expand(torch.Size([settings.num_likelihood_samples.value()]) + output.batch_shape)

        return output
class DeepGP(GP):
    """
    A container module to build a DeepGP.

    This module should contain :obj:`~gpytorch.models.deep.DeepGPLayer`
    modules, and can also contain other modules as well.
    """
    def __init__(self):
        super().__init__()
        # Aggregates the variational strategies of all contained DeepGPLayers
        # so a total KL divergence is available for the ELBO.
        self.variational_strategy = _DeepGPVariationalStrategy(self)
    def forward(self, x):
        # To be implemented by subclasses: defines how data flows through the layers.
        raise NotImplementedError
class DeepLikelihood(Likelihood):
    """
    A wrapper to make a GPyTorch likelihood compatible with Deep GPs.

    Deprecated: use a standard likelihood together with
    ``gpytorch.mlls.DeepApproximateMLL`` instead (a DeprecationWarning is
    raised on construction).

    Example:
        >>> deep_gaussian_likelihood = gpytorch.likelihoods.DeepLikelihood(gpytorch.likelihood.GaussianLikelihood)
    """
    def __init__(self, base_likelihood):
        super().__init__()
        warnings.warn(
            "DeepLikelihood is now deprecated. Use a standard likelihood in conjunction with a "
            "gpytorch.mlls.DeepApproximateMLL. See the DeepGP example in our documentation.",
            DeprecationWarning,
        )
        self.base_likelihood = base_likelihood
    def expected_log_prob(self, observations, function_dist, *params, **kwargs):
        # Average the wrapped likelihood's result over dim 0 (the samples dimension).
        return self.base_likelihood.expected_log_prob(observations, function_dist, *params, **kwargs).mean(dim=0)
    def log_marginal(self, observations, function_dist, *params, **kwargs):
        # Average the wrapped likelihood's result over dim 0 (the samples dimension).
        return self.base_likelihood.log_marginal(observations, function_dist, *params, **kwargs).mean(dim=0)
    def forward(self, *args, **kwargs):
        # Unused: all computation is delegated to `base_likelihood` via __call__.
        pass
    def __call__(self, *args, **kwargs):
        # Delegate directly to the wrapped likelihood.
        return self.base_likelihood.__call__(*args, **kwargs)
| jrg365/gpytorch | gpytorch/models/deep_gps/deep_gp.py | Python | mit | 6,446 | [
"Gaussian"
] | ba8209f6ce46f3d5f11f6f4e3eafd1db17474a0e04acb42a1081d7aec093d7a9 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import inspect
import types
import warnings
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# Public `Distribution` methods whose docstrings `_DistributionMeta` augments
# with notes taken from subclass `_`-prefixed specializations (e.g. `_log_prob`
# docstrings are appended to the inherited `log_prob` docstring).
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
    "batch_shape", "get_batch_shape", "event_shape", "get_event_shape",
    "sample_n", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
    "survival_function", "entropy", "mean", "variance", "std", "mode"]
# Root of the distribution class hierarchy; `_DistributionMeta` uses it to
# detect whether the class being built is `Distribution` itself (in which case
# no docstring propagation is needed) or a concrete subclass.
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
  """Abstract base class needed for resolving subclass hierarchy."""
  pass
def _copy_fn(fn):
  """Return a genuine duplicate of the function `fn`.

  `copy.deepcopy` does not create a non-reference copy of a function, so the
  duplicate is built directly: `types.FunctionType == type(lambda: None)`, and
  its constructor accepts (code, globals, name, argdefs, closure), which lets
  us assemble a new function object from the old function's pieces.

  Args:
    fn: a callable

  Returns:
    A `FunctionType`: a deep copy of fn.

  Raises:
    TypeError: if `fn` is not a callable.
  """
  if not callable(fn):
    raise TypeError("fn is not callable: %s" % fn)
  duplicate = types.FunctionType(
      fn.__code__, fn.__globals__, fn.__name__, fn.__defaults__,
      fn.__closure__)
  return duplicate
def _update_docstring(old_str, append_str):
  """Insert `append_str` into `old_str` just before its "Args:" section.

  Every line of `append_str` is first indented so the resulting markdown
  renders correctly. If `old_str` contains no "Args:" line, `append_str` is
  simply appended at the end.
  """
  lines = old_str.split("\n")
  indented_append = "\n".join(" %s" % piece for piece in append_str.split("\n"))
  # Find the LAST line that reads "args:" (case-insensitive, ignoring blanks).
  args_ix = None
  for ix, line in enumerate(lines):
    if line.strip().lower() == "args:":
      args_ix = ix
  if args_ix is None:
    return old_str + "\n\n" + indented_append
  head = "\n".join(lines[:args_ix])
  tail = "\n".join(lines[args_ix:])
  return head + "\n\n" + indented_append + "\n\n" + tail
class _DistributionMeta(abc.ABCMeta):
  """Metaclass that copies `_foo` specialization docstrings into public `foo`."""

  def __new__(mcs, classname, baseclasses, attrs):
    """Control the creation of subclasses of the Distribution class.

    The main purpose of this method is to properly propagate docstrings
    from private Distribution methods, like `_log_prob`, into their
    public wrappers as inherited by the Distribution base class
    (e.g. `log_prob`).

    Args:
      classname: The name of the subclass being created.
      baseclasses: A tuple of parent classes.
      attrs: A dict mapping new attributes to their values.

    Returns:
      The class object.

    Raises:
      TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
        the new class is derived via multiple inheritance and the first
        parent class is not a subclass of `BaseDistribution`.
      AttributeError: If `Distribution` does not implement e.g. `log_prob`.
      ValueError: If a `Distribution` public method lacks a docstring.
    """
    if not baseclasses:  # Nothing to be done for Distribution
      raise TypeError("Expected non-empty baseclass. Does Distribution "
                      "not subclass _BaseDistribution?")
    which_base = [
        base for base in baseclasses
        if base == _BaseDistribution or issubclass(base, Distribution)]
    base = which_base[0]
    if base == _BaseDistribution:  # Nothing to be done for Distribution
      return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
    if not issubclass(base, Distribution):
      raise TypeError("First parent class declared for %s must be "
                      "Distribution, but saw '%s'" % (classname, base.__name__))
    for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
      special_attr = "_%s" % attr
      class_attr_value = attrs.get(attr, None)
      if attr in attrs:
        # The method is being overridden, do not update its docstring
        continue
      base_attr_value = getattr(base, attr, None)
      if not base_attr_value:
        raise AttributeError(
            "Internal error: expected base class '%s' to implement method '%s'"
            % (base.__name__, attr))
      class_special_attr_value = attrs.get(special_attr, None)
      if class_special_attr_value is None:
        # No _special method available, no need to update the docstring.
        continue
      class_special_attr_docstring = inspect.getdoc(class_special_attr_value)
      if not class_special_attr_docstring:
        # No docstring to append.
        continue
      # Copy the inherited public wrapper so its docstring can be edited
      # without mutating the function object shared with the base class.
      class_attr_value = _copy_fn(base_attr_value)
      class_attr_docstring = inspect.getdoc(base_attr_value)
      if class_attr_docstring is None:
        raise ValueError(
            "Expected base class fn to contain a docstring: %s.%s"
            % (base.__name__, attr))
      class_attr_value.__doc__ = _update_docstring(
          class_attr_value.__doc__,
          ("Additional documentation from `%s`:\n\n%s"
           % (classname, class_special_attr_docstring)))
      attrs[attr] = class_attr_value
    return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian).
### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = (n,) + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.get_event_shape()
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape
# Sampling returns a sample per distribution. `samples` has shape
# (5, 2, 2), which is (n,) + batch_shape + event_shape, where n=5,
# batch_shape=(2, 2), and event_shape=().
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape (2, 2) as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is (2, 2), one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `a` and `b`, and does not have well-defined mode if
`a < 1` or `b < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
is_continuous,
is_reparameterized,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
is_continuous: Python boolean. If `True` this
`Distribution` is continuous over its supported domain.
is_reparameterized: Python boolean. If `True` this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution.
validate_args: Python boolean. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python boolean. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
parameters: Python dictionary of parameters used to instantiate this
`Distribution`.
graph_parents: Python list of graph prerequisites of this `Distribution`.
name: A name for this distribution. Default: subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not contrib_framework.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
parameters = parameters or {}
self._dtype = dtype
self._is_continuous = is_continuous
self._is_reparameterized = is_reparameterized
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters
self._graph_parents = graph_parents
self._name = name or type(self).__name__
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
Subclasses should override static method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. TensorShape) shapes.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
  @staticmethod
  def _param_shapes(sample_shape):
    # Subclass hook for `param_shapes`; see that classmethod for the contract.
    raise NotImplementedError("_param_shapes not implemented")
  @property
  def name(self):
    """Name prepended to all ops created by this `Distribution`."""
    # Set at construction time; defaults to the subclass name.
    return self._name
  @property
  def dtype(self):
    """The `DType` of `Tensor`s handled by this `Distribution`."""
    # `None` means no type-enforcement (see `__init__`).
    return self._dtype
  @property
  def parameters(self):
    """Dictionary of parameters used to instantiate this `Distribution`."""
    # `copy()` uses this dict to rebuild an equivalent instance.
    return self._parameters
  @property
  def is_continuous(self):
    """Python boolean: `True` if this `Distribution` is continuous over its supported domain."""
    return self._is_continuous
  @property
  def is_reparameterized(self):
    """Python boolean: `True` if samples can be reparameterized via a standard distribution."""
    return self._is_reparameterized
  @property
  def allow_nan_stats(self):
    """Python boolean describing behavior when a stat is undefined.

    Stats return +/- infinity when it makes sense. E.g., the variance
    of a Cauchy distribution is infinity. However, sometimes the
    statistic is undefined, e.g., if a distribution's pdf does not achieve a
    maximum within the support of the distribution, the mode is undefined.
    If the mean is undefined, then by definition the variance is undefined.
    E.g. the mean for Student's T for df = 1 is undefined (no clear way to say
    it is either + or - infinity), so the variance = E[(X - mean)^2] is also
    undefined.

    Returns:
      allow_nan_stats: Python boolean.
    """
    # When False, statistic methods raise instead of emitting NaN.
    return self._allow_nan_stats
  @property
  def validate_args(self):
    """Python boolean indicated possibly expensive checks are enabled."""
    # When False, invalid inputs may silently produce incorrect results.
    return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
intialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` intitialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
# Python3 leaks "__class__" into `locals()` so we remove if present.
# TODO(b/32376812): Remove this pop.
parameters.pop("__class__", None)
return type(self)(**parameters)
  def _batch_shape(self):
    # Subclass hook for `batch_shape`; see that method for the contract.
    raise NotImplementedError("batch_shape is not implemented")
  def batch_shape(self, name="batch_shape"):
    """Shape of a single sample from a single event index as a 1-D `Tensor`.

    The product of the dimensions of the `batch_shape` is the number of
    independent distributions of this kind the instance represents.

    Args:
      name: name to give to the op

    Returns:
      batch_shape: `Tensor`.
    """
    # Thin scoped wrapper around the subclass hook `_batch_shape`.
    with self._name_scope(name):
      return self._batch_shape()
  def _get_batch_shape(self):
    # Default: completely unknown static shape; subclasses may refine.
    return tensor_shape.TensorShape(None)
  def get_batch_shape(self):
    """Shape of a single sample from a single event index as a `TensorShape`.

    Same meaning as `batch_shape`. May be only partially defined.

    Returns:
      batch_shape: `TensorShape`, possibly unknown.
    """
    # Thin wrapper around the subclass hook `_get_batch_shape`.
    return self._get_batch_shape()
  def _event_shape(self):
    # Subclass hook for `event_shape`; see that method for the contract.
    raise NotImplementedError("event_shape is not implemented")
  def event_shape(self, name="event_shape"):
    """Shape of a single sample from a single batch as a 1-D int32 `Tensor`.

    Args:
      name: name to give to the op

    Returns:
      event_shape: `Tensor`.
    """
    # Thin scoped wrapper around the subclass hook `_event_shape`.
    with self._name_scope(name):
      return self._event_shape()
  def _get_event_shape(self):
    # Default: completely unknown static shape; subclasses may refine.
    return tensor_shape.TensorShape(None)
  def get_event_shape(self):
    """Shape of a single sample from a single batch as a `TensorShape`.

    Same meaning as `event_shape`. May be only partially defined.

    Returns:
      event_shape: `TensorShape`, possibly unknown.
    """
    # Thin wrapper around the subclass hook `_get_event_shape`.
    return self._get_event_shape()
  def _sample_n(self, n, seed=None):
    # Subclass hook for `sample_n`; see that method for the contract.
    raise NotImplementedError("sample_n is not implemented")
  def sample(self, sample_shape=(), seed=None, name="sample",
             **condition_kwargs):
    """Generate samples of the specified shape.

    Note that a call to `sample()` without arguments will generate a single
    sample.

    Args:
      sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
      seed: Python integer seed for RNG
      name: name to give to the op.
      **condition_kwargs: Named arguments forwarded to subclass implementation.

    Returns:
      samples: a `Tensor` with prepended dimensions `sample_shape`.
    """
    with self._name_scope(name, values=[sample_shape]):
      sample_shape = ops.convert_to_tensor(
          sample_shape, dtype=dtypes.int32, name="sample_shape")
      # A scalar sample_shape means "n samples"; delegate to sample_n directly.
      if sample_shape.get_shape().ndims == 0:
        return self.sample_n(sample_shape, seed, **condition_kwargs)
      # Otherwise flatten sample_shape into a total count, draw that many
      # samples, and reshape the leading dimension back into sample_shape.
      sample_shape, total = self._expand_sample_shape(sample_shape)
      samples = self.sample_n(total, seed, **condition_kwargs)
      output_shape = array_ops.concat_v2(
          [sample_shape, array_ops.slice(array_ops.shape(samples), [1], [-1])],
          0)
      output = array_ops.reshape(samples, output_shape)
      # Propagate whatever static shape information is available.
      output.set_shape(tensor_util.constant_value_as_shape(
          sample_shape).concatenate(samples.get_shape()[1:]))
      return output
  def sample_n(self, n, seed=None, name="sample_n", **condition_kwargs):
    """Generate `n` samples.

    Args:
      n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
        observations to sample.
      seed: Python integer seed for RNG
      name: name to give to the op.
      **condition_kwargs: Named arguments forwarded to subclass implementation.

    Returns:
      samples: a `Tensor` with a prepended dimension (n,).

    Raises:
      TypeError: if `n` is not an integer type.
    """
    warnings.warn("Please use `sample` instead of `sample_n`. `sample_n` "
                  "will be deprecated in December 2016.",
                  PendingDeprecationWarning)
    with self._name_scope(name, values=[n]):
      n = ops.convert_to_tensor(n, name="n")
      if not n.dtype.is_integer:
        raise TypeError("n.dtype=%s is not an integer type" % n.dtype)
      x = self._sample_n(n, seed, **condition_kwargs)

      # Set shape hints.
      sample_shape = tensor_shape.TensorShape(
          tensor_util.constant_value(n))
      batch_ndims = self.get_batch_shape().ndims
      event_ndims = self.get_event_shape().ndims
      if batch_ndims is not None and event_ndims is not None:
        # Both ranks known statically: full shape is sample + batch + event.
        inferred_shape = sample_shape.concatenate(
            self.get_batch_shape().concatenate(
                self.get_event_shape()))
        x.set_shape(inferred_shape)
      elif x.get_shape().ndims is not None and x.get_shape().ndims > 0:
        # Partial knowledge: merge whatever static dims are available.
        x.get_shape()[0].merge_with(sample_shape[0])
        if batch_ndims is not None and batch_ndims > 0:
          x.get_shape()[1:1+batch_ndims].merge_with(self.get_batch_shape())
        if event_ndims is not None and event_ndims > 0:
          x.get_shape()[-event_ndims:].merge_with(self.get_event_shape())
      return x
  def _log_prob(self, value):
    # Subclass hook for `log_prob`; see that method for the contract.
    raise NotImplementedError("log_prob is not implemented")
def log_prob(self, value, name="log_prob", **condition_kwargs):
"""Log probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob", **condition_kwargs):
"""Probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
  def _log_cdf(self, value):
    # Subclass hook for `log_cdf`; see that method for the contract.
    raise NotImplementedError("log_cdf is not implemented")
def log_cdf(self, value, name="log_cdf", **condition_kwargs):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
  def _cdf(self, value):
    # Subclass hook for `cdf`; see that method for the contract.
    raise NotImplementedError("cdf is not implemented")
def cdf(self, value, name="cdf", **condition_kwargs):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
  def _log_survival_function(self, value):
    # Subclass hook for `log_survival_function`; see that method for the contract.
    raise NotImplementedError("log_survival_function is not implemented")
def log_survival_function(self, value, name="log_survival_function",
**condition_kwargs):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(1. - self.cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
  def _survival_function(self, value):
    # Subclass hook for `survival_function`; see that method for the contract.
    raise NotImplementedError("survival_function is not implemented")
def survival_function(self, value, name="survival_function",
**condition_kwargs):
"""Survival function.
Given random variable `X`, the survival function is defined:
```
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **condition_kwargs)
except NotImplementedError:
raise original_exception
  def _entropy(self):
    # Subclass hook for `entropy`.
    raise NotImplementedError("entropy is not implemented")
  def entropy(self, name="entropy"):
    """Shannon entropy in nats."""
    # Thin scoped wrapper around the subclass hook `_entropy`.
    with self._name_scope(name):
      return self._entropy()
  def _mean(self):
    # Subclass hook for `mean`.
    raise NotImplementedError("mean is not implemented")
  def mean(self, name="mean"):
    """Mean."""
    # Thin scoped wrapper around the subclass hook `_mean`.
    with self._name_scope(name):
      return self._mean()
  def _variance(self):
    # Subclass hook for `variance`.
    raise NotImplementedError("variance is not implemented")
  def variance(self, name="variance"):
    """Variance."""
    # Thin scoped wrapper around the subclass hook `_variance`.
    with self._name_scope(name):
      return self._variance()
  def _std(self):
    # Subclass hook for `std`.
    raise NotImplementedError("std is not implemented")
  def std(self, name="std"):
    """Standard deviation."""
    # Thin scoped wrapper around the subclass hook `_std`.
    with self._name_scope(name):
      return self._std()
  def _mode(self):
    # Subclass hook for `mode`.
    raise NotImplementedError("mode is not implemented")
  def mode(self, name="mode"):
    """Mode."""
    # Thin scoped wrapper around the subclass hook `_mode`.
    with self._name_scope(name):
      return self._mode()
  def log_pdf(self, value, name="log_pdf", **condition_kwargs):
    """Log probability density function.

    Deprecated alias for `log_prob`, restricted to continuous distributions.

    Args:
      value: `float` or `double` `Tensor`.
      name: The name to give this op.
      **condition_kwargs: Named arguments forwarded to subclass implementation.

    Returns:
      log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
        values of type `self.dtype`.

    Raises:
      TypeError: if not `is_continuous`.
    """
    warnings.warn("Please use `log_prob` instead of `log_pdf`. `log_pdf` "
                  "will be deprecated in December 2016.",
                  PendingDeprecationWarning)
    if not self.is_continuous:
      raise TypeError("log_pdf is undefined for non-continuous distributions.")
    return self.log_prob(value, name=name, **condition_kwargs)
def pdf(self, value, name="pdf", **condition_kwargs):
"""Probability density function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if not `is_continuous`.
"""
warnings.warn("Please use `prob` instead of `pdf`. `pdf` will be "
"deprecated in December 2016.",
PendingDeprecationWarning)
if not self.is_continuous:
raise TypeError("pdf is undefined for non-continuous distributions.")
return self.prob(value, name, **condition_kwargs)
def log_pmf(self, value, name="log_pmf", **condition_kwargs):
"""Log probability mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if `is_continuous`.
"""
warnings.warn("Please use `log_prob` instead of `log_pmf`. `log_pmf` will "
"be deprecated in December 2016.",
PendingDeprecationWarning)
if self.is_continuous:
raise TypeError("log_pmf is undefined for continuous distributions.")
return self.log_prob(value, name=name, **condition_kwargs)
def pmf(self, value, name="pmf", **condition_kwargs):
"""Probability mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if `is_continuous`.
"""
warnings.warn("Please use `prob` instead of `pmf`. `pmf` will be "
"deprecated in December 2016.",
PendingDeprecationWarning)
if self.is_continuous:
raise TypeError("pmf is undefined for continuous distributions.")
return self.prob(value, name=name, **condition_kwargs)
  @contextlib.contextmanager
  def _name_scope(self, name=None, values=None):
    """Helper function to standardize op scope."""
    # Nest the per-op scope inside this distribution's own name scope so all
    # ops created by public methods share a common prefix; the distribution's
    # graph parents are appended to `values` to anchor the inner scope.
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=(
          (values or []) + self._graph_parents)) as scope:
        yield scope
  def _expand_sample_shape(self, sample_shape):
    """Helper to `sample` which ensures sample_shape is 1D.

    Returns a (sample_shape, total) pair where `sample_shape` is a rank-1
    int32 tensor and `total` is the product of its entries (the number of
    samples to draw).
    """
    # Try to resolve the shape statically; fall back to graph ops when the
    # value is only known at runtime.
    sample_shape_static_val = tensor_util.constant_value(sample_shape)
    ndims = sample_shape.get_shape().ndims
    if sample_shape_static_val is None:
      if ndims is None or not sample_shape.get_shape().is_fully_defined():
        ndims = array_ops.rank(sample_shape)
      # Promote a scalar shape to the 1-D shape [1]; otherwise keep as-is.
      expanded_shape = distribution_util.pick_vector(
          math_ops.equal(ndims, 0),
          np.array((1,), dtype=dtypes.int32.as_numpy_dtype()),
          array_ops.shape(sample_shape))
      sample_shape = array_ops.reshape(sample_shape, expanded_shape)
      total = math_ops.reduce_prod(sample_shape)  # reduce_prod([]) == 1
    else:
      if ndims is None:
        raise ValueError(
            "Shouldn't be here; ndims cannot be none when we have a "
            "tf.constant shape.")
      if ndims == 0:
        # Scalar constant shape: wrap it into a length-1 vector.
        sample_shape_static_val = np.reshape(sample_shape_static_val, [1])
      sample_shape = ops.convert_to_tensor(
          sample_shape_static_val,
          dtype=dtypes.int32,
          name="sample_shape")
      # Total number of requested sample points, computed statically.
      total = np.prod(sample_shape_static_val,
                      dtype=dtypes.int32.as_numpy_dtype())
    return sample_shape, total
| ppries/tensorflow | tensorflow/contrib/distributions/python/ops/distribution.py | Python | apache-2.0 | 33,977 | [
"Gaussian"
] | 2dc946d78360d99f6637ab01b600eb55caf49199e2866d9fd9de422153369f64 |
import os
import numpy as np
import MMTK
class Grid:
    """
    Class to read and write alchemical grids.

    Grids are exchanged as a dictionary with keys:
      origin  - grid origin coordinates (length-3 array)
      spacing - the grid spacing, in Angstroms (length-3 array)
      counts  - the number of points in each dimension (length-3 array)
      vals    - the flattened grid values
    All are numpy arrays.
    """
    def __init__(self):
        pass

    def read(self, FN, multiplier=None):
        """
        Reads a grid in dx or netcdf format.

        FN         - file name (.dx, .dx.gz, or .nc)
        multiplier - optional scale applied to origin and spacing only
                     (e.g. unit conversion); values are left unchanged.
        """
        if FN is None:
            raise Exception('File is not defined')
        elif FN.endswith('.dx') or FN.endswith('.dx.gz'):
            data = self._read_dx(FN)
        elif FN.endswith('.nc'):
            data = self._read_nc(FN)
        else:
            raise Exception('File type not supported')
        if multiplier is not None:
            data['origin'] = multiplier*data['origin']
            data['spacing'] = multiplier*data['spacing']
        return data

    def _read_dx(self, FN):
        """
        Reads a grid in dx format.
        """
        if FN.endswith('.dx'):
            F = open(FN,'r')
        else:
            import gzip
            F = gzip.open(FN,'r')
        # Read the header: skip to the first 'object' line, then parse counts,
        # origin and the three delta (basis) vectors.
        line = F.readline()
        while line.find('object')==-1:
            line = F.readline()
        header = {}
        header['counts'] = [int(x) for x in line.split(' ')[-3:]]
        for name in ['origin','d0','d1','d2']:
            header[name] = [float(x) for x in F.readline().split(' ')[-3:]]
        F.readline()
        header['npts'] = int(F.readline().split(' ')[-3])
        # Test to make sure the grid type is okay.
        # These conditions are not absolultely essential,
        # but they reduce the number of subtraction operations.
        if not (header['d0'][1]==0 and header['d0'][2]==0 and
                header['d1'][0]==0 and header['d1'][2]==0 and
                header['d2'][0]==0 and header['d2'][1]==0):
            raise Exception('Trilinear grid must be in original basis')
        if not (header['d0'][0]>0 and header['d1'][1]>0 and header['d2'][2]>0):
            raise Exception('Trilinear grid must have positive coordinates')
        # Read the data: dx files list several values per line; fill the
        # flat array chunk by chunk.
        vals = np.ndarray(shape=header['npts'], dtype=float)
        index = 0
        while index<header['npts']:
            line = F.readline()[:-1]
            items = [float(item) for item in line.split()]
            vals[index:index+len(items)] = items
            index = index + len(items)
        F.close()
        # Spacing is the diagonal of the (axis-aligned) delta vectors.
        data = {
            'origin':np.array(header['origin']), \
            'spacing':np.array([header['d0'][0],header['d1'][1],header['d2'][2]]), \
            'counts':np.array(header['counts']), \
            'vals':vals}
        return data

    def _read_nc(self, FN):
        """
        Reads a grid in netcdf format.
        """
        from netCDF4 import Dataset
        grid_nc = Dataset(FN,'r')
        data = {}
        # Each variable is stored with a leading singleton dimension;
        # strip it with [0].
        for key in list(grid_nc.variables):
            data[key] = np.array(grid_nc.variables[key][:][0][:])
        grid_nc.close()
        return data

    def write(self, FN, data, multiplier=None):
        """
        Writes a grid in dx or netcdf format.
        The multiplier affects the origin and spacing.
        """
        if multiplier is not None:
            data_n = {'origin':multiplier*data['origin'],
                      'counts':data['counts'],
                      'spacing':multiplier*data['spacing'],
                      'vals':data['vals']}
        else:
            data_n = data
        if FN.endswith('.nc'):
            self._write_nc(FN, data_n)
        elif FN.endswith('.dx') or FN.endswith('.dx.gz'):
            self._write_dx(FN, data_n)
        else:
            raise Exception('File type not supported')

    def _write_dx(self, FN, data):
        """
        Writes a grid in dx format.
        """
        n_points = data['counts'][0]*data['counts'][1]*data['counts'][2]
        if FN.endswith('.dx'):
            F = open(FN,'w')
        else:
            import gzip
            F = gzip.open(FN,'w')
        F.write("""object 1 class gridpositions counts {0[0]} {0[1]} {0[2]}
origin {1[0]} {1[1]} {1[2]}
delta {2[0]} 0.0 0.0
delta 0.0 {2[1]} 0.0
delta 0.0 0.0 {2[2]}
object 2 class gridconnections counts {0[0]} {0[1]} {0[2]}
object 3 class array type double rank 0 items {3} data follows
""".format(data['counts'],data['origin'],data['spacing'],n_points))
        # dx convention: three values per line.
        for start_n in range(0,len(data['vals']),3):
            F.write(' '.join(['%6e'%c for c in data['vals'][start_n:start_n+3]]) + '\n')
        F.write('object 4 class field\n')
        F.write('component "positions" value 1\n')
        F.write('component "connections" value 2\n')
        F.write('component "data" value 3\n')
        F.close()

    def _write_nc(self, FN, data):
        """
        Writes a grid in netcdf format.
        """
        n_points = data['counts'][0]*data['counts'][1]*data['counts'][2]
        from netCDF4 import Dataset
        grid_nc = Dataset(FN,'w',format='NETCDF4')
        grid_nc.createDimension('one', 1)
        grid_nc.createDimension('n_cartesian', 3)
        grid_nc.createDimension('n_points', n_points)
        grid_nc.createVariable('origin','f8',('one','n_cartesian'))
        grid_nc.createVariable('counts','i8',('one','n_cartesian'))
        grid_nc.createVariable('spacing','f8',('one','n_cartesian'))
        grid_nc.createVariable('vals','f8',('one','n_points'), zlib=True)
        for key in data.keys():
            grid_nc.variables[key][:] = data[key]
        grid_nc.close()

    def truncate(self, in_FN, out_FN, counts, multiplier=None):
        """
        Truncates the grid at the origin and
        with a limited number of counts per dimension.
        multiplier is for the values, not the grid scaling.
        """
        data_o = self.read(in_FN)
        # Strides (in flattened index space) of the original grid.
        nyz_o = data_o['counts'][1]*data_o['counts'][2]
        nz_o = data_o['counts'][2]
        # Index of the point at the coordinate origin in each dimension.
        min_i = int(-data_o['origin'][0]/data_o['spacing'][0])
        min_j = int(-data_o['origin'][1]/data_o['spacing'][1])
        min_k = int(-data_o['origin'][2]/data_o['spacing'][2])
        # vals = np.ndarray(shape=tuple(counts), dtype=float)
        # for i in range(counts[0]):
        #   for j in range(counts[1]):
        #     for k in range(counts[2]):
        #       vals[i,j,k] = data_o['vals'][(i+min_i)*nyz_o + (j+min_j)*nz_o + (k+min_k)]
        vals = np.array(
            [[[data_o['vals'][(i+min_i)*nyz_o + (j+min_j)*nz_o + (k+min_k)]
                for k in range(counts[2])]
               for j in range(counts[1])]
              for i in range(counts[0])])
        if multiplier is not None:
            vals = vals*multiplier
        data_n = {'origin':np.array([0., 0., 0.]), \
            'counts':counts, 'spacing':data_o['spacing'], 'vals':vals.flatten()}
        self.write(out_FN,data_n)
class crd:
    """
    Class to read and write AMBER coordinate/restart and trajectory files.
    """
    # NOTE(review): Python 2 code (print statements, integer division in
    # `len(crd)/3`); must run under Python 2 as-is.
    def __init__(self):
        pass

    def read(self, FN, natoms=None, return_title=False, \
        multiplier=None, trajectory=False):
        """
        Reads an AMBER coordinate/restart or trajectory file.

        If natoms is not None, the coordinates are split into a list of
        natoms X 3 arrays. If multiplier is not None, coordinates are
        multiplied by it (e.g. 0.1 converts Angstroms into nanometers).
        trajectory selects mdcrd field width (8) over inpcrd width (12).
        """
        if not os.path.isfile(FN):
            raise Exception('Coordinate file %s does not exist!'%FN)
        if FN.endswith('.gz'):
            import gzip
            F = gzip.open(FN, 'r')
        else:
            F = open(FN,'r')
        dat = F.read().strip().split('\n')
        F.close()
        title = dat.pop(0) # Title
        if len(dat[0].split())>1:
            # VMD format (does not specify number of atoms)
            crd = []
            for line in dat:
                crd = crd + [float(x) for x in line.split()]
            crd = np.resize(crd,(len(crd)/3,3))
        else:
            # AMBER format
            file_natoms = int(dat.pop(0)) # Number of atoms
            if (natoms is not None) and (file_natoms!=natoms):
                print "Incorrect number of atoms in crd file"
                return np.array([])
            # Fixed-width fields; width depends on the file flavor.
            if trajectory:
                w = 8  # For mdcrd
            else:
                w = 12 # For inpcrd
            crd = []
            for line in dat:
                crd = crd + [float(line[x:x+w]) for x in range(0,len(line),w)]
            crd = np.resize(crd,(len(crd)/3,3))
        if multiplier is not None:
            crd = multiplier*crd
        if (natoms is not None):
            # Split the stacked coordinates into per-frame arrays.
            crd = np.vsplit(crd,crd.shape[0]/natoms)
            print "  read %d configurations from %s"%(len(crd), FN)
        if return_title:
            return (crd, title)
        else:
            return crd

    def write(self, FN, crd, title='', append=False, \
        multiplier=None, trajectory=False):
        """
        Writes an AMBER coordinate/restart or trajectory file.

        append reuses an existing file; otherwise an existing file is
        backed up to FN+'.BAK' first. multiplier rescales coordinates.
        """
        if (append and os.path.isfile(FN)):
            if FN.endswith('.gz'):
                import gzip
                F = gzip.open(FN,'a')
            else:
                F = open(FN,'a')
        else:
            if os.path.isfile(FN):
                # Preserve any existing file before overwriting.
                os.rename(FN,FN+'.BAK')
            if FN.endswith('.gz'):
                import gzip
                F = gzip.open(FN,'w')
            else:
                F = open(FN,'w')
        # Write the header
        F.write(title+'\n') # Title
        if not trajectory:
            F.write('%d\n'%crd.shape[0])
        if not trajectory:
            # inpcrd: 6 values per line, %12.7f fields.
            flattened = np.vstack(crd).flatten()
            if multiplier is not None:
                flattened = multiplier*flattened
            for n in range(0,len(flattened),6):
                F.write(''.join(['%12.7f'%val for val in flattened[n:n+6]]) + '\n')
        else:
            # mdcrd: 10 values per line, %8.3f fields, one frame at a time.
            for c in crd:
                flattened = c.flatten()
                if multiplier is not None:
                    flattened = multiplier*flattened
                for n in range(0,len(flattened),10):
                    F.write(''.join(['%8.3f'%val for val in flattened[n:n+10]]) + '\n')
        F.close()
class dock6_mol2:
    """
    Reader for multi-model mol2 output from UCSF DOCK 6.
    """
    def __init__(self):
        pass

    def read(self, FN, reorder=None):
        """
        Parse poses and per-pose energies from a DOCK 6 mol2 file.

        Returns (crds, E): a list of coordinate arrays (file Angstroms
        divided by 10) and a dict mapping energy labels to value lists.
        Both are empty when FN is None or does not exist. reorder, if
        given, is an index array applied to each pose's atom rows.
        """
        crds = []
        E = {}
        if (FN is None) or (not os.path.isfile(FN)):
            return (crds, E)
        # Specifically to read output from UCSF dock6
        if FN.endswith('.mol2'):
            handle = open(FN, 'r')
        elif FN.endswith('.mol2.gz'):
            import gzip
            handle = gzip.open(FN, 'r')
        else:
            raise Exception('Unknown file type')
        models = handle.read().strip().split('########## Name:')
        handle.close()
        models.pop(0)  # discard text before the first model
        if len(models) > 0:
            # Initialize one energy list per '##########' label line,
            # taken from the first model's header.
            for line in models[0].split('\n'):
                if line.startswith('##########'):
                    label = line[11:line.find(':')].strip()
                    E[label] = []
            for model in models:
                fields = model.split('<TRIPOS>')
                # Columns 2-4 of the ATOM section are x, y, z in Angstroms.
                crd = np.array(
                    [l.split()[2:5] for l in fields[2].split('\n')[1:-1]],
                    dtype=float) / 10.
                if reorder is not None:
                    crd = crd[reorder, :]
                for line in fields[0].split('\n'):
                    if line.startswith('##########'):
                        label = line[11:line.find(':')].strip()
                        E[label].append(float(line.split()[-1]))
                crds.append(crd)
        return (crds, E)
class dcd:
    """
    Class to write DCD files.
    """
    def __init__(self, molecule, ligand_atom_order=None, \
        receptorConf=None, ligand_first_atom=0):
        # molecule          - MMTK molecule providing the ligand atom list
        # ligand_atom_order - permutation mapping internal atom order to
        #                     output order; identity when None
        # receptorConf      - receptor coordinates, required when writing
        #                     receptor atoms
        # ligand_first_atom - index splitting receptor atoms that come
        #                     before/after the ligand block
        self.molecule = molecule
        self.receptorConf = receptorConf
        self.ligand_first_atom = ligand_first_atom
        if ligand_atom_order is None:
            self.ligand_atom_order = range(len(self.molecule.atoms))
        else:
            self.ligand_atom_order = ligand_atom_order
        pass

    def write(self, FN, confs,
              includeLigand=True, includeReceptor=False,
              factor=1.0/MMTK.Units.Ang,
              delta_t=0.1):
        """
        Writes a DCD file for a trajectory.
        If includeReceptor==True, the receptor coordinates are included.

        factor converts from MMTK internal units to Angstroms (DCD units);
        delta_t is the time step recorded in the DCD header.
        """
        import MMTK_DCD  # @UnresolvedImport
        from Scientific import N
        if not isinstance(confs,list):
            confs = [confs]
        if includeReceptor and (self.receptorConf is None):
            raise Exception("Missing receptor configuration")
        n_atoms = 0
        if includeReceptor:
            # Receptor atoms are split into the block before the ligand
            # (suffix 0) and the block after it (suffix 1).
            receptor_x0 = factor*self.receptorConf[:self.ligand_first_atom,0]
            receptor_y0 = factor*self.receptorConf[:self.ligand_first_atom,1]
            receptor_z0 = factor*self.receptorConf[:self.ligand_first_atom,2]
            receptor_x1 = factor*self.receptorConf[self.ligand_first_atom:,0]
            receptor_y1 = factor*self.receptorConf[self.ligand_first_atom:,1]
            receptor_z1 = factor*self.receptorConf[self.ligand_first_atom:,2]
            n_atoms += self.receptorConf.shape[0]
        if includeLigand:
            n_atoms += len(self.molecule.atoms)
        n_snaps = len(confs)
        fd = MMTK_DCD.writeOpenDCD(FN, n_atoms, n_snaps, 1, 1, delta_t)
        if includeReceptor and includeLigand:
            # Ligand coordinates are interleaved between the two receptor
            # blocks, reordered by ligand_atom_order.
            for array in confs:
                array = factor*array
                x = N.concatenate((receptor_x0,N.take(array[:,0],self.ligand_atom_order),receptor_x1)).astype(N.Float16)
                y = N.concatenate((receptor_y0,N.take(array[:,1],self.ligand_atom_order),receptor_y1)).astype(N.Float16)
                z = N.concatenate((receptor_z0,N.take(array[:,2],self.ligand_atom_order),receptor_z1)).astype(N.Float16)
                MMTK_DCD.writeDCDStep(fd, x, y, z)
            MMTK_DCD.writeCloseDCD(fd)
        elif includeLigand:
            for array in confs:
                array = factor*array
                x = N.take(array[:,0], self.ligand_atom_order).astype(N.Float16)
                y = N.take(array[:,1], self.ligand_atom_order).astype(N.Float16)
                z = N.take(array[:,2], self.ligand_atom_order).astype(N.Float16)
                MMTK_DCD.writeDCDStep(fd, x, y, z)
            MMTK_DCD.writeCloseDCD(fd)
        else:
            # Receptor only: a single static frame.
            x = N.concatenate((receptor_x0,receptor_x1)).astype(N.Float16)
            y = N.concatenate((receptor_y0,receptor_y1)).astype(N.Float16)
            z = N.concatenate((receptor_z0,receptor_z1)).astype(N.Float16)
            MMTK_DCD.writeDCDStep(fd, x, y, z)
            MMTK_DCD.writeCloseDCD(fd)
class prmtop:
    """
    Reader for AMBER prmtop (parameter/topology) files.
    """
    def __init__(self):
        pass

    def read(self, FN, varnames=['RESIDUE_LABEL','RESIDUE_POINTER']):
        """
        Parse an AMBER prmtop file into a dictionary keyed by %FLAG name.

        Only the flags listed in varnames are retained; each value is the
        numpy array produced by _load_record.
        """
        if not os.path.isfile(FN):
            raise Exception('prmtop file %s does not exist!'%FN)
        if FN.endswith('.gz'):
            import gzip
            F = gzip.open(FN, 'r')
        else:
            F = open(FN,'r')
        contents = F.read()
        F.close()
        prmtop = {}
        # Records are delimited by '%FLAG '; the first chunk (file header)
        # never matches a requested name.
        for record in contents.split('%FLAG '):
            name = record[:record.find('\n')].strip()
            if name in varnames:
                prmtop[name] = self._load_record(record)
        return prmtop

    def _load_record(self, record):
        """
        Convert one %FLAG record into a numpy array, using the record's
        %FORMAT line (e.g. 20a4, 10I8, 5E16.8) to pick field width and type.
        """
        lines = record.split('\n')
        lines.pop(0)  # flag name line
        FORMAT = lines.pop(0).strip()[8:-1]  # strip '%FORMAT(' and ')'
        if FORMAT.find('a') > -1:  # fixed-width text fields
            width = int(FORMAT[FORMAT.find('a') + 1:])
            fields = [line[x:x + width]
                      for line in lines for x in range(0, len(line), width)]
            return np.array(fields)
        elif FORMAT.find('I') > -1:  # integer fields
            width = int(FORMAT[FORMAT.find('I') + 1:])
            fields = [int(line[x:x + width])
                      for line in lines for x in range(0, len(line), width)]
            return np.array(fields, dtype=int)
        elif FORMAT.find('E') > -1:  # scientific-notation floats
            width = int(FORMAT[FORMAT.find('E') + 1:FORMAT.find('.')])
            fields = [float(line[x:x + width])
                      for line in lines for x in range(0, len(line), width)]
            return np.array(fields, dtype=float)
| luizcieslak/AlGDock | AlGDock/IO.py | Python | mit | 14,740 | [
"Amber",
"NetCDF",
"VMD"
] | 71a55a40577dc00d55848a0e0907eb49edcc4fe6f0af05cbf5c72107da03c96d |
#!/usr/bin/env python
import vtk
def main():
    """Display a fully connected 3-vertex graph with labelled vertices
    and weighted, labelled edges laid out on a circle."""
    colors = vtk.vtkNamedColors()  # kept for parity with the original; unused

    graph = vtk.vtkMutableUndirectedGraph()

    # Three vertices, fully connected (a triangle).
    a = graph.AddVertex()
    b = graph.AddVertex()
    c = graph.AddVertex()
    graph.AddEdge(a, b)
    graph.AddEdge(b, c)
    graph.AddEdge(a, c)

    # Per-edge weights, in the same order the edges were added.
    weights = vtk.vtkDoubleArray()
    weights.SetNumberOfComponents(1)
    weights.SetName("Weights")
    for w in (1.0, 1.0, 2.0):
        weights.InsertNextValue(w)

    # Integer labels for the vertices.
    vertexIDs = vtk.vtkIntArray()
    vertexIDs.SetNumberOfComponents(1)
    vertexIDs.SetName("VertexIDs")
    for vid in (0, 1, 2):
        vertexIDs.InsertNextValue(vid)

    # Attach both attribute arrays to the graph.
    graph.GetEdgeData().AddArray(weights)
    graph.GetVertexData().AddArray(vertexIDs)

    # Lay the graph out on a circle and label vertices and edges.
    layout = vtk.vtkCircularLayoutStrategy()
    view = vtk.vtkGraphLayoutView()
    view.AddRepresentationFromInput(graph)
    view.SetLayoutStrategy(layout)
    view.SetVertexLabelVisibility(1)
    view.SetEdgeLabelVisibility(1)
    view.SetEdgeLabelArrayName("Weights")      # default is "labels"
    view.SetVertexLabelArrayName("VertexIDs")  # default is "labels"
    view.ResetCamera()
    view.Render()
    view.GetInteractor().Start()


if __name__ == '__main__':
    main()
| lorensen/VTKExamples | src/Python/Graphs/LabelVerticesAndEdges.py | Python | apache-2.0 | 1,646 | [
"VTK"
] | a1c73f8b5082e949138c98e8e9670efed182ffd643153fd4c889a48a601f7a82 |
import ast
import itertools
import multiprocessing
def get_imports(fs, root_path, parent_span):
    """Collect the top-level module names imported by .py files under
    root_path, extracting imports from each file in a worker pool."""
    # TODO: consider crawling over the main project files only; ignore
    # examples, tests, etc
    python_paths = (p for p in fs.walk(root_path) if p.endswith(".py"))
    sources = fs.batch_open(python_paths, parent_span)
    with multiprocessing.Pool() as pool:
        per_file = pool.imap_unordered(
            _imap_extract_imports, sources, chunksize=10)
        # Keep only the root package of each dotted import path.
        names = {imp.split(".")[0]
                 for imp in itertools.chain.from_iterable(per_file)}
    return names
def _imap_extract_imports(args):
    """Pool-friendly wrapper: unpack a (path, source) pair and return the
    set of imports found in the source."""
    path, source = args
    return set(extract_imports(source, path))
def extract_imports(source, path):
    """Yield the top-level modules imported by `source`; yields nothing
    when the source does not parse. `path` is currently unused."""
    try:
        tree = ast.parse(source)
    except SyntaxError:
        return
    yield from ImportVisitor().visit(tree)
class ImportVisitor:
    """AST visitor that yields the module names of top-level imports.

    Only `import x` and absolute `from x import y` count; relative
    imports are treated as internal and skipped.
    """

    def visit_Module(self, node, container):
        # The module is the global scope; just walk its children.
        yield from self.generic_visit(node)

    def visit_Import(self, node, container):
        for alias in node.names:
            yield alias.name

    def visit_ImportFrom(self, node, container):
        # level > 0 means a relative (internal) import — ignore it.
        if not node.level:
            yield node.module

    # Based on ast.NodeVisitor.visit
    def visit(self, node, container=None):
        """Dispatch to visit_<NodeType>; unrecognized nodes yield nothing
        (no generic fallback — only top-level imports matter)."""
        handler = getattr(self, 'visit_' + type(node).__name__, None)
        if handler is None:
            return
        yield from handler(node, container)

    # Based on ast.NodeVisitor.generic_visit
    def generic_visit(self, node, container=None):
        """Visit every AST child of `node`, flattening list-valued fields."""
        for _, value in ast.iter_fields(node):
            children = value if isinstance(value, list) else [value]
            for child in children:
                if isinstance(child, ast.AST):
                    yield from self.visit(child, container)
| sourcegraph/python-langserver | langserver/imports.py | Python | mit | 2,232 | [
"VisIt"
] | ccdbb44c43420bb2ace8003b7e1b16fda1c451285019753dadb68c77df0ad5e3 |
import numpy
import cPickle
import theano
import theano.tensor as T
from mlp.logistic_sgd import LogisticRegression
from dA.AutoEncoder import AutoEncoder
from SdA import SdA
from numpy.linalg import norm
from theano.tensor.shared_randomstreams import RandomStreams
from extract_datasets import extract_unlabeled_chunkrange
from load_shared import load_data_unlabeled
from tables import openFile
import os
import sys
import time
from datetime import datetime
from optparse import OptionParser
def test_pickle_SdA(num_epochs=10, pretrain_lr=0.00001, lr_decay = 0.98, batch_size=20):
    """
    Pretrain an SdA model for the given number of training epochs, pickle it,
    unpickle it, and verify the layer parameters survive the round trip.

    :type num_epochs: int
    :param num_epochs: number of epoch to do pretraining

    :type pretrain_lr: float
    :param pretrain_lr: learning rate to be used during pre-training

    :type lr_decay: float
    :param lr_decay: multiplicative decay applied to the learning rate per epoch

    :type batch_size: int
    :param batch_size: train in mini-batches of this size

    NOTE(review): Python 2 code (`print >>`, `xrange`, `file()`,
    `time.clock()`); relies on module-level `options` set in __main__.
    """
    layer_types=['Gaussian','Bernoulli']
    # Timestamped output file placed in options.dir.
    current_dir = os.getcwd()
    os.chdir(options.dir)
    today = datetime.today()
    day = str(today.date())
    hour = str(today.time())
    output_filename = "test_pickle_sda_." + '_'.join([elem for elem in layer_types]) + day + "." + hour
    output_file = open(output_filename,'w')
    os.chdir(current_dir)
    print >> output_file, "Run on " + str(datetime.now())

    # Get the training data sample from the input file
    data_set_file = openFile(str(options.inputfile), mode = 'r')
    datafiles = extract_unlabeled_chunkrange(data_set_file, num_files = 10)
    train_set_x = load_data_unlabeled(datafiles, features = (5,20))
    data_set_file.close()

    # compute number of minibatches for training, validation and testing
    n_train_batches, n_features = train_set_x.get_value(borrow=True).shape
    n_train_batches /= batch_size

    # numpy random generator
    numpy_rng = numpy.random.RandomState(89677)
    print '... building the model'

    # Set the initial value of the learning rate
    learning_rate = theano.shared(numpy.asarray(pretrain_lr,
                                                dtype=theano.config.floatX))

    # Function to decrease the learning rate
    decay_learning_rate = theano.function(inputs=[], outputs=learning_rate,
                updates={learning_rate: learning_rate * lr_decay})

    sda_model = SdA(numpy_rng=numpy_rng, n_ins=n_features,
              hidden_layers_sizes=[5, 5],
              corruption_levels = [0.25, 0.25],
              layer_types=layer_types)

    #########################
    # PRETRAINING THE MODEL #
    #########################
    print '... getting the pretraining functions'
    pretraining_fns = sda_model.pretraining_functions(train_set_x=train_set_x,
                                                batch_size=batch_size,
                                                learning_rate=learning_rate)

    #print '... dumping pretraining functions to output file pre pickling'
    #print >> output_file, 'Pretraining functions, pre pickling'
    #for i in xrange(sda.n_layers):
        #theano.printing.debugprint(pretraining_fns[i], file = output_file, print_type=True)

    print '... pre-training the model'
    start_time = time.clock()

    ## Pre-train layer-wise
    corruption_levels = [float(options.corruption), float(options.corruption)]
    for i in xrange(sda_model.n_layers):
        for epoch in xrange(num_epochs):
            # go through the training set
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index,
                         corruption=corruption_levels[i]))
            print >> output_file, 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
            print >> output_file, numpy.mean(c)
            print >> output_file, 'Learning rate '
            print >> output_file, learning_rate.get_value(borrow=True)
            decay_learning_rate()

    end_time = time.clock()
    print >> output_file, ('Pretraining time for file ' +
                          os.path.split(__file__)[1] +
                          ' was %.2fm to go through %i epochs' % (((end_time - start_time) / 60.), (num_epochs / 2)))

    # Pickle the SdA
    print >> output_file, 'Pickling the model...'
    f = file(options.savefile, 'wb')
    cPickle.dump(sda_model, f, protocol=cPickle.HIGHEST_PROTOCOL)
    f.close()

    # Unpickle the SdA
    print >> output_file, 'Unpickling the model...'
    f = file(options.savefile, 'rb')
    pickled_sda = cPickle.load(f)
    f.close()

    # Test that the W-matrices and biases for the dA layers in sda are all close to the W-matrices
    # and biases freshly unpickled
    for i in xrange(pickled_sda.n_layers):
        pickled_dA_params = pickled_sda.dA_layers[i].get_params()
        fresh_dA_params = sda_model.dA_layers[i].get_params()
        if not numpy.allclose(pickled_dA_params[0].get_value(), fresh_dA_params[0].get_value()):
            print >> output_file, ("numpy says that Ws in layer %i are not close" % (i))
            print >> output_file, "Norm for pickled dA " + pickled_dA_params[0].name + ": "
            print >> output_file, norm(pickled_dA_params[0].get_value())
            print >> output_file, "Values for pickled dA " + pickled_dA_params[0].name + ": "
            print >> output_file, numpy.array_repr(pickled_dA_params[0].get_value())
            print >> output_file, "Norm for fresh dA " + fresh_dA_params[0].name + ": "
            print >> output_file, norm(fresh_dA_params[0].get_value())
            print >> output_file, "Values for fresh dA " + fresh_dA_params[0].name + ": "
            print >> output_file, numpy.array_repr(fresh_dA_params[0].get_value())
        if not numpy.allclose(pickled_dA_params[1].get_value(), fresh_dA_params[1].get_value()):
            print >> output_file, ("numpy says that the biases in layer %i are not close" % (i))
            print >> output_file, "Norm for pickled dA " + pickled_dA_params[1].name + ": "
            print >> output_file, norm(pickled_dA_params[1].get_value())
            print >> output_file, "Values for pickled dA " + pickled_dA_params[1].name + ": "
            print >> output_file, numpy.array_repr(pickled_dA_params[1].get_value())
            print >> output_file, "Norm for fresh dA " + fresh_dA_params[1].name + ": "
            print >> output_file, norm(fresh_dA_params[1].get_value())
            # NOTE(review): prints pickled values where fresh values seem
            # intended (pickled_dA_params used on both lines) — confirm.
            print >> output_file, "Values for fresh dA " + pickled_dA_params[1].name + ": "
            print >> output_file, numpy.array_repr(pickled_dA_params[1].get_value())
    output_file.close()
def test_unpickle_SdA(num_epochs=10, pretrain_lr=0.001, batch_size=10, lr_decay = 0.98):
    """ Unpickle an SdA from file, continue pre-training for a given number of epochs.

    :type num_epochs: int
    :param num_epochs: number of epoch to do pretraining

    :type pretrain_lr: float
    :param pretrain_lr: learning rate to be used during pre-training

    :type batch_size: int
    :param batch_size: train in mini-batches of this size

    :type lr_decay: float
    :param lr_decay: decay the learning rate by this proportion each epoch

    NOTE(review): Python 2 code; relies on module-level `options` set in
    __main__ and on the pickle written by test_pickle_SdA.
    """
    current_dir = os.getcwd()
    os.chdir(options.dir)
    today = datetime.today()
    day = str(today.date())
    hour = str(today.time())
    output_filename = "test_unpickle_sda_pretrain." + day + "." + hour
    output_file = open(output_filename,'w')
    os.chdir(current_dir)
    print >> output_file, "Run on " + str(datetime.now())

    # Get the training data sample from the input file
    data_set_file = openFile(str(options.inputfile), mode = 'r')
    datafiles = extract_unlabeled_chunkrange(data_set_file, num_files = 10)
    train_set_x = load_data_unlabeled(datafiles, features = (5,20))
    data_set_file.close()

    # compute number of minibatches for training, validation and testing
    n_train_batches, n_features = train_set_x.get_value(borrow=True).shape
    n_train_batches /= batch_size

    learning_rate = theano.shared(numpy.asarray(pretrain_lr,
                                                dtype=theano.config.floatX))

    # Function to decrease the learning rate
    decay_learning_rate = theano.function(inputs=[], outputs=learning_rate,
                updates={learning_rate: learning_rate * lr_decay})

    # Unpickle the SdA
    print >> output_file, 'Unpickling the model...'
    f = file(options.savefile, 'rb')
    pickled_sda = cPickle.load(f)
    f.close()

    # Train for the remaining
    pretraining_fns = pickled_sda.pretraining_functions(train_set_x=train_set_x,
                                                    batch_size=batch_size,
                                                    learning_rate=learning_rate)

    print >> output_file, 'Resume training...'
    start_time = time.clock()

    ## Pre-train layer-wise ##
    corruption_levels = [float(options.corruption), float(options.corruption)]
    for i in xrange(pickled_sda.n_layers):
        for epoch in xrange(num_epochs):
            # go through the training set
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index,
                         corruption=corruption_levels[i]))
            print >> output_file, 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
            print >> output_file, numpy.mean(c)
            decay_learning_rate()
            print >> output_file, 'Learning rate '
            print >> output_file, learning_rate.get_value(borrow=True)

    end_time = time.clock()
    print >> output_file, ('Pretraining time for file ' +
                          os.path.split(__file__)[1] +
                          ' was %.2fm to go through the remaining %i epochs' % (((end_time - start_time) / 60.), (num_epochs / 2)))
    output_file.close()
if __name__ == '__main__':
    # Command-line options shared (via the module-level `options`) by both
    # test functions below.
    parser = OptionParser()
    parser.add_option("-d", "--dir", dest="dir", help="test output directory")
    parser.add_option("-s","--savefile",dest = "savefile", help = "Save the model to this pickle file")
    parser.add_option("-r","--restorefile",dest = "restorefile", help = "Restore the model from this pickle file")
    parser.add_option("-i", "--inputfile", dest="inputfile", help="the data (hdf5 file) prepended with an absolute path")
    parser.add_option("-c", "--corruption", dest="corruption", help="use this amount of corruption for the dA s")
    (options, args) = parser.parse_args()

    # Round-trip: pickle a freshly pre-trained SdA, then resume from it.
    test_pickle_SdA()
    test_unpickle_SdA()
| lzamparo/SdA_reduce | theano_models/SdA/test_pickle_SdA.py | Python | bsd-3-clause | 10,938 | [
"Gaussian"
] | 4da143c4938582a97dfcd6e833c76c773a967fe4f868f88706c8f773bf0d2a55 |
#!/usr/bin/env python3
from olctools.accessoryFunctions.accessoryFunctions import MetadataObject
from genemethods.geneseekr.geneseekr import GeneSeekr
from genemethods.geneseekr.blast import BLAST
import multiprocessing
from glob import glob
from time import time
import os
# Directory containing this test module; test fixtures live beneath it.
test_path = os.path.abspath(os.path.dirname(__file__))
__author__ = 'adamkoziol'
def variables():
    """Build the MetadataObject holding the shared settings for a test run."""
    v = MetadataObject()
    data_dir = os.path.join(test_path, 'testdata')
    # Input/output locations under the test data directory.
    v.sequencepath = os.path.join(data_dir, 'aa_sequences')
    v.targetpath = os.path.join(data_dir, 'databases', 'card_aa')
    v.reportpath = os.path.join(data_dir, 'reports')
    # BLAST thresholds and feature flags.
    v.cutoff = 70
    v.evalue = '1E-05'
    v.align = False
    v.unique = False
    v.resfinder = False
    v.virulencefinder = False
    # Resources and timing.
    v.numthreads = multiprocessing.cpu_count()
    v.start = time()
    return v
def method_init(analysistype, program, align, unique):
    """Create a BLAST method object configured for one analysis run.

    Also publishes the settings as the module-level `var`, which later
    tests read directly.
    """
    global var
    var = variables()
    var.analysistype = analysistype
    var.program = program
    var.align = align
    var.unique = unique
    return BLAST(var)
# Module-level fixture: a blastp GeneSeekr run shared, in order, by every
# test below (the tests mutate and read this object as a pipeline).
blastp_method = method_init(analysistype='geneseekr',
                            program='blastp',
                            align=True,
                            unique=True)
def test_parser():
    # Target discovery found the amino-acid AMR database file.
    assert os.path.basename(blastp_method.targets[0]) == 'amr.tfa'


def test_combined_files():
    # The targets were concatenated into a combined FASTA file.
    assert os.path.isfile(blastp_method.combinedtargets)


def test_strains():
    # The query strain file exists on disk...
    assert os.path.isfile(blastp_method.strains[0])


def test_strain():
    # ...and is the expected test FASTA.
    assert os.path.basename(blastp_method.strains[0]) == 'amr_test.fasta'


def test_makeblastdb():
    # Build the protein BLAST database; keeps `geneseekr` for later tests.
    global geneseekr
    geneseekr = GeneSeekr()
    geneseekr.makeblastdb(fasta=blastp_method.combinedtargets,
                          program=blastp_method.program)
    assert os.path.isfile(os.path.join(var.targetpath, 'combinedtargets.psq'))


def test_variable_populate():
    # Populate the target folder/file/record globals used below.
    global targetfolders
    global targetfiles
    global records
    targetfolders, targetfiles, records = \
        geneseekr.target_folders(metadata=blastp_method.metadata,
                                 analysistype=blastp_method.analysistype)


def test_targetfolders():
    assert os.path.basename(list(targetfolders)[0]) == 'card_aa'


def test_targetfiles():
    assert targetfiles[0] == blastp_method.combinedtargets


def test_records():
    # The parsed records include the yojI gene from the database.
    assert records[targetfiles[0]]['yojI']
def test_blastp():
    # Run blastp and confirm the raw report was written.
    global blastp_report
    blastp_method.metadata = geneseekr.run_blast(metadata=blastp_method.metadata,
                                                 analysistype=blastp_method.analysistype,
                                                 program=blastp_method.program,
                                                 outfmt=blastp_method.outfmt,
                                                 evalue=blastp_method.evalue,
                                                 num_threads=blastp_method.cpus)
    blastp_report = os.path.join(var.reportpath, 'amr_test_blastp_geneseekr.tsv')
    assert os.path.isfile(blastp_report)


def test_enhance_report_parsing():
    # Re-write the report with a header row; first column must be query_id.
    geneseekr.parseable_blast_outputs(metadata=blastp_method.metadata,
                                      analysistype=blastp_method.analysistype,
                                      fieldnames=blastp_method.fieldnames,
                                      program=blastp_method.program)
    header = open(blastp_report).readline()
    assert header.split('\t')[0] == 'query_id'


def test_blastp_results():
    # The first data row's identity column meets the minimum threshold.
    with open(blastp_report) as blast_results:
        next(blast_results)
        data = blast_results.readline()
        results = data.split('\t')
        assert int(results[2]) >= 50
def test_blast_parse():
blastp_method.metadata = geneseekr.unique_parse_blast(metadata=blastp_method.metadata,
analysistype=blastp_method.analysistype,
fieldnames=blastp_method.fieldnames,
cutoff=blastp_method.cutoff,
program=blastp_method.program)
for sample in blastp_method.metadata:
assert sample.geneseekr.queryranges['contig1'] == [[1, 547]]
def test_filter():
blastp_method.metadata = geneseekr.filter_unique(metadata=blastp_method.metadata,
analysistype=blastp_method.analysistype)
for sample in blastp_method.metadata:
assert sample.geneseekr.blastlist[0]['percentidentity'] >= 70
def test_dict_create():
    """Initialise the per-sample data-storage dictionaries."""
    blastp_method.metadata = geneseekr.dict_initialise(metadata=blastp_method.metadata,
                                                       analysistype=blastp_method.analysistype)
    for sample in blastp_method.metadata:
        # isinstance is the idiomatic type check (also accepts subclasses);
        # the original used `type(...) is dict`.
        assert isinstance(sample.geneseekr.protseq, dict)
def test_report_creation():
    """Generate ResFinder-style reports from the parsed BLASTP results."""
    blastp_method.metadata = geneseekr.resfinder_reporter(metadata=blastp_method.metadata,
                                                          analysistype=blastp_method.analysistype,
                                                          reportpath=blastp_method.reportpath,
                                                          align=blastp_method.align,
                                                          program=blastp_method.program,
                                                          targetpath=blastp_method.targetpath,
                                                          cutoff=blastp_method.cutoff)
def test_report_csv():
    """The reporter writes the per-analysis .tsv report."""
    global geneseekr_csv
    geneseekr_csv = os.path.join(blastp_method.reportpath, 'amr_test_blastp_geneseekr.tsv')
    assert os.path.isfile(geneseekr_csv)
def test_report_xls():
    """The reporter writes the combined Excel workbook."""
    global geneseekr_xls
    geneseekr_xls = os.path.join(blastp_method.reportpath, 'geneseekr_blastp.xlsx')
    assert os.path.isfile(geneseekr_xls)
def test_parse_results():
    """Per-sample results record the expected identity for OXA_12."""
    for sample in blastp_method.metadata:
        assert sample.geneseekr.blastresults['OXA_12'] == 91.86
def test_aaseq():
    """Query protein sequences start with one of the expected N-termini."""
    for sample in blastp_method.metadata:
        assert sample.geneseekr.blastlist[0]['query_sequence'][:4] == 'MELL' or \
               sample.geneseekr.blastlist[0]['query_sequence'][:4] == 'MSRI'
def test_fasta_create():
    """Export FASTA sequences for passing hits and sanity-check the header."""
    global fasta_file
    geneseekr.export_fasta(metadata=blastp_method.metadata,
                           analysistype=blastp_method.analysistype,
                           reportpath=blastp_method.reportpath,
                           cutoff=blastp_method.cutoff,
                           program=blastp_method.program)
    fasta_file = os.path.join(var.reportpath, 'amr_test_geneseekr.fasta')
    assert os.path.isfile(fasta_file)
    # Use a context manager so the FASTA file handle is closed promptly
    # (the original `open(...).readline()` leaked the handle).
    with open(fasta_file, 'r') as fasta:
        header = fasta.readline().rstrip()
    assert header == '>amr_test_OXA_12'
def test_combined_targets_clean():
    """Remove the combined-targets file created for this test run."""
    os.remove(blastp_method.combinedtargets)
def test_makeblastdb_clean():
    """Remove the BLAST protein database files (combinedtargets.p*)."""
    databasefiles = glob(os.path.join(var.targetpath, 'combinedtargets.p*'))
    for dbfile in databasefiles:
        os.remove(dbfile)
def test_remove_blastp_report():
    """Remove the BLASTP report generated by test_blastp."""
    os.remove(blastp_report)
def test_remove_fasta_file():
    """Remove the exported FASTA file."""
    os.remove(fasta_file)
def test_remove_geneseekr_xls():
    """Remove the Excel report."""
    os.remove(geneseekr_xls)
def test_remove_report_path():
    """Remove the (now empty) report directory."""
    os.rmdir(blastp_method.reportpath)
| OLC-Bioinformatics/GeneSeekr | tests/test_blastp.py | Python | mit | 7,385 | [
"BLAST"
] | aeeab2b97db2cdc56ae9f8e57271305aa8c47d998e13424857f6049b39afc81e |
# This is the instrument-specific file for the PS5000 series of instruments.
#
# pico-python is Copyright (c) 2013-2014 By:
# Colin O'Flynn <coflynn@newae.com>
# Mark Harfouche <mark.harfouche@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This is the low level driver file for a specific Picoscope.
By this, I mean if parameters want to get passed as strings, they should be
handled by PSBase
All functions here should take things as close to integers as possible, the
only exception here is for array parameters. Array parameters should be passed
in a pythonic way through numpy since the PSBase class should not be aware of
the specifics behind how the clib is called.
The functions should not have any default values as these should be handled
by PSBase.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import math
# to load the proper dll
import platform
# Do not import or use ill definied data types
# such as short int or long
# use the values specified in the h file
# float is always defined as 32 bits
# double is defined as 64 bits
from ctypes import byref, POINTER, create_string_buffer, c_float, \
c_int16, c_int32, c_uint32, c_void_p, c_int64, CFUNCTYPE
from ctypes import c_int32 as c_enum
from picoscope.picobase import _PicoscopeBase
# Decorators for callback functions. PICO_STATUS is uint32_t.
def blockReady(function):
    """Wrap *function* as a ``ps5000aBlockReady`` C callback.

    typedef void (*ps5000aBlockReady)
        (
            int16_t      handle,
            PICO_STATUS  status,
            void        *pParameter
        )

    Returns None when *function* is None, so the result can be handed
    straight to the driver as a NULL callback pointer.
    """
    if function is not None:
        prototype = CFUNCTYPE(c_void_p, c_int16, c_uint32, c_void_p)
        return prototype(function)
    return None
class PS5000a(_PicoscopeBase):
    """Low-level driver bindings for the PicoScope 5000A/B/D series.

    Each ``_lowLevel*`` method wraps exactly one ``ps5000a*`` call from the
    Pico Technology shared library; argument marshalling to ctypes happens
    here, while string/user-friendly handling lives in _PicoscopeBase.
    """
    LIBNAME = "ps5000a"
    NUM_CHANNELS = 4
    CHANNELS = {"A": 0, "B": 1, "C": 2, "D": 3,
                "External": 4, "MaxChannels": 4, "TriggerAux": 5}
    # Driver enum values for the ADC bit depth, keyed by bits as strings.
    ADC_RESOLUTIONS = {"8": 0, "12": 1, "14": 2, "15": 3, "16": 4}
    # Full-scale input ranges and their driver enum values.
    CHANNEL_RANGE = [{"rangeV": 10E-3, "apivalue": 0, "rangeStr": "10 mV"},
                     {"rangeV": 20E-3, "apivalue": 1, "rangeStr": "20 mV"},
                     {"rangeV": 50E-3, "apivalue": 2, "rangeStr": "50 mV"},
                     {"rangeV": 100E-3, "apivalue": 3, "rangeStr": "100 mV"},
                     {"rangeV": 200E-3, "apivalue": 4, "rangeStr": "200 mV"},
                     {"rangeV": 500E-3, "apivalue": 5, "rangeStr": "500 mV"},
                     {"rangeV": 1.0, "apivalue": 6, "rangeStr": "1 V"},
                     {"rangeV": 2.0, "apivalue": 7, "rangeStr": "2 V"},
                     {"rangeV": 5.0, "apivalue": 8, "rangeStr": "5 V"},
                     {"rangeV": 10.0, "apivalue": 9, "rangeStr": "10 V"},
                     {"rangeV": 20.0, "apivalue": 10, "rangeStr": "20 V"},
                     {"rangeV": 50.0, "apivalue": 11, "rangeStr": "50 V"},
                     ]
    CHANNEL_COUPLINGS = {"DC": 1, "AC": 0}
    # has_sig_gen = True
    WAVE_TYPES = {"Sine": 0, "Square": 1, "Triangle": 2,
                  "RampUp": 3, "RampDown": 4,
                  "Sinc": 5, "Gaussian": 6, "HalfSine": 7, "DCVoltage": 8,
                  "WhiteNoise": 9}
    SWEEP_TYPES = {"Up": 0, "Down": 1, "UpDown": 2, "DownUp": 3}
    SIGGEN_TRIGGER_TYPES = {"Rising": 0, "Falling": 1,
                            "GateHigh": 2, "GateLow": 3}
    SIGGEN_TRIGGER_SOURCES = {"None": 0, "ScopeTrig": 1, "AuxIn": 2,
                              "ExtIn": 3, "SoftTrig": 4, "TriggerRaw": 5}
    # This is actually different depending on the AB/CD models
    # I wonder how we could detect the difference between the oscilloscopes
    # I believe we can obtain this information from the setInfo function
    # by reading the hardware version
    # for the PS6403B version, the hardware version is "1 1",
    # an other possibility is that the PS6403B shows up as 6403 when using
    # VARIANT_INFO and others show up as PS6403X where X = A,C or D
    AWGPhaseAccumulatorSize = 32
    AWGDACInterval = 5E-9  # in seconds
    AWGDACFrequency = 1 / AWGDACInterval
    AWG_INDEX_MODES = {"Single": 0, "Dual": 1, "Quad": 2}
    # ADC count limits in 8-bit mode vs the other resolutions.
    MAX_VALUE_8BIT = 32512
    MIN_VALUE_8BIT = -32512
    MAX_VALUE_OTHER = 32767
    MIN_VALUE_OTHER = -32767
    EXT_RANGE_VOLTS = 5
    def __init__(self, serialNumber=None, connect=True):
        """Load the ps5000a shared library and optionally open the scope."""
        if platform.system() == 'Linux':
            from ctypes import cdll
            self.lib = cdll.LoadLibrary("lib" + self.LIBNAME + ".so")
        elif platform.system() == 'Darwin':
            from picoscope.darwin_utils import LoadLibraryDarwin
            self.lib = LoadLibraryDarwin("lib" + self.LIBNAME + ".dylib")
        else:
            from ctypes import windll
            from ctypes.util import find_library
            self.lib = windll.LoadLibrary(
                find_library(str(self.LIBNAME + ".dll"))
            )
        # Default to 8-bit resolution; ps5000aOpenUnit needs it at open time.
        self.resolution = self.ADC_RESOLUTIONS["8"]
        super(PS5000a, self).__init__(serialNumber, connect)
    def _lowLevelOpenUnit(self, serialNumber):
        """Open the unit (optionally by serial number) and read its variant.

        Also configures the AWG buffer limits, which differ by model.
        """
        c_handle = c_int16()
        if serialNumber is not None:
            serialNumberStr = create_string_buffer(bytes(serialNumber,
                                                         encoding='utf-8'))
        else:
            serialNumberStr = None
        # Passing None is the same as passing NULL
        m = self.lib.ps5000aOpenUnit(byref(c_handle), serialNumberStr,
                                     self.resolution)
        self.handle = c_handle.value
        # This will check if the power supply is not connected
        # and change the power supply accordingly
        # Personally (me = Mark), I don't like this
        # since the user should address this immediately, and we
        # shouldn't let this go as a soft error
        # but I think this should do for now
        # NOTE(review): 0x11A is presumably PICO_POWER_SUPPLY_NOT_CONNECTED —
        # confirm against PicoStatus.h.
        if m == 0x11A:
            self.changePowerSource(m)
        else:
            self.checkResult(m)
        # B models have different AWG buffer sizes
        # 5242B, 5442B: 2**14
        # 5243B, 5443B: 2**15
        # 5444B, 5244B: 3 * 2**14
        # Model 5444B identifies itself properly in VariantInfo, I will assume
        # the others do as well.
        self.model = self.getUnitInfo('VariantInfo')
        # print("Checking variant, found: " + str(self.model))
        if self.model in ('5244B', '5444B'):
            self.AWGBufferAddressWidth = math.log(3 * 2**14, 2)
            self.AWGMaxVal = 32767
            self.AWGMinVal = -32768
            self.AWGMaxSamples = 49152
        elif self.model in ('5243B', '5443B', '5243D', '5443D'):
            self.AWGBufferAddressWidth = 15
            self.AWGMaxVal = 32767
            self.AWGMinVal = -32768
            self.AWGMaxSamples = 2**self.AWGBufferAddressWidth
        else:
            # This is what the previous PS5000a used for all scopes.
            # I am leaving it the same, although I think the AWGMaxVal and
            # AWGMinVal issue was fixed and should be -32768 to 32767 for all
            # 5000 models
            self.AWGBufferAddressWidth = 14
            # Note this is NOT what is written in the Programming guide as of
            # version # 10_5_0_28
            # This issue was acknowledged in this thread
            # http://www.picotech.com/support/topic13217.html
            self.AWGMaxVal = 0x0FFF
            self.AWGMinVal = 0x0000
            self.AWGMaxSamples = 2**self.AWGBufferAddressWidth
    def _lowLevelCloseUnit(self):
        """Release the driver handle for this scope."""
        m = self.lib.ps5000aCloseUnit(c_int16(self.handle))
        self.checkResult(m)
    def _lowLevelSetChannel(self, chNum, enabled, coupling, VRange, VOffset,
                            bandwidth):
        """Configure one input channel, then apply its bandwidth filter."""
        m = self.lib.ps5000aSetChannel(c_int16(self.handle), c_enum(chNum),
                                       c_int16(enabled), c_enum(coupling),
                                       c_enum(VRange), c_float(VOffset))
        self.checkResult(m)
        # The errors this might throw are
        # INVALID_HANDLE
        # INVALID_CHANNEL
        # INVALID_BANDWIDTH
        # Invalid bandwidth is the only case that could go wrong.
        # The others would be thrown above (assuming no race condition:
        # i.e. unplugging the scope in between this call.
        # I decided to keep the logic below to avoid a possible error
        # picobase/SetChannel should be changed to the following
        # Set the channel
        # save the new channel settings
        # check if ps5000a
        # change the bandwidth separately
        # changing the bandwidth would be it's own function (implemented below)
        if bandwidth:
            m = self.lib.ps5000aSetBandwidthFilter(c_int16(self.handle),
                                                   c_enum(chNum), c_enum(1))
        else:
            m = self.lib.ps5000aSetBandwidthFilter(c_int16(self.handle),
                                                   c_enum(chNum), c_enum(0))
        self.checkResult(m)
    def _lowLevelSetBandwidthFilter(self, channel, bandwidth):
        """Set the bandwidth-limit filter enum for one channel."""
        m = self.lib.ps5000aSetBandwidthFilter(c_int16(self.handle),
                                               c_enum(channel),
                                               c_enum(bandwidth))
        self.checkResult(m)
    def _lowLevelStop(self):
        """Stop any data capture in progress."""
        m = self.lib.ps5000aStop(c_int16(self.handle))
        self.checkResult(m)
    def _lowLevelGetUnitInfo(self, info):
        """Return the requested unit-info string (decoded as UTF-8)."""
        s = create_string_buffer(256)
        requiredSize = c_int16(0)
        m = self.lib.ps5000aGetUnitInfo(c_int16(self.handle), byref(s),
                                        c_int16(len(s)), byref(requiredSize),
                                        c_enum(info))
        self.checkResult(m)
        # Retry with a larger buffer if 256 bytes was not enough.
        if requiredSize.value > len(s):
            s = create_string_buffer(requiredSize.value + 1)
            m = self.lib.ps5000aGetUnitInfo(c_int16(self.handle), byref(s),
                                            c_int16(len(s)),
                                            byref(requiredSize), c_enum(info))
            self.checkResult(m)
        # should this be ascii instead?
        # I think they are equivalent...
        return s.value.decode('utf-8')
    def _lowLevelFlashLed(self, times):
        """Flash the front-panel LED the given number of times."""
        m = self.lib.ps5000aFlashLed(c_int16(self.handle), c_int16(times))
        self.checkResult(m)
    def _lowLevelSetSimpleTrigger(self, enabled, trigsrc, threshold_adc,
                                  direction, delay, timeout_ms):
        """Configure a single-source edge trigger (threshold in ADC counts)."""
        m = self.lib.ps5000aSetSimpleTrigger(
            c_int16(self.handle), c_int16(enabled),
            c_enum(trigsrc), c_int16(threshold_adc),
            c_enum(direction), c_uint32(delay), c_int16(timeout_ms))
        self.checkResult(m)
    def _lowLevelRunBlock(self, numPreTrigSamples, numPostTrigSamples,
                          timebase, oversample, segmentIndex, callback,
                          pParameter):
        """Start a block-mode capture; return the driver's busy estimate (ms).

        `oversample` and `pParameter` are accepted for interface
        compatibility but not forwarded (pParameter is passed as NULL).
        """
        # Hold a reference to the callback so that the Python
        # function pointer doesn't get free'd.
        self._c_runBlock_callback = blockReady(callback)
        timeIndisposedMs = c_int32()
        m = self.lib.ps5000aRunBlock(
            c_int16(self.handle), c_uint32(numPreTrigSamples),
            c_uint32(numPostTrigSamples), c_uint32(timebase),
            byref(timeIndisposedMs), c_uint32(segmentIndex),
            self._c_runBlock_callback, c_void_p())
        self.checkResult(m)
        return timeIndisposedMs.value
    def _lowLevelIsReady(self):
        """Return True when block-mode data is ready to be retrieved."""
        ready = c_int16()
        m = self.lib.ps5000aIsReady(c_int16(self.handle), byref(ready))
        self.checkResult(m)
        if ready.value:
            return True
        else:
            return False
    def _lowLevelPingUnit(self):
        """Ping the scope; return the raw PICO_STATUS (no checkResult here)."""
        m = self.lib.ps5000aPingUnit(c_int16(self.handle))
        return m
    def _lowLevelGetTimebase(self, tb, noSamples, oversample, segmentIndex):
        """Return (timeIntervalSeconds, maxSamples)."""
        maxSamples = c_int32()
        sampleRate = c_float()
        m = self.lib.ps5000aGetTimebase2(c_int16(self.handle), c_uint32(tb),
                                         c_uint32(noSamples),
                                         byref(sampleRate),
                                         byref(maxSamples),
                                         c_uint32(segmentIndex))
        self.checkResult(m)
        # The driver reports the interval in nanoseconds; convert to seconds.
        return (sampleRate.value / 1.0E9, maxSamples.value)
    def getTimeBaseNum(self, sampleTimeS):
        """Convert sample time in S to something to pass to API Call."""
        # The timebase encoding depends on the ADC resolution: short sample
        # times map to power-of-two timebases, longer ones map linearly.
        # The piecewise formulas below should match the driver's timebase
        # tables — confirm against the 5000A programmer's guide.
        if self.resolution == self.ADC_RESOLUTIONS["8"]:
            maxSampleTime = (((2 ** 32 - 1) - 2) / 125000000)
            if sampleTimeS < 8.0E-9:
                st = math.floor(math.log(sampleTimeS * 1E9, 2))
                st = max(st, 0)
            else:
                if sampleTimeS > maxSampleTime:
                    sampleTimeS = maxSampleTime
                st = math.floor((sampleTimeS * 125000000) + 2)
        elif self.resolution == self.ADC_RESOLUTIONS["12"]:
            maxSampleTime = (((2 ** 32 - 1) - 3) / 62500000)
            if sampleTimeS < 16.0E-9:
                st = math.floor(math.log(sampleTimeS * 5E8, 2)) + 1
                st = max(st, 1)
            else:
                if sampleTimeS > maxSampleTime:
                    sampleTimeS = maxSampleTime
                st = math.floor((sampleTimeS * 62500000) + 3)
        elif (self.resolution == self.ADC_RESOLUTIONS["14"]) or (
                self.resolution == self.ADC_RESOLUTIONS["15"]):
            maxSampleTime = (((2 ** 32 - 1) - 2) / 125000000)
            if sampleTimeS > maxSampleTime:
                sampleTimeS = maxSampleTime
            st = math.floor((sampleTimeS * 125000000) + 2)
            st = max(st, 3)
        elif self.resolution == self.ADC_RESOLUTIONS["16"]:
            maxSampleTime = (((2 ** 32 - 1) - 3) / 62500000)
            if sampleTimeS > maxSampleTime:
                sampleTimeS = maxSampleTime
            st = math.floor((sampleTimeS * 62500000) + 3)
            st = max(st, 3)
        else:
            raise ValueError("Invalid Resolution for Device?")
        # is this cast needed?
        # I think they are equivalent...
        st = int(st)
        return st
    def getTimestepFromTimebase(self, timebase):
        """Return Timestep from timebase."""
        # Inverse of getTimeBaseNum: map a driver timebase number back to
        # the sample interval in seconds for the current resolution.
        if self.resolution == self.ADC_RESOLUTIONS["8"]:
            if timebase < 3:
                dt = 2. ** timebase / 1.0E9
            else:
                dt = (timebase - 2.0) / 125000000.
        elif self.resolution == self.ADC_RESOLUTIONS["12"]:
            if timebase < 4:
                dt = 2. ** (timebase - 1) / 5.0E8
            else:
                dt = (timebase - 3.0) / 62500000.
        elif (self.resolution == self.ADC_RESOLUTIONS["14"]) or (
                self.resolution == self.ADC_RESOLUTIONS["15"]):
            dt = (timebase - 2.0) / 125000000.
        elif self.resolution == self.ADC_RESOLUTIONS["16"]:
            dt = (timebase - 3.0) / 62500000.
        return dt
    def _lowLevelSetAWGSimpleDeltaPhase(self, waveform, deltaPhase,
                                        offsetVoltage, pkToPk, indexMode,
                                        shots, triggerType, triggerSource):
        """Waveform should be an array of shorts."""
        waveformPtr = waveform.ctypes.data_as(POINTER(c_int16))
        m = self.lib.ps5000aSetSigGenArbitrary(
            c_int16(self.handle),
            c_uint32(int(offsetVoltage * 1E6)),  # offset voltage in microvolts
            c_uint32(int(pkToPk * 1E6)),         # pkToPk in microvolts
            c_uint32(int(deltaPhase)),           # startDeltaPhase
            c_uint32(int(deltaPhase)),           # stopDeltaPhase
            c_uint32(0),                         # deltaPhaseIncrement
            c_uint32(0),                         # dwellCount
            waveformPtr,                         # arbitraryWaveform
            c_int32(len(waveform)),              # arbitraryWaveformSize
            c_enum(0),  # sweepType for deltaPhase
            c_enum(0),  # operation (adding random noise and whatnot)
            c_enum(indexMode),  # single, dual, quad
            c_uint32(shots),
            c_uint32(0),  # sweeps
            c_uint32(triggerType),
            c_uint32(triggerSource),
            c_int16(0))  # extInThreshold
        self.checkResult(m)
    def _lowLevelSetDataBuffer(self, channel, data, downSampleMode,
                               segmentIndex):
        """Set the data buffer.
        Be sure to call _lowLevelClearDataBuffer
        when you are done with the data array
        or else subsequent calls to GetValue will still use the same array.
        """
        dataPtr = data.ctypes.data_as(POINTER(c_int16))
        numSamples = len(data)
        m = self.lib.ps5000aSetDataBuffer(c_int16(self.handle),
                                          c_enum(channel),
                                          dataPtr, c_int32(numSamples),
                                          c_uint32(segmentIndex),
                                          c_enum(downSampleMode))
        self.checkResult(m)
    def _lowLevelSetDataBufferBulk(self, channel, data, segmentIndex,
                                   downSampleMode):
        """Just calls setDataBuffer with argument order changed.
        For compatibility with current picobase.py.
        """
        self._lowLevelSetDataBuffer(channel,
                                    data,
                                    downSampleMode,
                                    segmentIndex)
    def _lowLevelClearDataBuffer(self, channel, segmentIndex):
        """Unregister the buffer by passing NULL / length 0 to the driver."""
        m = self.lib.ps5000aSetDataBuffer(c_int16(self.handle),
                                          c_enum(channel),
                                          c_void_p(), c_uint32(0),
                                          c_uint32(segmentIndex),
                                          c_enum(0))
        self.checkResult(m)
    def _lowLevelGetValues(self, numSamples, startIndex, downSampleRatio,
                           downSampleMode, segmentIndex):
        """Fetch captured data; return (numSamplesReturned, overflowFlags)."""
        numSamplesReturned = c_uint32()
        numSamplesReturned.value = numSamples
        overflow = c_int16()
        m = self.lib.ps5000aGetValues(
            c_int16(self.handle), c_uint32(startIndex),
            byref(numSamplesReturned), c_uint32(downSampleRatio),
            c_enum(downSampleMode), c_uint32(segmentIndex),
            byref(overflow))
        self.checkResult(m)
        return (numSamplesReturned.value, overflow.value)
    def _lowLevelSetSigGenBuiltInSimple(self, offsetVoltage, pkToPk, waveType,
                                        frequency, shots, triggerType,
                                        triggerSource, stopFreq, increment,
                                        dwellTime, sweepType, numSweeps):
        """Configure the built-in signal generator (voltages in volts)."""
        # TODO, I just noticed that V2 exists
        # Maybe change to V2 in the future
        if stopFreq is None:
            stopFreq = frequency
        m = self.lib.ps5000aSetSigGenBuiltIn(
            c_int16(self.handle),
            c_int32(int(offsetVoltage * 1000000)),
            c_int32(int(pkToPk * 1000000)),
            c_int16(waveType),
            c_float(frequency), c_float(stopFreq),
            c_float(increment), c_float(dwellTime),
            c_enum(sweepType), c_enum(0),
            c_uint32(shots), c_uint32(numSweeps),
            c_enum(triggerType), c_enum(triggerSource),
            c_int16(0))
        self.checkResult(m)
    def _lowLevelSetDeviceResolution(self, resolution):
        """Change the ADC resolution and remember it for timebase maths."""
        self.resolution = resolution
        m = self.lib.ps5000aSetDeviceResolution(
            c_int16(self.handle),
            c_enum(resolution))
        self.checkResult(m)
    def _lowLevelChangePowerSource(self, powerstate):
        """Acknowledge/select the power source (USB vs external supply)."""
        m = self.lib.ps5000aChangePowerSource(
            c_int16(self.handle),
            c_enum(powerstate))
        self.checkResult(m)
    # Morgan's additions
    def _lowLevelGetValuesBulk(self, numSamples, fromSegment, toSegment,
                               downSampleRatio, downSampleMode, overflow):
        """Copy data from several memory segments at once."""
        overflowPoint = overflow.ctypes.data_as(POINTER(c_int16))
        # NOTE(review): the in/out sample count is passed via a temporary,
        # so the number of samples the driver actually copied is discarded.
        m = self.lib.ps5000aGetValuesBulk(
            c_int16(self.handle),
            byref(c_int32(numSamples)),
            c_int32(fromSegment),
            c_int32(toSegment),
            c_int32(downSampleRatio),
            c_enum(downSampleMode),
            overflowPoint
            )
        self.checkResult(m)
    def _lowLevelSetNoOfCaptures(self, numCaptures):
        """Set how many captures to collect in one rapid-block run."""
        m = self.lib.ps5000aSetNoOfCaptures(
            c_int16(self.handle),
            c_uint32(numCaptures))
        self.checkResult(m)
    def _lowLevelMemorySegments(self, numSegments):
        """Split scope memory into segments; return max samples per segment."""
        maxSamples = c_int32()
        m = self.lib.ps5000aMemorySegments(
            c_int16(self.handle), c_uint32(numSegments), byref(maxSamples))
        self.checkResult(m)
        return maxSamples.value
    def _lowLevelGetValuesTriggerTimeOffsetBulk(self, fromSegment, toSegment):
        """Supposedly gets the trigger times for a bunch of segments at once.
        For block mode.
        Can't get it to work yet, however.
        """
        import numpy as np
        nSegments = toSegment - fromSegment + 1
        # time = c_int64()
        times = np.ascontiguousarray(
            np.zeros(nSegments, dtype=np.int64)
            )
        timeUnits = np.ascontiguousarray(
            np.zeros(nSegments, dtype=np.int32)
            )
        m = self.lib.ps5000aGetValuesTriggerTimeOffsetBulk64(
            c_int16(self.handle),
            times.ctypes.data_as(POINTER(c_int64)),
            timeUnits.ctypes.data_as(POINTER(c_enum)),
            c_uint32(fromSegment),
            c_uint32(toSegment)
            )
        self.checkResult(m)
        # timeUnits=np.array([self.TIME_UNITS[tu] for tu in timeUnits])
        return times, timeUnits
| arunpersaud/pico-python | picoscope/ps5000a.py | Python | bsd-2-clause | 23,542 | [
"Gaussian"
] | 95f1a313613770beed4d4bf79feab07660ded6c96fa9bf12038e3c17bae47fb9 |
from netCDF4 import Dataset
import magic
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import warnings
try:
import gdal
except ModuleNotFoundError:
warnings.warn('gdal not found, this will probably result in prublems (with respect to landuse) further down the road')
def read_file(fname, filetype=None):
    """Read a land-use map from a GeoTIFF or a previously saved netCDF file.

    Originally written for the USGS land-cover data found at
    https://landcover.usgs.gov/landcoverdata.php — other products may or
    may not work.

    Parameters
    ----------
    fname : str
        Path to the input file.
    filetype : str, optional
        One of 'TIFF' or 'netCDF'. If omitted, the type is sniffed with
        libmagic from the file contents.

    Returns
    -------
    LandUseMap

    Raises
    ------
    ValueError
        If the file type is neither given nor recognized.
    """
    #######
    # this is in order to read the internal map ... not used anymore
    # lat = np.linspace(89, -90, 180)
    # lon = np.linspace(-180, 179, 360)
    #
    # fname = '/Users/htelg/Hysplit4/bdyfiles/LANDUSE.ASC'
    # land_use_map = pd.read_fwf(fname, names=lon)
    # land_use_map.index = lat
    allowed_filetypes = ['TIFF', 'netCDF']
    if not filetype:
        # if filetype not in allowed_filetypes:
        #     txt = 'Filetype {} not known'
        # else:
        if 'TIFF image data' in magic.from_file(fname):
            filetype = 'TIFF'
        elif 'Hierarchical Data Format (version 5) data' in magic.from_file(fname):
            filetype = 'netCDF'
    if filetype == 'TIFF':
        data = gdal.Open(fname)
        # Affine geotransform: (origin_lon, lon_step, _, origin_lat, _, lat_step).
        geotrans = data.GetGeoTransform()
        lon_orig = geotrans[0]
        lon_sw = geotrans[1]
        lat_orig = geotrans[3]
        lat_sw = geotrans[5]
        # Build the coordinate axes from origin + step * extent.
        lon_fin = lon_orig + (data.RasterXSize * lon_sw)
        lon_arr = np.arange(lon_orig, lon_fin, lon_sw)
        lat_fin = lat_orig + (data.RasterYSize * lat_sw)
        lat_arr = np.arange(lat_orig, lat_fin, lat_sw)
        arr = data.ReadAsArray()
        arr = pd.DataFrame(arr, index=lat_arr, columns=lon_arr)
        # These are saved LandUseMap instances
    elif filetype == 'netCDF':
        nc = Dataset(fname, 'r')
        lat = nc.variables['lat'][:]
        lon = nc.variables['lon'][:]
        land_use_data = nc.variables['land_use'][:]
        arr = pd.DataFrame(land_use_data, index=lat, columns=lon)
    else:
        txt = 'Filetype "{}" not known or not recognized. Try to set the "filetype" kwarg. Allowed options are {}'.format(filetype, allowed_filetypes)
        raise ValueError(txt)
    # legend
    # Index position in this list is the integer type code in the grid —
    # presumably the MODIS IGBP classification; confirm against the data
    # product documentation.
    land_types = ['Water',
                  'Evergreen Needle leaf Forest',
                  'Evergreen Broadleaf Forest',
                  'Deciduous Needle leaf Forest',
                  'Deciduous Broadleaf Forest',
                  'Mixed Forests',
                  'Closed Shrublands',
                  'Open Shrublands',
                  'Woody Savannas',
                  'Savannas',
                  'Grasslands',
                  'Permanent Wetland',
                  'Croplands',
                  'Urban and Built-Up',
                  'Cropland/Natural Vegetation Mosaic',
                  'Snow and Ice',
                  'Barren or Sparsely Vegetated']
    land_types_legend = pd.DataFrame(land_types)
    land_types_legend.columns = ['land_use_type']
    land_types_legend['color'] = np.nan
    return LandUseMap(arr, land_types_legend)
def save2netCDF(land_use_map, fname, leave_open=False):
    """Write a LandUseMap to a netCDF file with lat/lon dimensions.

    NOTE(review): when leave_open is True the open Dataset handle is not
    returned, so the caller has no way to reach it — possibly a latent bug.
    """
    nc = Dataset(fname, 'w')
    ### Dimensions
    lat_dim = nc.createDimension('lat', land_use_map.land_use_data.index.shape[0])
    lon_dim = nc.createDimension('lon', land_use_map.land_use_data.columns.shape[0])
    ### Variables
    lat_var = nc.createVariable('lat', land_use_map.land_use_data.index.dtype, 'lat')
    lat_var[:] = land_use_map.land_use_data.index.values
    lon_var = nc.createVariable('lon', land_use_map.land_use_data.columns.dtype, 'lon')
    lon_var[:] = land_use_map.land_use_data.columns.values
    land_use_var = nc.createVariable('land_use', land_use_map.land_use_data.values.dtype, ('lat', 'lon',))
    land_use_var[:] = land_use_map.land_use_data.values
    if not leave_open:
        nc.close()
    return
def plot_land_use_map(self, **kwargs):
    """Render the land-use grid as a pcolormesh.

    Returns the (figure, axes, mesh) triple for further customisation.
    ``kwargs`` is accepted for interface compatibility but currently unused.
    """
    lons, lats = np.meshgrid(self.land_use_data.columns, self.land_use_data.index)
    fig, ax = plt.subplots()
    mesh = ax.pcolormesh(lons, lats, self.land_use_data)
    return fig, ax, mesh
class LandUseMap(object):
    """A gridded land-use classification map.

    Attributes
    ----------
    land_use_data : pandas.DataFrame
        Integer land-use type codes indexed by latitude (rows, stored in
        descending order) and longitude (columns, ascending).
    legend : pandas.DataFrame or None
        Optional mapping of type codes to land-use type names.
    """
    def __init__(self, df, legend=None):
        self.land_use_data = df
        self.legend = legend
    # Module-level helper functions bound as methods.
    plot = plot_land_use_map
    save = save2netCDF
    def get_resolution(self):
        """Return the mean grid spacing as (lat_resolution, lon_resolution).

        The latitude index is stored descending, hence the reversed
        difference so that both resolutions come out positive.
        """
        it = self.land_use_data.index.values
        res_it = (it[:-1] - it[1:]).mean()
        ct = self.land_use_data.columns.values
        res_ct = (ct[1:] - ct[:-1]).mean()
        return (res_it, res_ct)
    def down_sample(self, nrows=2, ncols=2):
        """Down-sample the map in blocks of ``nrows`` x ``ncols`` cells.

        Each block is assigned its most frequent land-use type; ties are
        broken at random. Both block dimensions must divide the grid shape
        exactly.

        Parameters
        ----------
        nrows, ncols : int
            Block height (rows / latitude) and width (columns / longitude).

        Returns
        -------
        LandUseMap
            New map with shape (n_lat // nrows, n_lon // ncols).

        Raises
        ------
        ValueError
            If nrows or ncols does not evenly divide the grid; the message
            suggests the nearest workable block sizes.
        """
        mod_row = self.land_use_data.shape[0] % nrows
        mod_col = self.land_use_data.shape[1] % ncols
        if mod_row or mod_col:
            # Search down and up for the nearest block sizes that divide
            # the grid evenly, to include in the error message.
            nrows_h = nrows
            nrows_l = nrows
            while self.land_use_data.shape[0] % nrows_h:
                nrows_h += 1
            while self.land_use_data.shape[0] % nrows_l:
                nrows_l -= 1
            nrows_sug = (nrows_l, nrows_h)
            ncols_h = ncols
            ncols_l = ncols
            # Bug fix: column suggestions must be computed against the
            # number of columns (shape[1]); the original tested shape[0].
            while self.land_use_data.shape[1] % ncols_h:
                ncols_h += 1
            while self.land_use_data.shape[1] % ncols_l:
                ncols_l -= 1
            ncols_sug = (ncols_l, ncols_h)
            txt = 'Non-integer number of blocksizes sizes. Adjust nrows and ncols so an integer number of blocks fit into current grid. Suggestions: nrows = {}, ncols = {}'.format(nrows_sug,
                                                                                                                                                                                    ncols_sug)
            raise ValueError(txt)
        def get_number_of_max_occurence(at):
            # Histogram of the (small, non-negative, integer) type codes.
            counts = np.bincount(at.reshape(at.size))
            # Break ties randomly: zero out the non-maximal counts, then
            # jitter the remaining (equal) maxima so argmax picks one of
            # them at random.
            counts[counts < counts.max()] = 0
            counts = counts * np.random.random(counts.size)
            return np.argmax(counts)
        a = self.land_use_data.values
        h, w = a.shape
        # Reshape so that axis 0 enumerates the nrows x ncols blocks.
        a1 = a.reshape(h // nrows, nrows, -1, ncols)
        a2 = a1.swapaxes(1, 2)
        a3 = a2.reshape(-1, nrows, ncols)
        res = np.zeros(a3.shape[0])
        for e, block in enumerate(a3):
            res[e] = get_number_of_max_occurence(block)
        res = res.reshape((a.shape[0] // nrows, a.shape[1] // ncols))
        # New coordinate axes: the mean lat/lon of each block.
        idx_ds = np.apply_along_axis(lambda x: x.mean(), 1, self.land_use_data.index.values.reshape(-1, nrows))
        col_ds = np.apply_along_axis(lambda x: x.mean(), 1, self.land_use_data.columns.values.reshape(-1, ncols))
        df = pd.DataFrame(res.astype(int), index=idx_ds, columns=col_ds)
        return LandUseMap(df)
| hagne/hysplit-py | hysplit_py/land_use_map.py | Python | gpl-3.0 | 6,848 | [
"NetCDF"
] | 5b711428897475d0e764df4c274fd66fde2904f561a808b52c83e4df9cf93adf |
""" @package antlr3.tree
@brief ANTLR3 runtime package, tree module
This module contains all support classes for AST construction and tree parsers.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# lot's of docstrings are missing, don't complain for now...
# pylint: disable-msg=C0111
import re
from antlr3.constants import UP, DOWN, EOF, INVALID_TOKEN_TYPE
from antlr3.recognizers import BaseRecognizer, RuleReturnScope
from antlr3.streams import IntStream
from antlr3.tokens import CommonToken, Token, INVALID_TOKEN
from antlr3.exceptions import MismatchedTreeNodeException, \
MissingTokenException, UnwantedTokenException, MismatchedTokenException, \
NoViableAltException
############################################################################
#
# tree related exceptions
#
############################################################################
class RewriteCardinalityException(RuntimeError):
    """
    @brief Base class for all exceptions thrown during AST rewrite construction.

    Raised when two or more elements in a subrule have mismatched
    cardinalities: (ID INT)+ where |ID|!=|INT|
    """

    def __init__(self, elementDescription):
        super(RewriteCardinalityException, self).__init__(elementDescription)
        self.elementDescription = elementDescription

    def getMessage(self):
        """Return the description of the offending element."""
        return self.elementDescription
class RewriteEarlyExitException(RewriteCardinalityException):
    """@brief No elements within a (...)+ in a rewrite rule"""

    def __init__(self, elementDescription=None):
        super(RewriteEarlyExitException, self).__init__(elementDescription)
class RewriteEmptyStreamException(RewriteCardinalityException):
    """
    @brief Ref to ID or expr but no tokens in ID stream or subtrees in expr stream
    """
    # No extra state or behaviour; the subclass exists so callers can
    # distinguish "stream was empty" from other cardinality errors.
    pass
############################################################################
#
# basic Tree and TreeAdaptor interfaces
#
############################################################################
class Tree(object):
"""
@brief Abstract baseclass for tree nodes.
What does a tree look like? ANTLR has a number of support classes
such as CommonTreeNodeStream that work on these kinds of trees. You
don't have to make your trees implement this interface, but if you do,
you'll be able to use more support code.
NOTE: When constructing trees, ANTLR can build any kind of tree; it can
even use Token objects as trees if you add a child list to your tokens.
This is a tree node without any payload; just navigation and factory stuff.
"""
def getChild(self, i):
raise NotImplementedError
def getChildCount(self):
raise NotImplementedError
def getParent(self):
"""Tree tracks parent and child index now > 3.0"""
raise NotImplementedError
def setParent(self, t):
"""Tree tracks parent and child index now > 3.0"""
raise NotImplementedError
def hasAncestor(self, ttype):
"""Walk upwards looking for ancestor with this token type."""
raise NotImplementedError
def getAncestor(self, ttype):
"""Walk upwards and get first ancestor with this token type."""
raise NotImplementedError
def getAncestors(self):
"""Return a list of all ancestors of this node.
The first node of list is the root and the last is the parent of
this node.
"""
raise NotImplementedError
def getChildIndex(self):
"""This node is what child index? 0..n-1"""
raise NotImplementedError
def setChildIndex(self, index):
"""This node is what child index? 0..n-1"""
raise NotImplementedError
def freshenParentAndChildIndexes(self):
"""Set the parent and child index values for all children"""
raise NotImplementedError
def addChild(self, t):
"""
Add t as a child to this node. If t is null, do nothing. If t
is nil, add all children of t to this' children.
"""
raise NotImplementedError
def setChild(self, i, t):
"""Set ith child (0..n-1) to t; t must be non-null and non-nil node"""
raise NotImplementedError
def deleteChild(self, i):
raise NotImplementedError
def replaceChildren(self, startChildIndex, stopChildIndex, t):
"""
Delete children from start to stop and replace with t even if t is
a list (nil-root tree). num of children can increase or decrease.
For huge child lists, inserting children can force walking rest of
children to set their childindex; could be slow.
"""
raise NotImplementedError
def isNil(self):
"""
Indicates the node is a nil node but may still have children, meaning
the tree is a flat list.
"""
raise NotImplementedError
def getTokenStartIndex(self):
"""
What is the smallest token index (indexing from 0) for this node
and its children?
"""
raise NotImplementedError
def setTokenStartIndex(self, index):
raise NotImplementedError
def getTokenStopIndex(self):
"""
What is the largest token index (indexing from 0) for this node
and its children?
"""
raise NotImplementedError
def setTokenStopIndex(self, index):
    """Set the largest token index for this node and its children."""
    raise NotImplementedError
def dupNode(self):
    """Duplicate this single node (not the whole subtree)."""
    raise NotImplementedError
def getType(self):
    """Return a token type; needed for tree parsing."""
    raise NotImplementedError
def getText(self):
    """Return the text for this node."""
    raise NotImplementedError
def getLine(self):
    """
    In case we don't have a token payload, what is the line for errors?
    """
    raise NotImplementedError
def getCharPositionInLine(self):
    """In case we don't have a token payload, the char position for errors."""
    raise NotImplementedError
def toStringTree(self):
    """Print out a whole tree, not just this node."""
    raise NotImplementedError
def toString(self):
    """Say how a node (not a tree) should look as text."""
    raise NotImplementedError
class TreeAdaptor(object):
    """
    @brief Abstract baseclass for tree adaptors.

    How to create and navigate trees. Rather than have a separate factory
    and adaptor, I've merged them. Makes sense to encapsulate.

    This takes the place of the tree construction code generated in the
    generated code in 2.x and the ASTFactory.

    I do not need to know the type of a tree at all so they are all
    generic Objects. This may increase the amount of typecasting needed. :(
    """

    # C o n s t r u c t i o n

    def createWithPayload(self, payload):
        """
        Create a tree node from Token object; for CommonTree type trees,
        then the token just becomes the payload. This is the most
        common create call.

        Override if you want another kind of node to be built.
        """
        raise NotImplementedError

    def dupNode(self, treeNode):
        """Duplicate a single tree node.

        Override if you want another kind of node to be built.
        """
        raise NotImplementedError

    def dupTree(self, tree):
        """Duplicate tree recursively, using dupNode() for each node"""
        raise NotImplementedError

    def nil(self):
        """
        Return a nil node (an empty but non-null node) that can hold
        a list of element as the children. If you want a flat tree (a list)
        use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
        """
        raise NotImplementedError

    def errorNode(self, input, start, stop, exc):
        """
        Return a tree node representing an error. This node records the
        tokens consumed during error recovery. The start token indicates the
        input symbol at which the error was detected. The stop token indicates
        the last symbol consumed during recovery.

        You must specify the input stream so that the erroneous text can
        be packaged up in the error node. The exception could be useful
        to some applications; default implementation stores ptr to it in
        the CommonErrorNode.

        This only makes sense during token parsing, not tree parsing.
        Tree parsing should happen only when parsing and tree construction
        succeed.
        """
        raise NotImplementedError

    def isNil(self, tree):
        """Is tree considered a nil node used to make lists of child nodes?"""
        raise NotImplementedError

    def addChild(self, t, child):
        """
        Add a child to the tree t. If child is a flat tree (a list), make all
        in list children of t. Warning: if t has no children, but child does
        and child isNil then you can decide it is ok to move children to t via
        t.children = child.children; i.e., without copying the array. Just
        make sure that this is consistent with have the user will build
        ASTs. Do nothing if t or child is null.
        """
        raise NotImplementedError

    def becomeRoot(self, newRoot, oldRoot):
        """
        If oldRoot is a nil root, just copy or move the children to newRoot.
        If not a nil root, make oldRoot a child of newRoot.

          old=^(nil a b c), new=r yields ^(r a b c)
          old=^(a b c), new=r yields ^(r ^(a b c))

        If newRoot is a nil-rooted single child tree, use the single
        child as the new root node.

          old=^(nil a b c), new=^(nil r) yields ^(r a b c)
          old=^(a b c), new=^(nil r) yields ^(r ^(a b c))

        If oldRoot was null, it's ok, just return newRoot (even if isNil).

          old=null, new=r yields r
          old=null, new=^(nil r) yields ^(nil r)

        Return newRoot. Throw an exception if newRoot is not a
        simple node or nil root with a single child node--it must be a root
        node. If newRoot is ^(nil x) return x as newRoot.

        Be advised that it's ok for newRoot to point at oldRoot's
        children; i.e., you don't have to copy the list. We are
        constructing these nodes so we should have this control for
        efficiency.
        """
        raise NotImplementedError

    def rulePostProcessing(self, root):
        """
        Given the root of the subtree created for this rule, post process
        it to do any simplifications or whatever you want. A required
        behavior is to convert ^(nil singleSubtree) to singleSubtree
        as the setting of start/stop indexes relies on a single non-nil root
        for non-flat trees.

        Flat trees such as for lists like "idlist : ID+ ;" are left alone
        unless there is only one ID. For a list, the start/stop indexes
        are set in the nil node.

        This method is executed after all rule tree construction and right
        before setTokenBoundaries().
        """
        raise NotImplementedError

    def getUniqueID(self, node):
        """For identifying trees.

        How to identify nodes so we can say "add node to a prior node"?
        Even becomeRoot is an issue. Use System.identityHashCode(node)
        usually.
        """
        raise NotImplementedError

    # R e w r i t e  R u l e s

    def createFromToken(self, tokenType, fromToken, text=None):
        """
        Create a new node derived from a token, with a new token type and
        (optionally) new text.

        This is invoked from an imaginary node ref on right side of a
        rewrite rule as IMAG[$tokenLabel] or IMAG[$tokenLabel "IMAG"].

        This should invoke createToken(Token).
        """
        raise NotImplementedError

    def createFromType(self, tokenType, text):
        """Create a new node derived from a token, with a new token type.

        This is invoked from an imaginary node ref on right side of a
        rewrite rule as IMAG["IMAG"].

        This should invoke createToken(int,String).
        """
        raise NotImplementedError

    # C o n t e n t

    def getType(self, t):
        """For tree parsing, I need to know the token type of a node"""
        raise NotImplementedError

    def setType(self, t, type):
        """Node constructors can set the type of a node"""
        raise NotImplementedError

    def getText(self, t):
        """Return the text of the given node."""
        raise NotImplementedError

    def setText(self, t, text):
        """Node constructors can set the text of a node"""
        raise NotImplementedError

    def getToken(self, t):
        """Return the token object from which this node was created.

        Currently used only for printing an error message.
        The error display routine in BaseRecognizer needs to
        display where the input the error occurred. If your
        tree of limitation does not store information that can
        lead you to the token, you can create a token filled with
        the appropriate information and pass that back. See
        BaseRecognizer.getErrorMessage().
        """
        raise NotImplementedError

    def setTokenBoundaries(self, t, startToken, stopToken):
        """
        Where are the bounds in the input token stream for this node and
        all children? Each rule that creates AST nodes will call this
        method right before returning. Flat trees (i.e., lists) will
        still usually have a nil root node just to hold the children list.
        That node would contain the start/stop indexes then.
        """
        raise NotImplementedError

    def getTokenStartIndex(self, t):
        """
        Get the token start index for this subtree; return -1 if no such index
        """
        raise NotImplementedError

    def getTokenStopIndex(self, t):
        """
        Get the token stop index for this subtree; return -1 if no such index
        """
        raise NotImplementedError

    # N a v i g a t i o n  /  T r e e  P a r s i n g

    def getChild(self, t, i):
        """Get a child 0..n-1 node"""
        raise NotImplementedError

    def setChild(self, t, i, child):
        """Set ith child (0..n-1) to t; t must be non-null and non-nil node"""
        raise NotImplementedError

    def deleteChild(self, t, i):
        """Remove ith child and shift children down from right."""
        raise NotImplementedError

    def getChildCount(self, t):
        """How many children? If 0, then this is a leaf node"""
        raise NotImplementedError

    def getParent(self, t):
        """
        Who is the parent node of this node; if null, implies node is root.
        If your node type doesn't handle this, it's ok but the tree rewrites
        in tree parsers need this functionality.
        """
        raise NotImplementedError

    def setParent(self, t, parent):
        """
        Who is the parent node of this node; if null, implies node is root.
        If your node type doesn't handle this, it's ok but the tree rewrites
        in tree parsers need this functionality.
        """
        raise NotImplementedError

    def getChildIndex(self, t):
        """
        What index is this node in the child list? Range: 0..n-1
        If your node type doesn't handle this, it's ok but the tree rewrites
        in tree parsers need this functionality.
        """
        raise NotImplementedError

    def setChildIndex(self, t, index):
        """
        What index is this node in the child list? Range: 0..n-1
        If your node type doesn't handle this, it's ok but the tree rewrites
        in tree parsers need this functionality.
        """
        raise NotImplementedError

    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        """
        Replace from start to stop child index of parent with t, which might
        be a list. Number of children may be different
        after this call.

        If parent is null, don't do anything; must be at root of overall tree.
        Can't replace whatever points to the parent externally. Do nothing.
        """
        raise NotImplementedError

    # Misc

    def create(self, *args):
        """
        Deprecated, use createWithPayload, createFromToken or createFromType.
        This method only exists to mimic the Java interface of TreeAdaptor.

        Dispatches on the number and runtime types of *args to emulate the
        four overloaded Java signatures.
        """
        if len(args) == 1 and isinstance(args[0], Token):
            # Object create(Token payload);
            return self.createWithPayload(args[0])

        if (len(args) == 2
            and isinstance(args[0], int)
            and isinstance(args[1], Token)
            ):
            # Object create(int tokenType, Token fromToken);
            return self.createFromToken(args[0], args[1])

        if (len(args) == 3
            and isinstance(args[0], int)
            and isinstance(args[1], Token)
            and isinstance(args[2], str)
            ):
            # Object create(int tokenType, Token fromToken, String text);
            return self.createFromToken(args[0], args[1], args[2])

        if (len(args) == 2
            and isinstance(args[0], int)
            and isinstance(args[1], str)
            ):
            # Object create(int tokenType, String text);
            return self.createFromType(args[0], args[1])

        raise TypeError(
            "No create method with this signature found: %s"
            % (', '.join(type(v).__name__ for v in args))
        )
############################################################################
#
# base implementation of Tree and TreeAdaptor
#
# Tree
# \- BaseTree
#
# TreeAdaptor
# \- BaseTreeAdaptor
#
############################################################################
class BaseTree(Tree):
    """
    @brief A generic tree implementation with no payload.

    You must subclass to
    actually have any user data. ANTLR v3 uses a list of children approach
    instead of the child-sibling approach in v2. A flat tree (a list) is
    an empty node whose children represent the list. An empty, but
    non-null node is called "nil".
    """

    # BaseTree is abstract, no need to complain about not implemented abstract
    # methods
    # pylint: disable-msg=W0223

    def __init__(self, node=None):
        """
        Create a new node from an existing node does nothing for BaseTree
        as there are no fields other than the children list, which cannot
        be copied as the children are not considered part of this node.
        """
        Tree.__init__(self)
        self.children = []
        self.parent = None
        self.childIndex = 0

    def getChild(self, i):
        """Return child i, or None if the index is out of range."""
        try:
            return self.children[i]
        except IndexError:
            return None

    def getChildren(self):
        """@brief Get the children internal List

        Note that if you directly mess with
        the list, do so at your own risk.
        """
        # FIXME: mark as deprecated
        return self.children

    def getFirstChildWithType(self, treeType):
        """Return the first direct child whose token type is treeType."""
        for child in self.children:
            if child.getType() == treeType:
                return child
        return None

    def getChildCount(self):
        """How many children? If 0, then this is a leaf node."""
        return len(self.children)

    def addChild(self, childTree):
        """Add t as child of this node.

        Warning: if t has no children, but child does
        and child isNil then this routine moves children to t via
        t.children = child.children; i.e., without copying the array.
        """
        # this implementation is much simpler and probably less efficient
        # than the mumbo-jumbo that Ter did for the Java runtime.

        if childTree is None:
            return

        if childTree.isNil():
            # t is an empty node possibly with children

            if self.children is childTree.children:
                raise ValueError("attempt to add child list to itself")

            # fix parent pointer and childIndex for new children
            for idx, child in enumerate(childTree.children):
                child.parent = self
                child.childIndex = len(self.children) + idx

            self.children += childTree.children

        else:
            # child is not nil (don't care about children)
            self.children.append(childTree)
            childTree.parent = self
            childTree.childIndex = len(self.children) - 1

    def addChildren(self, children):
        """Add all elements of kids list as children of this node.

        NOTE(review): unlike addChild, this does not fix the children's
        parent pointers or child indexes; callers relying on those must
        call freshenParentAndChildIndexes afterwards.
        """
        self.children += children

    def setChild(self, i, t):
        """Set ith child (0..n-1) to t; t must be non-null and non-nil."""
        if t is None:
            return

        if t.isNil():
            raise ValueError("Can't set single child to a list")

        self.children[i] = t
        t.parent = self
        t.childIndex = i

    def deleteChild(self, i):
        """Remove child i, shift later children left, and return the
        removed node."""
        killed = self.children[i]

        del self.children[i]

        # walk rest and decrement their child indexes
        for idx, child in enumerate(self.children[i:]):
            child.childIndex = i + idx

        return killed

    def replaceChildren(self, startChildIndex, stopChildIndex, newTree):
        """
        Delete children from start to stop and replace with t even if t is
        a list (nil-root tree). num of children can increase or decrease.
        For huge child lists, inserting children can force walking rest of
        children to set their childindex; could be slow.
        """
        if (startChildIndex >= len(self.children)
            or stopChildIndex >= len(self.children)
            ):
            raise IndexError("indexes invalid")

        replacingHowMany = stopChildIndex - startChildIndex + 1

        # normalize to a list of children to add: newChildren
        if newTree.isNil():
            newChildren = newTree.children
        else:
            newChildren = [newTree]

        replacingWithHowMany = len(newChildren)
        delta = replacingHowMany - replacingWithHowMany

        if delta == 0:
            # if same number of nodes, do direct replace
            for idx, child in enumerate(newChildren):
                self.children[idx + startChildIndex] = child
                child.parent = self
                child.childIndex = idx + startChildIndex

        else:
            # length of children changes...

            # ...delete replaced segment...
            del self.children[startChildIndex:stopChildIndex+1]

            # ...insert new segment...
            self.children[startChildIndex:startChildIndex] = newChildren

            # ...and fix indexes
            self.freshenParentAndChildIndexes(startChildIndex)

    def isNil(self):
        """A BaseTree is never nil; subclasses with a payload override."""
        return False

    def freshenParentAndChildIndexes(self, offset=0):
        """Reset parent pointer and childIndex for children from offset on."""
        for idx, child in enumerate(self.children[offset:]):
            child.childIndex = idx + offset
            child.parent = self

    def sanityCheckParentAndChildIndexes(self, parent=None, i=-1):
        """Recursively verify parent/childIndex bookkeeping; raise
        ValueError on any mismatch."""
        if parent != self.parent:
            raise ValueError(
                "parents don't match; expected %r found %r"
                % (parent, self.parent)
            )

        if i != self.childIndex:
            raise ValueError(
                "child indexes don't match; expected %d found %d"
                % (i, self.childIndex)
            )

        for idx, child in enumerate(self.children):
            child.sanityCheckParentAndChildIndexes(self, idx)

    def getChildIndex(self):
        """BaseTree doesn't track child indexes."""
        return 0

    def setChildIndex(self, index):
        """BaseTree doesn't track child indexes."""
        pass

    def getParent(self):
        """BaseTree doesn't track parent pointers."""
        return None

    def setParent(self, t):
        """BaseTree doesn't track parent pointers."""
        pass

    def hasAncestor(self, ttype):
        """Walk upwards looking for ancestor with this token type."""
        return self.getAncestor(ttype) is not None

    def getAncestor(self, ttype):
        """Walk upwards and get first ancestor with this token type."""
        t = self.getParent()
        while t is not None:
            if t.getType() == ttype:
                return t
            t = t.getParent()
        return None

    def getAncestors(self):
        """Return a list of all ancestors of this node.

        The first node of list is the root and the last is the parent of
        this node.
        """
        # BUGFIX: was `selfgetParent()` (missing dot), which raised
        # NameError whenever this method was called.
        if self.getParent() is None:
            return None

        ancestors = []
        t = self.getParent()
        while t is not None:
            ancestors.insert(0, t)  # insert at start
            t = t.getParent()

        return ancestors

    def toStringTree(self):
        """Print out a whole tree not just a node"""
        if len(self.children) == 0:
            return self.toString()

        buf = []
        if not self.isNil():
            buf.append('(')
            buf.append(self.toString())
            buf.append(' ')

        for i, child in enumerate(self.children):
            if i > 0:
                buf.append(' ')
            buf.append(child.toStringTree())

        if not self.isNil():
            buf.append(')')

        return ''.join(buf)

    def getLine(self):
        """No token payload at this level; report line 0."""
        return 0

    def getCharPositionInLine(self):
        """No token payload at this level; report position 0."""
        return 0

    def toString(self):
        """Override to say how a node (not a tree) should look as text"""
        raise NotImplementedError
class BaseTreeAdaptor(TreeAdaptor):
    """
    @brief A TreeAdaptor that works with any Tree implementation.
    """

    # BaseTreeAdaptor is abstract, no need to complain about not implemented
    # abstract methods
    # pylint: disable-msg=W0223

    def nil(self):
        """Return an empty (payload-less) node to act as a list root."""
        return self.createWithPayload(None)

    def errorNode(self, input, start, stop, exc):
        """
        create tree node that holds the start and stop tokens associated
        with an error.

        If you specify your own kind of tree nodes, you will likely have to
        override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
        if no token payload but you might have to set token type for diff
        node type.

        You don't have to subclass CommonErrorNode; you will likely need to
        subclass your own tree node class to avoid class cast exception.
        """
        return CommonErrorNode(input, start, stop, exc)

    def isNil(self, tree):
        """Delegate the nil check to the node itself."""
        return tree.isNil()

    def dupTree(self, t, parent=None):
        """
        This is generic in the sense that it will work with any kind of
        tree (not just Tree interface). It invokes the adaptor routines
        not the tree node routines to do the construction.
        """
        if t is None:
            return None

        newTree = self.dupNode(t)

        # ensure new subtree root has parent/child index set

        # same index in new tree
        self.setChildIndex(newTree, self.getChildIndex(t))

        self.setParent(newTree, parent)

        for i in range(self.getChildCount(t)):
            child = self.getChild(t, i)
            newSubTree = self.dupTree(child, t)
            self.addChild(newTree, newSubTree)

        return newTree

    def addChild(self, tree, child):
        """
        Add a child to the tree t. If child is a flat tree (a list), make all
        in list children of t. Warning: if t has no children, but child does
        and child isNil then you can decide it is ok to move children to t via
        t.children = child.children; i.e., without copying the array. Just
        make sure that this is consistent with have the user will build
        ASTs.
        """
        #if isinstance(child, Token):
        #    child = self.createWithPayload(child)

        if tree is not None and child is not None:
            tree.addChild(child)

    def becomeRoot(self, newRoot, oldRoot):
        """
        If oldRoot is a nil root, just copy or move the children to newRoot.
        If not a nil root, make oldRoot a child of newRoot.

          old=^(nil a b c), new=r yields ^(r a b c)
          old=^(a b c), new=r yields ^(r ^(a b c))

        If newRoot is a nil-rooted single child tree, use the single
        child as the new root node.

          old=^(nil a b c), new=^(nil r) yields ^(r a b c)
          old=^(a b c), new=^(nil r) yields ^(r ^(a b c))

        If oldRoot was null, it's ok, just return newRoot (even if isNil).

          old=null, new=r yields r
          old=null, new=^(nil r) yields ^(nil r)

        Return newRoot. Throw an exception if newRoot is not a
        simple node or nil root with a single child node--it must be a root
        node. If newRoot is ^(nil x) return x as newRoot.

        Be advised that it's ok for newRoot to point at oldRoot's
        children; i.e., you don't have to copy the list. We are
        constructing these nodes so we should have this control for
        efficiency.
        """
        if isinstance(newRoot, Token):
            newRoot = self.create(newRoot)

        if oldRoot is None:
            return newRoot

        if not isinstance(newRoot, CommonTree):
            newRoot = self.createWithPayload(newRoot)

        # handle ^(nil real-node)
        if newRoot.isNil():
            nc = newRoot.getChildCount()
            if nc == 1:
                newRoot = newRoot.getChild(0)

            elif nc > 1:
                # TODO: make tree run time exceptions hierarchy
                raise RuntimeError("more than one node as root")

        # add oldRoot to newRoot; addChild takes care of case where oldRoot
        # is a flat list (i.e., nil-rooted tree). All children of oldRoot
        # are added to newRoot.
        newRoot.addChild(oldRoot)
        return newRoot

    def rulePostProcessing(self, root):
        """Transform ^(nil x) to x and nil to null"""
        if root is not None and root.isNil():
            if root.getChildCount() == 0:
                root = None

            elif root.getChildCount() == 1:
                root = root.getChild(0)
                # whoever invokes rule will set parent and child index
                root.setParent(None)
                root.setChildIndex(-1)

        return root

    def createFromToken(self, tokenType, fromToken, text=None):
        """Create a node from an existing token, overriding type/text."""
        assert isinstance(tokenType, int), type(tokenType).__name__
        assert isinstance(fromToken, Token), type(fromToken).__name__
        assert text is None or isinstance(text, str), type(text).__name__

        fromToken = self.createToken(fromToken)
        fromToken.type = tokenType
        if text is not None:
            fromToken.text = text
        t = self.createWithPayload(fromToken)
        return t

    def createFromType(self, tokenType, text):
        """Create a node from scratch given a token type and text."""
        assert isinstance(tokenType, int), type(tokenType).__name__
        assert isinstance(text, str), type(text).__name__

        fromToken = self.createToken(tokenType=tokenType, text=text)
        t = self.createWithPayload(fromToken)
        return t

    def getType(self, t):
        """Delegate to the node."""
        return t.getType()

    def setType(self, t, type):
        raise RuntimeError("don't know enough about Tree node")

    def getText(self, t):
        """Delegate to the node."""
        return t.getText()

    def setText(self, t, text):
        raise RuntimeError("don't know enough about Tree node")

    def getChild(self, t, i):
        return t.getChild(i)

    def setChild(self, t, i, child):
        t.setChild(i, child)

    def deleteChild(self, t, i):
        return t.deleteChild(i)

    def getChildCount(self, t):
        return t.getChildCount()

    def getUniqueID(self, node):
        # uses the node's hash as the identity; see TreeAdaptor.getUniqueID
        return hash(node)

    def createToken(self, fromToken=None, tokenType=None, text=None):
        """
        Tell me how to create a token for use with imaginary token nodes.
        For example, there is probably no input symbol associated with imaginary
        token DECL, but you need to create it as a payload or whatever for
        the DECL node as in ^(DECL type ID).

        If you care what the token payload objects' type is, you should
        override this method and any other createToken variant.
        """
        raise NotImplementedError
############################################################################
#
# common tree implementation
#
# Tree
# \- BaseTree
# \- CommonTree
# \- CommonErrorNode
#
# TreeAdaptor
# \- BaseTreeAdaptor
# \- CommonTreeAdaptor
#
############################################################################
class CommonTree(BaseTree):
    """@brief A tree node that is wrapper for a Token object.

    After 3.0 release
    while building tree rewrite stuff, it became clear that computing
    parent and child index is very difficult and cumbersome. Better to
    spend the space in every tree node. If you don't want these extra
    fields, it's easy to cut them out in your own BaseTree subclass.
    """

    def __init__(self, payload):
        """payload may be None (nil node), a CommonTree to copy, or a Token.

        Raises TypeError for any other payload type.
        """
        BaseTree.__init__(self)

        # What token indexes bracket all tokens associated with this node
        # and below?
        self.startIndex = -1
        self.stopIndex = -1

        # Who is the parent node of this node; if null, implies node is root
        self.parent = None

        # What index is this node in the child list? Range: 0..n-1
        self.childIndex = -1

        # A single token is the payload
        if payload is None:
            self.token = None

        elif isinstance(payload, CommonTree):
            # copy constructor: share token, copy bracketing indexes
            self.token = payload.token
            self.startIndex = payload.startIndex
            self.stopIndex = payload.stopIndex

        elif isinstance(payload, Token):
            # FIX: dropped an unreachable `payload is None or` from this
            # condition; None is already handled by the first branch.
            self.token = payload

        else:
            raise TypeError(type(payload).__name__)

    def getToken(self):
        """Return the Token payload (None for a nil node)."""
        return self.token

    def dupNode(self):
        """Duplicate just this node via the copy-constructor branch."""
        return CommonTree(self)

    def isNil(self):
        """A node with no token payload is a nil (list root) node."""
        return self.token is None

    def getType(self):
        """Token type of the payload, or INVALID_TOKEN_TYPE for nil."""
        if self.token is None:
            return INVALID_TOKEN_TYPE

        return self.token.getType()

    type = property(getType)

    def getText(self):
        """Text of the payload token, or None for a nil node."""
        if self.token is None:
            return None

        return self.token.text

    text = property(getText)

    def getLine(self):
        """Line of the payload token; falls back to the first child."""
        if self.token is None or self.token.getLine() == 0:
            if self.getChildCount():
                return self.getChild(0).getLine()
            else:
                return 0

        return self.token.getLine()

    line = property(getLine)

    def getCharPositionInLine(self):
        """Char position of the payload token; falls back to first child."""
        if self.token is None or self.token.getCharPositionInLine() == -1:
            if self.getChildCount():
                return self.getChild(0).getCharPositionInLine()
            else:
                return 0

        else:
            return self.token.getCharPositionInLine()

    charPositionInLine = property(getCharPositionInLine)

    def getTokenStartIndex(self):
        """Smallest token index for this subtree; payload index if unset."""
        if self.startIndex == -1 and self.token is not None:
            return self.token.getTokenIndex()

        return self.startIndex

    def setTokenStartIndex(self, index):
        self.startIndex = index

    tokenStartIndex = property(getTokenStartIndex, setTokenStartIndex)

    def getTokenStopIndex(self):
        """Largest token index for this subtree; payload index if unset."""
        if self.stopIndex == -1 and self.token is not None:
            return self.token.getTokenIndex()

        return self.stopIndex

    def setTokenStopIndex(self, index):
        self.stopIndex = index

    tokenStopIndex = property(getTokenStopIndex, setTokenStopIndex)

    def setUnknownTokenBoundaries(self):
        """For every node in this subtree, make sure it's start/stop token's
        are set. Walk depth first, visit bottom up. Only updates nodes
        with at least one token index < 0.
        """
        if self.children is None:
            if self.startIndex < 0 or self.stopIndex < 0:
                self.startIndex = self.stopIndex = self.token.getTokenIndex()

            return

        for child in self.children:
            child.setUnknownTokenBoundaries()

        if self.startIndex >= 0 and self.stopIndex >= 0:
            # already set
            return

        if self.children:
            firstChild = self.children[0]
            lastChild = self.children[-1]
            self.startIndex = firstChild.getTokenStartIndex()
            self.stopIndex = lastChild.getTokenStopIndex()

    def getChildIndex(self):
        #FIXME: mark as deprecated
        return self.childIndex

    def setChildIndex(self, idx):
        #FIXME: mark as deprecated
        self.childIndex = idx

    def getParent(self):
        #FIXME: mark as deprecated
        return self.parent

    def setParent(self, t):
        #FIXME: mark as deprecated
        self.parent = t

    def toString(self):
        """Render this single node: "nil", "<errornode>", or token text."""
        if self.isNil():
            return "nil"

        if self.getType() == INVALID_TOKEN_TYPE:
            return "<errornode>"

        return self.token.text

    __str__ = toString

    def toStringTree(self):
        """Render the whole subtree in LISP-style '(root child ...)' form."""
        if not self.children:
            return self.toString()

        ret = ''
        if not self.isNil():
            ret += '(%s ' % (self.toString())

        ret += ' '.join([child.toStringTree() for child in self.children])

        if not self.isNil():
            ret += ')'

        return ret
# Shared sentinel node wrapping the INVALID_TOKEN payload.
INVALID_NODE = CommonTree(INVALID_TOKEN)
class CommonErrorNode(CommonTree):
    """A node representing erroneous token range in token stream"""

    def __init__(self, input, start, stop, exc):
        # no token payload; the error info below replaces it
        CommonTree.__init__(self, None)

        if (stop is None or
            (stop.getTokenIndex() < start.getTokenIndex() and
             stop.getType() != EOF
             )
            ):
            # sometimes resync does not consume a token (when LT(1) is
            # in follow set. So, stop will be 1 to left to start. adjust.
            # Also handle case where start is the first token and no token
            # is consumed during recovery; LT(-1) will return null.
            stop = start

        self.input = input
        self.start = start
        self.stop = stop
        self.trappedException = exc

    def isNil(self):
        # error nodes are real nodes, never list roots
        return False

    def getType(self):
        return INVALID_TOKEN_TYPE

    def getText(self):
        """Return the erroneous input text spanned by start..stop."""
        if isinstance(self.start, Token):
            i = self.start.getTokenIndex()
            j = self.stop.getTokenIndex()
            if self.stop.getType() == EOF:
                j = self.input.size()

            badText = self.input.toString(i, j)

        elif isinstance(self.start, Tree):
            badText = self.input.toString(self.start, self.stop)

        else:
            # people should subclass if they alter the tree type so this
            # next one is for sure correct.
            badText = "<unknown>"

        return badText

    def toString(self):
        """Describe the trapped exception and the resync'd text."""
        if isinstance(self.trappedException, MissingTokenException):
            return ("<missing type: "
                    + str(self.trappedException.getMissingType())
                    + ">")

        elif isinstance(self.trappedException, UnwantedTokenException):
            return ("<extraneous: "
                    + str(self.trappedException.getUnexpectedToken())
                    + ", resync=" + self.getText() + ">")

        elif isinstance(self.trappedException, MismatchedTokenException):
            return ("<mismatched token: "
                    + str(self.trappedException.token)
                    + ", resync=" + self.getText() + ">")

        elif isinstance(self.trappedException, NoViableAltException):
            return ("<unexpected: "
                    + str(self.trappedException.token)
                    + ", resync=" + self.getText() + ">")

        return "<error: "+self.getText()+">"
class CommonTreeAdaptor(BaseTreeAdaptor):
    """
    @brief A TreeAdaptor that works with any Tree implementation.

    It provides
    really just factory methods; all the work is done by BaseTreeAdaptor.
    If you would like to have different tokens created than ClassicToken
    objects, you need to override this and then set the parser tree adaptor to
    use your subclass.

    To get your parser to build nodes of a different type, override
    create(Token), errorNode(), and to be safe, YourTreeClass.dupNode().
    dupNode is called to duplicate nodes during rewrite operations.
    """

    def dupNode(self, treeNode):
        """
        Duplicate a node. This is part of the factory;
        override if you want another kind of node to be built.

        I could use reflection to prevent having to override this
        but reflection is slow.
        """
        return None if treeNode is None else treeNode.dupNode()

    def createWithPayload(self, payload):
        """Build a CommonTree around the given payload."""
        return CommonTree(payload)

    def createToken(self, fromToken=None, tokenType=None, text=None):
        """
        Tell me how to create a token for use with imaginary token nodes.
        For example, there is probably no input symbol associated with imaginary
        token DECL, but you need to create it as a payload or whatever for
        the DECL node as in ^(DECL type ID).

        If you care what the token payload objects' type is, you should
        override this method and any other createToken variant.
        """
        if fromToken is None:
            return CommonToken(type=tokenType, text=text)
        return CommonToken(oldToken=fromToken)

    def setTokenBoundaries(self, t, startToken, stopToken):
        """
        Track start/stop token for subtree root created for a rule.
        Only works with Tree nodes. For rules that match nothing,
        seems like this will yield start=i and stop=i-1 in a nil node.
        Might be useful info so I'll not force to be i..i.
        """
        if t is None:
            return

        startIdx = startToken.index if startToken is not None else 0
        stopIdx = stopToken.index if stopToken is not None else 0

        t.setTokenStartIndex(startIdx)
        t.setTokenStopIndex(stopIdx)

    def getTokenStartIndex(self, t):
        """Token start index of the subtree, -1 when t is None."""
        return -1 if t is None else t.getTokenStartIndex()

    def getTokenStopIndex(self, t):
        """Token stop index of the subtree, -1 when t is None."""
        return -1 if t is None else t.getTokenStopIndex()

    def getText(self, t):
        """Node text, None when t is None."""
        return None if t is None else t.getText()

    def getType(self, t):
        """Node token type, INVALID_TOKEN_TYPE when t is None."""
        return INVALID_TOKEN_TYPE if t is None else t.getType()

    def getToken(self, t):
        """
        What is the Token associated with this node? If
        you are not using CommonTree, then you must
        override this in your own adaptor.
        """
        if isinstance(t, CommonTree):
            return t.getToken()
        return None  # no idea what to do

    def getChild(self, t, i):
        """Child i of node t, None when t is None."""
        return None if t is None else t.getChild(i)

    def getChildCount(self, t):
        """Number of children of t, 0 when t is None."""
        return 0 if t is None else t.getChildCount()

    def getParent(self, t):
        return t.getParent()

    def setParent(self, t, parent):
        t.setParent(parent)

    def getChildIndex(self, t):
        """Child index of t within its parent, 0 when t is None."""
        return 0 if t is None else t.getChildIndex()

    def setChildIndex(self, t, index):
        t.setChildIndex(index)

    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        """Delegate replacement to the parent node, if any."""
        if parent is None:
            return
        parent.replaceChildren(startChildIndex, stopChildIndex, t)
############################################################################
#
# streams
#
# TreeNodeStream
#  \- CommonTreeNodeStream
#
# TreeAdaptor
# \- BaseTreeAdaptor
# \- CommonTreeAdaptor
#
############################################################################
class TreeNodeStream(IntStream):
    """@brief A stream of tree nodes

    Accesses nodes from a tree of some kind.
    """

    # TreeNodeStream is abstract, no need to complain about not implemented
    # abstract methods
    # pylint: disable-msg=W0223

    def get(self, i):
        """Get a tree node at an absolute index i; 0..n-1.

        If you don't want to buffer up nodes, then this method makes no
        sense for you.
        """
        raise NotImplementedError

    def LT(self, k):
        """
        Get tree node at current input pointer + i ahead where i=1 is next node.
        i<0 indicates nodes in the past. So LT(-1) is previous node, but
        implementations are not required to provide results for k < -1.
        LT(0) is undefined. For i>=n, return null.
        Return null for LT(0) and any index that results in an absolute address
        that is negative.

        This is analogus to the LT() method of the TokenStream, but this
        returns a tree node instead of a token. Makes code gen identical
        for both parser and tree grammars. :)
        """
        raise NotImplementedError

    def getTreeSource(self):
        """
        Where is this stream pulling nodes from? This is not the name, but
        the object that provides node objects.
        """
        raise NotImplementedError

    def getTokenStream(self):
        """
        If the tree associated with this stream was created from a TokenStream,
        you can specify it here. Used to do rule $text attribute in tree
        parser. Optional unless you use tree parser rule text attribute
        or output=template and rewrite=true options.
        """
        raise NotImplementedError

    def getTreeAdaptor(self):
        """
        What adaptor can tell me how to interpret/navigate nodes and
        trees. E.g., get text of a node.
        """
        raise NotImplementedError

    def setUniqueNavigationNodes(self, uniqueNavigationNodes):
        """
        As we flatten the tree, we use UP, DOWN nodes to represent
        the tree structure. When debugging we need unique nodes
        so we have to instantiate new ones. When doing normal tree
        parsing, it's slow and a waste of memory to create unique
        navigation nodes. Default should be false;
        """
        raise NotImplementedError

    def toString(self, start, stop):
        """
        Return the text of all nodes from start to stop, inclusive.
        If the stream does not buffer all the nodes then it can still
        walk recursively from start until stop. You can always return
        null or "" too, but users should not access $ruleLabel.text in
        an action of course in that case.
        """
        raise NotImplementedError

    # REWRITING TREES (used by tree parser)

    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        """
        Replace from start to stop child index of parent with t, which might
        be a list. Number of children may be different
        after this call. The stream is notified because it is walking the
        tree and might need to know you are monkeying with the underlying
        tree. Also, it might be able to modify the node stream to avoid
        restreaming for future phases.

        If parent is null, don't do anything; must be at root of overall tree.
        Can't replace whatever points to the parent externally. Do nothing.
        """
        raise NotImplementedError
class CommonTreeNodeStream(TreeNodeStream):
    """@brief A buffered stream of tree nodes.

    Nodes can be from a tree of ANY kind.

    This node stream sucks all nodes out of the tree specified in
    the constructor during construction and makes pointers into
    the tree using an array of Object pointers. The stream necessarily
    includes pointers to DOWN and UP and EOF nodes.

    This stream knows how to mark/release for backtracking.

    This stream is most suitable for tree interpreters that need to
    jump around a lot or for tree parsers requiring speed (at cost of memory).
    There is some duplicated functionality here with UnBufferedTreeNodeStream
    but just in bookkeeping, not tree walking etc...

    @see UnBufferedTreeNodeStream
    """

    def __init__(self, *args):
        """Three call signatures:

        (tree)                  -- use a default CommonTreeAdaptor
        (adaptor, tree)         -- explicit adaptor
        (parent, start, stop)   -- share navigation nodes and a slice of
                                   an existing stream's node buffer
        """
        TreeNodeStream.__init__(self)

        if len(args) == 1:
            adaptor = CommonTreeAdaptor()
            tree = args[0]
            nodes = None
            down = None
            up = None
            eof = None

        elif len(args) == 2:
            adaptor = args[0]
            tree = args[1]
            nodes = None
            down = None
            up = None
            eof = None

        elif len(args) == 3:
            parent = args[0]
            start = args[1]
            stop = args[2]
            adaptor = parent.adaptor
            tree = parent.root
            nodes = parent.nodes[start:stop]
            down = parent.down
            up = parent.up
            eof = parent.eof

        else:
            raise TypeError("Invalid arguments")

        # all these navigation nodes are shared and hence they
        # cannot contain any line/column info
        if down is not None:
            self.down = down
        else:
            self.down = adaptor.createFromType(DOWN, "DOWN")

        if up is not None:
            self.up = up
        else:
            self.up = adaptor.createFromType(UP, "UP")

        if eof is not None:
            self.eof = eof
        else:
            self.eof = adaptor.createFromType(EOF, "EOF")

        # The complete mapping from stream index to tree node.
        # This buffer includes pointers to DOWN, UP, and EOF nodes.
        # It is built upon ctor invocation. The elements are type
        # Object as we don't know what the trees look like.

        # Load upon first need of the buffer so we can set token types
        # of interest for reverseIndexing. Slows us down a wee bit to
        # do all of the if p==-1 testing everywhere though.
        if nodes is not None:
            self.nodes = nodes
        else:
            self.nodes = []

        # Pull nodes from which tree?
        self.root = tree

        # IF this tree (root) was created from a token stream, track it.
        self.tokens = None

        # What tree adaptor was used to build these trees
        self.adaptor = adaptor

        # Reuse same DOWN, UP navigation nodes unless this is true
        self.uniqueNavigationNodes = False

        # The index into the nodes list of the current node (next node
        # to consume). If -1, nodes array not filled yet.
        self.p = -1

        # Track the last mark() call result value for use in rewind().
        self.lastMarker = None

        # Stack of indexes used for push/pop calls
        self.calls = []

    def fillBuffer(self):
        """Walk tree with depth-first-search and fill nodes buffer.

        Don't do DOWN, UP nodes if its a list (t is isNil).
        """
        self._fillBuffer(self.root)
        self.p = 0  # buffer of nodes initialized now

    def _fillBuffer(self, t):
        """Recursive DFS worker for fillBuffer()."""
        nil = self.adaptor.isNil(t)

        if not nil:
            self.nodes.append(t)  # add this node

        # add DOWN node if t has children
        n = self.adaptor.getChildCount(t)
        if not nil and n > 0:
            self.addNavigationNode(DOWN)

        # and now add all its children
        for c in range(n):
            self._fillBuffer(self.adaptor.getChild(t, c))

        # add UP node if t has children
        if not nil and n > 0:
            self.addNavigationNode(UP)

    def getNodeIndex(self, node):
        """What is the stream index for node? 0..n-1

        Return -1 if node not found.
        """
        if self.p == -1:
            self.fillBuffer()

        # linear scan; acceptable because this is only a debugging aid
        for i, t in enumerate(self.nodes):
            if t == node:
                return i

        return -1

    def addNavigationNode(self, ttype):
        """
        As we flatten the tree, we use UP, DOWN nodes to represent
        the tree structure. When debugging we need unique nodes
        so instantiate new ones when uniqueNavigationNodes is true.
        """
        navNode = None

        if ttype == DOWN:
            if self.hasUniqueNavigationNodes():
                navNode = self.adaptor.createFromType(DOWN, "DOWN")
            else:
                navNode = self.down

        else:
            if self.hasUniqueNavigationNodes():
                navNode = self.adaptor.createFromType(UP, "UP")
            else:
                navNode = self.up

        self.nodes.append(navNode)

    def get(self, i):
        """Return the node at absolute stream index i (fills buffer lazily)."""
        if self.p == -1:
            self.fillBuffer()

        return self.nodes[i]

    def LT(self, k):
        """Lookahead: node k positions ahead (k>0), behind (k<0); eof past end."""
        if self.p == -1:
            self.fillBuffer()

        if k == 0:
            return None

        if k < 0:
            return self.LB(-k)

        if self.p + k - 1 >= len(self.nodes):
            return self.eof

        return self.nodes[self.p + k - 1]

    def getCurrentSymbol(self):
        return self.LT(1)

    def LB(self, k):
        """Look backwards k nodes"""
        if k == 0:
            return None

        if self.p - k < 0:
            return None

        return self.nodes[self.p - k]

    def getTreeSource(self):
        return self.root

    def getSourceName(self):
        return self.getTokenStream().getSourceName()

    def getTokenStream(self):
        return self.tokens

    def setTokenStream(self, tokens):
        self.tokens = tokens

    def getTreeAdaptor(self):
        return self.adaptor

    def hasUniqueNavigationNodes(self):
        return self.uniqueNavigationNodes

    def setUniqueNavigationNodes(self, uniqueNavigationNodes):
        self.uniqueNavigationNodes = uniqueNavigationNodes

    def consume(self):
        if self.p == -1:
            self.fillBuffer()

        self.p += 1

    def LA(self, i):
        return self.adaptor.getType(self.LT(i))

    def mark(self):
        if self.p == -1:
            self.fillBuffer()

        self.lastMarker = self.index()
        return self.lastMarker

    def release(self, marker=None):
        # no resources to release
        pass

    def index(self):
        return self.p

    def rewind(self, marker=None):
        if marker is None:
            marker = self.lastMarker

        self.seek(marker)

    def seek(self, index):
        if self.p == -1:
            self.fillBuffer()

        self.p = index

    def push(self, index):
        """
        Make stream jump to a new location, saving old location.
        Switch back with pop().
        """
        self.calls.append(self.p)  # save current index
        self.seek(index)

    def pop(self):
        """
        Seek back to previous index saved during last push() call.
        Return top of stack (return index).
        """
        ret = self.calls.pop(-1)
        self.seek(ret)
        return ret

    def reset(self):
        self.p = 0
        self.lastMarker = 0
        self.calls = []

    def size(self):
        if self.p == -1:
            self.fillBuffer()

        return len(self.nodes)

    # TREE REWRITE INTERFACE

    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        if parent is not None:
            self.adaptor.replaceChildren(
                parent, startChildIndex, stopChildIndex, t
            )

    def __str__(self):
        """Used for testing, just return the token type stream"""
        if self.p == -1:
            self.fillBuffer()

        return ' '.join([str(self.adaptor.getType(node))
                         for node in self.nodes
                         ])

    def toString(self, start, stop):
        """Return the concatenated text of all nodes from start to stop."""
        if start is None or stop is None:
            return None

        if self.p == -1:
            self.fillBuffer()

        # if we have the token stream, use that to dump text in order
        if self.tokens is not None:
            beginTokenIndex = self.adaptor.getTokenStartIndex(start)
            endTokenIndex = self.adaptor.getTokenStopIndex(stop)

            # if it's a tree, use start/stop index from start node
            # else use token range from start/stop nodes
            if self.adaptor.getType(stop) == UP:
                endTokenIndex = self.adaptor.getTokenStopIndex(start)

            elif self.adaptor.getType(stop) == EOF:
                endTokenIndex = self.size() - 2  # don't use EOF

            return self.tokens.toString(beginTokenIndex, endTokenIndex)

        # walk nodes looking for start
        i, t = 0, None
        for i, t in enumerate(self.nodes):
            if t == start:
                break

        # now walk until we see stop, filling string buffer with text
        buf = []
        t = self.nodes[i]
        while t != stop:
            text = self.adaptor.getText(t)
            if text is None:
                # BUGFIX: getType() returns an int; concatenating it to a
                # string without str() raised TypeError.
                text = " " + str(self.adaptor.getType(t))

            buf.append(text)
            i += 1
            t = self.nodes[i]

        # include stop node too
        text = self.adaptor.getText(stop)
        if text is None:
            # BUGFIX: same int-to-str concatenation fix as above.
            text = " " + str(self.adaptor.getType(stop))

        buf.append(text)

        return ''.join(buf)

    ## iterator interface
    def __iter__(self):
        if self.p == -1:
            self.fillBuffer()

        for node in self.nodes:
            yield node
#############################################################################
#
# tree parser
#
#############################################################################
class TreeParser(BaseRecognizer):
    """@brief Baseclass for generated tree parsers.

    A parser for a stream of tree nodes. "tree grammars" result in a subclass
    of this. All the error reporting and recovery is shared with Parser via
    the BaseRecognizer superclass.
    """

    def __init__(self, input, state=None):
        BaseRecognizer.__init__(self, state)

        self.input = None
        self.setTreeNodeStream(input)

    def reset(self):
        BaseRecognizer.reset(self)  # reset all recognizer state variables
        if self.input is not None:
            self.input.seek(0)  # rewind the input

    def setTreeNodeStream(self, input):
        """Set the input stream"""
        self.input = input

    def getTreeNodeStream(self):
        return self.input

    def getSourceName(self):
        return self.input.getSourceName()

    def getCurrentInputSymbol(self, input):
        return input.LT(1)

    def getMissingSymbol(self, input, e, expectedTokenType, follow):
        """Conjure up a placeholder node for a token the input lacks."""
        tokenText = "<missing " + self.tokenNames[expectedTokenType] + ">"
        return CommonTree(CommonToken(type=expectedTokenType, text=tokenText))

    # precompiled regex used by inContext
    dotdot = ".*[^.]\\.\\.[^.].*"
    doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*"
    dotdotPattern = re.compile(dotdot)
    doubleEtcPattern = re.compile(doubleEtc)

    def inContext(self, context, adaptor=None, tokenName=None, t=None):
        """Check if current node in input has a context.

        Context means sequence of nodes towards root of tree. For example,
        you might say context is "MULT" which means my parent must be MULT.
        "CLASS VARDEF" says current node must be child of a VARDEF and whose
        parent is a CLASS node. You can use "..." to mean zero-or-more nodes.
        "METHOD ... VARDEF" means my parent is VARDEF and somewhere above
        that is a METHOD node. The first node in the context is not
        necessarily the root. The context matcher stops matching and returns
        true when it runs out of context. There is no way to force the first
        node to be the root.
        """
        # BUGFIX: was a bare ``_inContext(...)`` call which raised NameError;
        # the worker is a classmethod on this class.
        return self._inContext(
            self.input.getTreeAdaptor(), self.getTokenNames(),
            self.input.LT(1), context)

    @classmethod
    def _inContext(cls, adaptor, tokenNames, t, context):
        """The worker for inContext.

        It's static and full of parameters for testing purposes.
        """
        if cls.dotdotPattern.match(context):
            # don't allow "..", must be "..."
            raise ValueError("invalid syntax: ..")

        if cls.doubleEtcPattern.match(context):
            # don't allow double "..."
            raise ValueError("invalid syntax: ... ...")

        # ensure spaces around ...
        context = context.replace("...", " ... ")
        context = context.strip()
        nodes = context.split()

        ni = len(nodes) - 1
        t = adaptor.getParent(t)
        while ni >= 0 and t is not None:
            if nodes[ni] == "...":
                # walk upwards until we see nodes[ni-1] then continue walking
                if ni == 0:
                    # ... at start is no-op
                    return True
                goal = nodes[ni - 1]
                ancestor = cls._getAncestor(adaptor, tokenNames, t, goal)
                if ancestor is None:
                    return False
                t = ancestor
                ni -= 1

            name = tokenNames[adaptor.getType(t)]
            if name != nodes[ni]:
                return False

            # advance to parent and to previous element in context node list
            ni -= 1
            t = adaptor.getParent(t)

        # at root but more nodes to match
        if t is None and ni >= 0:
            return False

        return True

    @staticmethod
    def _getAncestor(adaptor, tokenNames, t, goal):
        """Helper for static inContext."""
        while t is not None:
            name = tokenNames[adaptor.getType(t)]
            if name == goal:
                return t
            t = adaptor.getParent(t)

        return None

    def matchAny(self, ignore):  # ignore stream, copy of this.input
        """
        Match '.' in tree parser has special meaning. Skip node or
        entire tree if node has children. If children, scan until
        corresponding UP node.
        """
        self._state.errorRecovery = False

        look = self.input.LT(1)
        if self.input.getTreeAdaptor().getChildCount(look) == 0:
            self.input.consume()  # not subtree, consume 1 node and return
            return

        # current node is a subtree, skip to corresponding UP.
        # must count nesting level to get right UP
        level = 0
        tokenType = self.input.getTreeAdaptor().getType(look)
        while tokenType != EOF and not (tokenType == UP and level == 0):
            self.input.consume()
            look = self.input.LT(1)
            tokenType = self.input.getTreeAdaptor().getType(look)
            if tokenType == DOWN:
                level += 1

            elif tokenType == UP:
                level -= 1

        self.input.consume()  # consume UP

    def mismatch(self, input, ttype, follow):
        """
        We have DOWN/UP nodes in the stream that have no line info; override.
        plus we want to alter the exception type. Don't try to recover
        from tree parser errors inline...
        """
        raise MismatchedTreeNodeException(ttype, input)

    def getErrorHeader(self, e):
        """
        Prefix error message with the grammar name because message is
        always intended for the programmer because the parser built
        the input tree not the user.
        """
        return (self.getGrammarFileName() +
                ": node from %sline %s:%s"
                % (['', "after "][e.approximateLineInfo],
                   e.line,
                   e.charPositionInLine
                   )
                )

    def getErrorMessage(self, e, tokenNames):
        """
        Tree parsers parse nodes they usually have a token object as
        payload. Set the exception token and do the default behavior.
        """
        if isinstance(self, TreeParser):
            adaptor = e.input.getTreeAdaptor()
            e.token = adaptor.getToken(e.node)
            # BUGFIX: condition was inverted (``is not None``); a synthetic
            # token is only needed when the node carries no token payload
            # (e.g. UP/DOWN nodes), matching the ANTLR3 Java runtime.
            if e.token is None:  # could be an UP/DOWN node
                e.token = CommonToken(
                    type=adaptor.getType(e.node),
                    text=adaptor.getText(e.node)
                )

        return BaseRecognizer.getErrorMessage(self, e, tokenNames)

    def traceIn(self, ruleName, ruleIndex):
        BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1))

    def traceOut(self, ruleName, ruleIndex):
        BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1))
#############################################################################
#
# tree visitor
#
#############################################################################
class TreeVisitor(object):
    """Do a depth first walk of a tree, applying pre() and post() actions
    we go.
    """

    def __init__(self, adaptor=None):
        # fall back to the default adaptor when none is supplied
        self.adaptor = adaptor if adaptor is not None else CommonTreeAdaptor()

    def visit(self, t, pre_action=None, post_action=None):
        """Visit every node in tree t and trigger an action for each node
        before/after having visited all of its children. Bottom up walk.
        Execute both actions even if t has no children. Ignore return
        results from transforming children since they will have altered
        the child list of this node (their parent). Return result of
        applying post action to this node.

        The Python version differs from the Java version by taking two
        callables 'pre_action' and 'post_action' instead of a class instance
        that wraps those methods. Those callables must accept a TreeNode as
        their single argument and return the (potentially transformed or
        replaced) TreeNode.
        """
        is_nil = self.adaptor.isNil(t)

        # nil (list) roots get no actions applied to them
        if not is_nil and pre_action is not None:
            # if rewritten, walk children of the replacement node
            t = pre_action(t)

        for child_index in range(self.adaptor.getChildCount(t)):
            child = self.adaptor.getChild(t, child_index)
            self.visit(child, pre_action, post_action)

        if not is_nil and post_action is not None:
            t = post_action(t)

        return t
#############################################################################
#
# streams for rule rewriting
#
#############################################################################
class RewriteRuleElementStream(object):
    """@brief Internal helper class.

    A generic list of elements tracked in an alternative to be used in
    a -> rewrite rule. We need to subclass to fill in the next() method,
    which returns either an AST node wrapped around a token payload or
    an existing subtree.

    Once you start next()ing, do not try to add more elements. It will
    break the cursor tracking I believe.

    @see org.antlr.runtime.tree.RewriteRuleSubtreeStream
    @see org.antlr.runtime.tree.RewriteRuleTokenStream

    TODO: add mechanism to detect/puke on modification after reading from
    stream
    """

    def __init__(self, adaptor, elementDescription, elements=None):
        # Cursor 0..n-1. If singleElement!=null, cursor is 0 until you next(),
        # which bumps it to 1 meaning no more elements.
        self.cursor = 0

        # Track single elements w/o creating a list. Upon 2nd add, alloc list
        self.singleElement = None

        # The list of tokens or subtrees we are tracking
        self.elements = None

        # Once a node / subtree has been used in a stream, it must be dup'd
        # from then on. Streams are reset after subrules so that the streams
        # can be reused in future subrules. So, reset must set a dirty bit.
        # If dirty, then next() always returns a dup.
        self.dirty = False

        # The element or stream description; usually has name of the token or
        # rule reference that this list tracks. Can include rulename too, but
        # the exception would track that info.
        self.elementDescription = elementDescription

        self.adaptor = adaptor

        if isinstance(elements, (list, tuple)):
            # feed off an existing list instead of tracking a single element
            self.singleElement = None
            self.elements = elements
        else:
            # stream with (at most) one element
            self.add(elements)

    def reset(self):
        """
        Reset the condition of this stream so that it appears we have
        not consumed any of its elements. Elements themselves are untouched.
        Once we reset the stream, any future use will need duplicates. Set
        the dirty bit.
        """
        self.cursor = 0
        self.dirty = True

    def add(self, el):
        """Track one more element; None is silently ignored."""
        if el is None:
            return

        if self.elements is not None:
            # already promoted to a list -- just extend it
            self.elements.append(el)
        elif self.singleElement is None:
            # first element: track it without allocating a list
            self.singleElement = el
        else:
            # second element arriving: promote the single element to a list
            self.elements = [self.singleElement, el]
            self.singleElement = None

    def nextTree(self):
        """
        Return the next element in the stream. If out of elements, throw
        an exception unless size()==1. If size is 1, then return elements[0].

        Return a duplicate node/subtree if stream is out of elements and
        size==1. If we've already used the element, dup (dirty bit set).
        """
        exhausted_single = self.cursor >= len(self) and len(self) == 1
        if self.dirty or exhausted_single:
            # out of elements and size is 1, dup
            return self.dup(self._next())

        # test size above then fetch
        return self._next()

    def _next(self):
        """
        do the work of getting the next element, making sure that it's
        a tree node or subtree. Deal with the optimization of single-
        element list versus list of size > 1. Throw an exception
        if the stream is empty or we're out of elements and size>1.
        protected so you can override in a subclass if necessary.
        """
        size = len(self)
        if size == 0:
            raise RewriteEmptyStreamException(self.elementDescription)

        if self.cursor >= size:  # out of elements?
            if size == 1:
                # size of 1 is ok; return it and the caller will dup
                return self.toTree(self.singleElement)

            # out of elements and size was not 1, so we can't dup
            raise RewriteCardinalityException(self.elementDescription)

        # we have elements
        if self.singleElement is not None:
            self.cursor += 1  # move cursor even for single element list
            return self.toTree(self.singleElement)

        # must have more than one in list, pull from elements
        result = self.toTree(self.elements[self.cursor])
        self.cursor += 1
        return result

    def dup(self, el):
        """
        When constructing trees, sometimes we need to dup a token or AST
        subtree. Dup'ing a token means just creating another AST node
        around it. For trees, you must call the adaptor.dupTree() unless
        the element is for a tree root; then it must be a node dup.
        """
        raise NotImplementedError

    def toTree(self, el):
        """
        Ensure stream emits trees; tokens must be converted to AST nodes.
        AST nodes can be passed through unmolested.
        """
        return el

    def hasNext(self):
        """True while at least one more element can be consumed."""
        if self.singleElement is not None and self.cursor < 1:
            return True
        return (self.elements is not None
                and self.cursor < len(self.elements))

    def size(self):
        """Number of tracked elements: 0, 1, or len(elements)."""
        if self.singleElement is not None:
            return 1
        return len(self.elements) if self.elements is not None else 0

    __len__ = size

    def getDescription(self):
        """Deprecated. Directly access elementDescription attribute"""
        return self.elementDescription
class RewriteRuleTokenStream(RewriteRuleElementStream):
    """@brief Internal helper class.

    Tracks token payloads for use on the right-hand side of a -> rewrite.
    """

    def toTree(self, el):
        # Tokens are handed back untouched; conversion to a tree node only
        # happens when the caller explicitly asks via nextNode().
        return el

    def nextNode(self):
        """Wrap the next token payload in a freshly created AST node."""
        return self.adaptor.createWithPayload(self._next())

    def nextToken(self):
        """Return the next raw token."""
        return self._next()

    def dup(self, el):
        # tokens are never duplicated; a new node is created instead
        raise TypeError("dup can't be called for a token stream.")
class RewriteRuleSubtreeStream(RewriteRuleElementStream):
    """@brief Internal helper class.

    Tracks subtrees matched on the left of a -> rewrite rule.
    """

    def nextNode(self):
        """
        Treat next element as a single node even if it's a subtree.
        This is used instead of next() when the result has to be a
        tree root node. Also prevents us from duplicating recently-added
        children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration
        must dup the type node, but ID has been added.

        Referencing a rule result twice is ok; dup entire tree as
        we can't be adding trees as root; e.g., expr expr.

        Hideous code duplication here with super.next(). Can't think of
        a proper way to refactor. This needs to always call dup node
        and super.next() doesn't know which to call: dup node or dup tree.
        """
        exhausted_single = self.cursor >= len(self) and len(self) == 1
        if self.dirty or exhausted_single:
            # out of elements and size is 1, dup (at most a single node
            # since this is for making root nodes).
            return self.adaptor.dupNode(self._next())

        # test size above then fetch
        return self._next()

    def dup(self, el):
        # full-tree duplication for non-root references
        return self.adaptor.dupTree(el)
class RewriteRuleNodeStream(RewriteRuleElementStream):
    """
    Queues up nodes matched on left side of -> in a tree parser. This is
    the analog of RewriteRuleTokenStream for normal parsers.
    """

    def nextNode(self):
        """Return the next node (already duplicated by toTree)."""
        return self._next()

    def toTree(self, el):
        # every node handed out is a fresh duplicate
        return self.adaptor.dupNode(el)

    def dup(self, el):
        # we dup every node, so don't have to worry about calling dup; short-
        # circuited next() so it doesn't call.
        raise TypeError("dup can't be called for a node stream.")
class TreeRuleReturnScope(RuleReturnScope):
    """
    This is identical to the ParserRuleReturnScope except that
    the start property is a tree nodes not Token object
    when you are parsing trees. To be generic the tree node types
    have to be Object.
    """

    def __init__(self):
        # first node matched by this rule
        self.start = None
        # AST produced by this rule, if any
        self.tree = None

    def getStart(self):
        """Accessor for the first matched node."""
        return self.start

    def getTree(self):
        """Accessor for the result tree."""
        return self.tree
| avatar29A/pyfuzzy | antlr3/tree.py | Python | mit | 78,283 | [
"VisIt"
] | 033ecb8b87de9d8162c0791bb99d3bde20fe8c64b974f4f8b7760e622031172a |
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Index.py
This module provides a way to create indexes to text files.
Classes:
Index Dictionary-like class used to store index information.
_ShelveIndex An Index class based on the shelve module.
_InMemoryIndex An in-memory Index class.
"""
import os
import array
import shelve
try:
import cPickle as pickle # Only available under Python 2
except ImportError:
import pickle # Python 3
class _ShelveIndex(dict):
"""An index file wrapped around shelve.
"""
# Without a good dbm module installed, this is pretty slow and
# generates large files. When generating an index on a FASTA-
# formatted file with 82000 sequences (37Mb), the
# index 'dat' file is 42Mb and 'dir' file is 8Mb.
__version = 2
__version_key = '__version'
def __init__(self, indexname, truncate=None):
dict.__init__(self)
try:
if truncate:
# In python 1.52 and before, dumbdbm (under shelve)
# doesn't clear the old database.
files = [indexname + '.dir',
indexname + '.dat',
indexname + '.bak'
]
for file in files:
if os.path.exists(file):
os.unlink(file)
raise Exception("open a new shelf")
self.data = shelve.open(indexname, flag='r')
except:
# No database exists.
self.data = shelve.open(indexname, flag='n')
self.data[self.__version_key] = self.__version
else:
# Check to make sure the database is the correct version.
version = self.data.get(self.__version_key, None)
if version is None:
raise IOError("Unrecognized index format")
elif version != self.__version:
raise IOError("Version %s doesn't match my version %s"
% (version, self.__version))
def __del__(self):
if 'data' in self.__dict__:
self.data.close()
class _InMemoryIndex(dict):
"""This creates an in-memory index file.
"""
# File Format:
# version
# key value
# [...]
__version = 3
__version_key = '__version'
def __init__(self, indexname, truncate=None):
self._indexname = indexname
dict.__init__(self)
self.__changed = 0 # the index hasn't changed
# Remove the database if truncate is true.
if truncate and os.path.exists(indexname):
os.unlink(indexname)
self.__changed = 1
# Load the database if it exists
if os.path.exists(indexname):
with open(indexname) as handle:
version = self._toobj(handle.readline().rstrip())
if version != self.__version:
raise IOError("Version %s doesn't match my version %s"
% (version, self.__version))
for line in handle:
key, value = line.split()
key, value = self._toobj(key), self._toobj(value)
self[key] = value
self.__changed = 0
def update(self, dict):
self.__changed = 1
dict.update(self, dict)
def __setitem__(self, key, value):
self.__changed = 1
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self.__changed = 1
dict.__delitem__(self, key)
def clear(self):
self.__changed = 1
dict.clear(self)
def __del__(self):
if self.__changed:
with open(self._indexname, 'w') as handle:
handle.write("%s\n" % self._tostr(self.__version))
for key, value in self.items():
handle.write("%s %s\n" %
(self._tostr(key), self._tostr(value)))
def _tostr(self, obj):
# I need a representation of the object that's saveable to
# a file that uses whitespace as delimiters. Thus, I'm
# going to pickle the object, and then convert each character of
# the string to its ASCII integer value. Then, I'm going to convert
# the integers into strings and join them together with commas.
# It's not the most efficient way of storing things, but it's
# relatively fast.
s = pickle.dumps(obj)
intlist = array.array('b', s)
return ','.join(str(i) for i in intlist)
def _toobj(self, str):
intlist = [int(i) for i in str.split(',')]
intlist = array.array('b', intlist)
return pickle.loads(''.join(chr(i) for i in intlist))
Index = _InMemoryIndex
| Ambuj-UF/ConCat-1.0 | src/Utils/Bio/Index.py | Python | gpl-2.0 | 4,948 | [
"Biopython"
] | 0422d8ab9a21f7a7bac20f69d9850ebe71376834b92e9ad8753d00d5e6c0ef33 |
#!/usr/bin/python2.6
# -*- coding: utf8 -*-
"""
"""
__author__ = "Jérôme Samson"
__copyright__ = "Copyright 2014, Mikros Image"
import os
import sys
import csv
import datetime
import logging
from logging import handlers
from optparse import OptionParser
try:
import simplejson as json
except ImportError:
import json
from tornado.httpclient import HTTPClient, HTTPError
from octopus.dispatcher import settings
from octopus.core import singletonconfig
###########################################################################################################################
# Data example:
# {
# "date": timestamp
# "licenses": "{\"shave\" : \"0 / 70\",\"nuke\" : \"0 / 70\",\"clarisse\" : \"0 / 5\",\"mtoa\" : \"137 / 195\",\"katana\" : \"24 / 200\",\"ocula\" : \"0 / 3\"}",
# "rendernodes":
# {
# "renderNodesByStatus":
# {
# "Paused": 89,
# "Working": 152,
# "Unknown": 51,
# "Assigned": 0,
# "Idle": 15,
# "Booting": 0,
# "Finishing": 0
# },
# "totalCores": 5192,
# "missingRenderNodes": 51,
# "idleCores": 1844
# },
# "commands":
# {
# "ASSIGNED": 0,
# "CANCELED": 38926,
# "RUNNING": 151,
# "DONE": 67467,
# "TIMEOUT": 0,
# "ERROR": 115,
# "READY": 5455,
# "FINISHING": 0,
# "TOTAL": 117238,
# "BLOCKED": 5124
# },
# "jobs":
# {
# "total": 2519
# }
# }
def process_args():
    '''
    Manages arguments parsing definition and help information
    '''
    description = """Retrieves stats info of the server (http://puliserver:8004/stats) and append it in the usage_stats.log file.
This is generally used in a cron script to grab renderfarm usage data over time. It can then be processed to generate several graphs."""

    parser = OptionParser(usage="", description=description, version="%prog 0.1")
    parser.add_option("-v", action="store_true", dest="verbose",
                      help="Verbose output")
    parser.add_option("-s", "--server", action="store", dest="hostname",
                      default="pulitest",
                      help="Specified a target host to send the request")
    parser.add_option("-p", "--port", action="store", dest="port",
                      type="int", default=8004,
                      help="Specified a target port")
    parser.add_option("-o", action="store", dest="outputFile",
                      default=os.path.join(settings.LOGDIR, "usage_stats.log"),
                      help="Target output file.")

    # parse_args() already yields the (options, args) pair we want
    return parser.parse_args()
if __name__ == "__main__":
    # Parse CLI options and load the dispatcher config (log level etc.).
    options, args = process_args()
    singletonconfig.load( settings.CONFDIR + "/config.ini" )

    #
    # Prepare request and store result in log file
    #
    _request = "http://%s:%s/stats" % ( options.hostname, options.port )
    _logPath = os.path.join( options.outputFile )

    # A rotating handler was considered but plain appending is used so the
    # whole history stays in one file for later graphing.
    # fileHandler = logging.handlers.RotatingFileHandler( _logPath,
    #                                                     maxBytes=20000000,
    #                                                     backupCount=1,
    #                                                     encoding="UTF-8")
    fileHandler = logging.FileHandler( _logPath, encoding="UTF-8")
    # message-only format: each log line is one raw JSON document
    fileHandler.setFormatter( logging.Formatter('%(message)s') )

    statsLogger = logging.getLogger('stats')
    statsLogger.addHandler( fileHandler )
    statsLogger.setLevel( singletonconfig.get('CORE','LOG_LEVEL') )

    # Fetch the /stats JSON synchronously from the puli server.
    http_client = HTTPClient()
    try:
        response = http_client.fetch( _request )
        if response.error:
            print "Error: %s" % response.error
            print " %s" % response.body
        else:
            if response.body == "":
                print "Error: No stats retrieved"
            else:
                # Strip bulky/unneeded sections before archiving: only the
                # rendernode and license summaries are kept.
                tmp = json.loads(response.body)
                del tmp["jobs"]
                del tmp["commands"]
                del tmp["licenses"]
                # NOTE: loop variable shadows the builtin 'license'; each
                # entry's per-rendernode list ("rns") is dropped to save space.
                for license in tmp["licensesDict"]:
                    del license["rns"]
                # logged at WARNING so it passes any configured log level
                statsLogger.warning( json.dumps(tmp) )
    except HTTPError, e:
        print "Error:", e

    del(http_client)
| mikrosimage/OpenRenderManagement | src/pulitools/stats/grab_usage_stats.py | Python | bsd-3-clause | 4,543 | [
"Octopus"
] | 2ba62d2d4474d8a7ed5105db6aa7fb966c78a002dbf259b4b07f3a7bffb5b323 |
#! /usr/bin/env python
import sys
from Settings import Settings
from HTML import HTML
import logging
import Tools.web as web
from Assigner import ParserAssigner
import Tools.IO as IO
from Top import Top
#DONE read/write E and gradients from/to .xyz file; make plots if E/dE available
#DONE rearrange geometry optimization buttons on the web page
#DONE add UI element to play geometries faster
#DONE recognize forward/reverse directions of IRC, assign negative sign to the reverse one
#DONE recognize Energy: record in .xyz comments
#DONE clean up b2 section of .xyz file when big irc file is shown
#DONE clean terse-pics folder before writing
#DONE add support for conversion to kcal/mol in .xyz comments
#DONE Write supplementary IRC classes
#DONE XYZ: make it recognize synonyms for e and grad
#DONE Write supplementary Geometry classes (+Gau)
#DONE Remove unnecessary solvation n/a
#DONE Put IRC direction in title
#DONE Geometry-specific comments should be stored in Geom objects and printed from Geom.comment
#DONE In Gaussian.py and ElectronicStructure.py, organize scan using object Scan()
#DONE show maxima/minima on scans
#DONE Gaussian.py: IRC gradients: used Delta-E instead
#DONE Gaussian.py: IRC gradients: search for minima on smoothed curve
#DONE check showPic for number of points
#DONE add charges support
#DONE stability support
#DONE Charges: Add charges of hydrocarbon H atoms to heavy atoms
#DONE add support for gs2 and dvv IRC options
#DONE Show largest amplitudes in Gau
#DONE Figure out if we can skip parsing of some big useless blocks in .log file
#DONE add .chk isosurfaces module
#DONE work with gaussian checkpoint files (in the same manner as .log files)
#DONE parse .nbout files
#DONE Charges: figure out the best way of organizing UI for charges
#DONE show NBO interactions
#DONE Unfold menu with list of geoms to buttons
#DONE Support JSmol
#DONE Resize Jmol window
# Top priority
#TODO Add backup functionality
#TODO Support JVXL format
#TODO show TDDFT orbitals involved in excitation
#TODO Write a python script to parallelize cubegen
#TODO work with gzipped files
#TODO Make html and gp files as templates
# To do next
#TODO connectivity mode
# Reimplement functionality:
# Nice features to add:
#TODO what are Natural Transition Orbitals?
#TODO attach NRT module
#TODO Scan recursively and recreate the folder hierarchy
#TODO add cclib support
#TODO add AIM support
#TODO recognize file type by content
#TODO Support 2D scan plots
#TODO support mp2 and semiempirics calculations
#TODO Show electronic state of the wavefunction if it has any symmetry
#TODO Make second run for terse to parse .xyz files in terse-pics to merge scans/ircs
#TODO User interface: use buttons; they don't change their state!
#TODO fchkgaussian: Instead of looking for predefined densities (SCF, MP2, etc), parse them!
#TODO IRC: add key to sort geometries!
#TODO Think about using /tmp dir and clean up procedure
#TODO Show energies in optimization convergence plot
#TODO Show molecule in 2D if NBO analysis is done
#TODO Export text information
#TODO Show vectors
#TODO NBO results: show correct topology
#TODO installer
#TODO self-doctor
# Bugs
# [ ] Jmol does not show array of text labels
# Postponed
#XXX Show IRC as text in Jmol window
# Problem: cannot echo variable in jmol; need to subscribe to jmol-users and ask about that
#XXX Find the way to set Jmol interactive elements from inside html code
# Rejected
#XXX Apply bond orders manually if NBO_topology present
# What's the use?
#XXX Gaussian Merge IRC files - For now, I don't think it is really a good idea.
# Instead, .xyz files produced by terse.py should be merged (they have all necessary information like e, x, and grad in comments)
# Advantage of this approach is ESS independence
#XXX Gaussian: Figure out why post_hf lot is determined incorrectly and energies do not show up
# For now this functionality does not look necessary, as we rarely use MP2 and CI, and there exist better programs for CC and MR
#XXX color logging:
# Gives nothing to functionality but might add issues with OS compatibility
#XXX T1 diagnostics
# Gaussian does not show t1 diagnostic by default, and it can not be activated in CBS-QB3
# procedure, so for now showing t1 diagnostics would not be very helpful
#XXX write topology to .mol file
# Using different file formats is inconvenient, and implementing topologies is not worth that mess.
# --- Script body (Python 2) --------------------------------------------------
# Parse the command line, configure logging, prepare output folders, then
# render one HTML row (left/right div pair) per successfully parsed file.
#debug = 'DEBUG'
settings = Settings(FromConfigFile = True)
Top.settings = settings
files = settings.parseCommandLine(sys.argv[1:])
# Logging level is driven by the settings read above.
if settings.debug:
    debug = 'DEBUG'
else:
    debug = 'INFO'
log = logging.getLogger('terse.py')
DebugLevel = getattr(logging,debug)
lf = '[%(levelname)s: %(funcName)s at %(filename)s +%(lineno)s] %(message)s'
logging.basicConfig(format=lf ,level=DebugLevel)
# Generated pictures/artifacts live under <OutputFolder>/<tersepic>.
absolute_tersepic = settings.OutputFolder + '/' + settings.tersepic
if not IO.prepareDir(settings.OutputFolder):
    sys.exit()
if not IO.prepareDir(absolute_tersepic):
    sys.exit()
IO.cleanDir(absolute_tersepic)
WebPage = HTML()
WebPage.readTemplate()
for fl in files:
    log.debug('Opening ' + str(fl))
    # 'fl' is a sequence: fl[0] says whether the entry is a plain file; fl[1]
    # is the file name handed to the parser below.
    if fl[0] == 'file':
        if not IO.all_readable(fl[1:]):
            continue
        f = ParserAssigner().typeByExt(fl)
    else:
        f = ParserAssigner().typeByCommandLine(fl)
    f.file = fl[1]
    try:
        f.parse()
        f.postprocess()
        b1, b2 = f.webData()
    except settings.exc_type, e:
        # Python 2 'except' syntax; a parse failure skips the file entirely.
        log.error('Parsing %s failed!' % (str(fl)))
        continue
    wb1 = WebPage.addLeftDiv(b1)
    wb2 = WebPage.addRightDiv(b2)
    WebPage.addDivRowWrapper(str(f.file)+web.brn+wb1,wb2)
    if settings.usage:
        f.usage()
    # Per-file counters kept on the settings object.
    settings.counter += 1
    settings.subcounter = 0
# Remove cube files from terse-pics
# IO.cleanCube(absolute_tersepic)
WebPage.finalize()
WebPage.write()
| mtthwflst/terse | terse.py | Python | mit | 5,906 | [
"Gaussian",
"Jmol",
"cclib"
] | cc7e7456f11db5a1e04efa454e7b4d1f68e6a0dd44cf9f2b56f37b531893eddc |
# -*- coding: utf-8 -*-
#
# Moonstone is platform for processing of medical images (DICOM).
# Copyright (C) 2009-2011 by Neppo Tecnologia da Informação LTDA
# and Aevum Softwares LTDA
#
# This file is part of Moonstone.
#
# Moonstone is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import vtk
from ..base import PluginBase
from gui.qt.textaction import TextAction
class TextPlugin(PluginBase):
    """ILSA plugin that exposes the text-annotation action.

    Registers itself with the given ILSA facade on construction and
    delegates state handling to a TextAction instance.
    """

    def __init__(self, ilsa):
        logging.debug("In TextPlugin::__init__()")
        self._name = None
        self._action = TextAction(ilsa)
        ilsa.add(self)
        self._ilsa = ilsa

    @property
    def ilsa(self):
        """The ILSA facade this plugin was registered with."""
        logging.debug("In TextPlugin::ilsa()")
        return self._ilsa

    @property
    def action(self):
        """The underlying TextAction driven by this plugin."""
        logging.debug("In TextPlugin::action()")
        return self._action

    @property
    def name(self):
        """Display name of the plugin (None until assigned)."""
        logging.debug("In TextPlugin::name()")
        return self._name

    @name.setter
    def name(self, name):
        logging.debug("In TextPlugin::name.setter()")
        self._name = name

    def notify(self, vtkInteractorStyle=None):
        """Interactor-style notification hook; this plugin ignores it."""
        logging.debug("In TextPlugin::notify()")

    def save(self):
        """Serialize plugin state as a {"type": ..., "value": ...} mapping."""
        logging.debug("In TextPlugin::save()")
        value = self._action.save()
        return {"type": self.type, "value": value}

    def restore(self, value=None):
        """Restore previously saved state; empty/None values are a no-op."""
        logging.debug("In TextPlugin::restore()")
        if value:
            self._action.restore(value)

    @property
    def description(self):
        """Human-readable description placeholder."""
        logging.debug("In TextPlugin::description()")
        return "..."

    @property
    def separator(self):
        """Whether a toolbar separator should precede this plugin."""
        logging.debug("In TextPlugin::separator()")
        return False

    @property
    def status(self):
        """Whether the plugin is enabled."""
        logging.debug("In TextPlugin::status()")
        return True

    def removeScene(self, scene):
        """Forward scene removal to the underlying action."""
        self._action.removeScene(scene)
| aevum/moonstone | src/moonstone/ilsa/plugins/text/text.py | Python | lgpl-3.0 | 2,543 | [
"VTK"
] | f6376d51460647d98c1d02744ca3632b053207be88033f7ce723c24584c897ad |
# -*- coding: UTF8 -*-
# Este arquivo é parte do programa Enplicaw
# Copyright 2013-2015 Carlo Oliveira <carlo@nce.ufrj.br>,
# `Labase <http://labase.selfip.org/>`__; `GPL <http://is.gd/3Udt>`__.
#
# Enplicaw é um software livre; você pode redistribuí-lo e/ou
# modificá-lo dentro dos termos da Licença Pública Geral GNU como
# publicada pela Fundação do Software Livre (FSF); na versão 2 da
# Licença.
#
# Este programa é distribuído na esperança de que possa ser útil,
# mas SEM NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÃO
# a qualquer MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a
# Licença Pública Geral GNU para maiores detalhes.
#
# Você deve ter recebido uma cópia da Licença Pública Geral GNU
# junto com este programa, se não, veja em <http://www.gnu.org/licenses/>
from random import shuffle, random, choice
import operator
from constants import PRIMES, DATA
__author__ = 'carlo'
__version__ = "0.3.0"
RND = 3141
ZFATOR = 2 # 2 * FATOR
TOP = 50
ZO = 3
LGN = -1000 # "large geniculated nucleus" # Route retina conections into cortex
def tupler(x):
    """Return every 0/1 tuple of length ``x + 1``, in lexicographic order."""
    combos = [(0,), (1,)]
    for _ in range(x):
        # Prepend each bit to every existing combination, keeping order.
        combos = [(bit,) + tail for bit in (0, 1) for tail in combos]
    return combos
class Wisard:
    """Weightless neural network (WiSARD-style). :ref:`wisard`

    Relies on the module-level RND pseudo-seed and the LGN sentinel defined
    above; RND is mutated by :meth:`main` while iterating over PRIMES.
    """
    def __init__(self, data, retinasize=3 * 4, bleach=0, mapper={i: i for i in range(4)}, enf=1, sup=0,
                 unsupervised=False):
        # NOTE(review): 'mapper' is a mutable default argument; it is only read
        # here (keys/items), so the shared-default pitfall is benign — confirm.
        self.data = data
        self.bleacher, self.enf, self.sup, self.retinasize, self.unsupervised = \
            mapper, enf, sup, retinasize, unsupervised
        self.auto_bleach = {}
        self.bleach = bleach
        self.clazzes = list(mapper.keys())

        class Cortex:
            # One discriminator per class: a list of two-input "neurons", each a
            # dict holding a counter per (bit, bit) pair plus two LGN slots that
            # record which retina pixels the neuron samples.
            def __init__(self, data, clazz, bleach):
                self.data, self.clazz, self.bleacher, self.cortex = data, clazz, bleach, [{(0, 0): []}]
                self.reset_cortex()

            def reset_cortex(self):
                # Draw each neuron's two input pixels from the remaining pool,
                # pseudo-randomly indexed by the module-level RND value.
                lgn = large_geniculated_nucleus = list(range(retinasize))
                self.cortex = [{(a, b): 0 if not b == LGN else lgn.pop(RND % len(lgn))
                                for a in [0, 1] for b in [0, 1, LGN]} for _ in range(retinasize // 2)]

            def learn(self, sample_clazz, master_retina):
                # NOTE(review): 'cls' below is not defined in any enclosing
                # scope — this raises NameError when sample_clazz != clazz.
                # Wisard.learn_samples re-implements this loop, so this method
                # appears to be dead code — confirm before relying on it.
                cortex, clazz, enf, sup = self.cortex, self.clazz, self.enf, self.sup
                for neuron in cortex:
                    neuron[(master_retina[neuron[(0, LGN)]], master_retina[neuron[(1, LGN)]])
                           ] += enf if sample_clazz == clazz else sup if cls != "N" else 0

            def classify(self, retina):
                # NOTE(review): 'self.bleach' is never set on Cortex instances
                # (only 'bleacher' is), so the return expression would raise
                # AttributeError; Wisard.classify_samples inlines its own
                # version instead — this method looks unused.
                retina = self.data
                if not retina:
                    return
                return {self.clazz: sum(
                    neuron[(retina[neuron[(0, LGN)]], retina[neuron[(1, LGN)]])]
                    for neuron in self.cortex) - len(retina) * (self.bleach + self.bleacher)}

        # One Cortex per class; per-class bleach comes from the mapper values.
        self.cortex = [Cortex(data, clazz, bleach) for clazz, bleach in mapper.items()]
        self.reset_brain()

    def reset_brain(self):
        """Re-draw every cortex's input wiring (uses current RND)."""
        [cortex.reset_cortex() for cortex in self.cortex]
        # self.auto_bleach = {key: 1 for key in self.clazzes}

    def _update_balance(self):
        # NOTE(review): indexes self.cortex (a list) by class name — this looks
        # stale relative to the current list-of-Cortex layout; verify callers.
        for clazz in self.clazzes:
            auto = sum(next(ram.values() for ram in self.cortex[clazz]))
            print(clazz, auto)
            self.auto_bleach[clazz] = auto if auto else 1
        return None

    def learn_samples(self):
        """Train every cortex on self.data; samples are (name, clazz, retina)."""
        enf, sup, samples, unsupervised, clazzes = self.enf, self.sup, self.data, self.unsupervised, self.clazzes
        print(samples[0])
        cortices = [(cortex.clazz, cortex.cortex) for cortex in self.cortex]
        for _, sample_clazz, master_retina in samples:
            if unsupervised:
                # Unsupervised mode assigns a random class label per sample.
                sample_clazz = choice(clazzes)
                print(sample_clazz)
            if sample_clazz:
                for clazz, cortex in cortices:
                    for neuron in cortex:
                        # Reinforce the observed bit pair for the matching
                        # class; optionally suppress for the others.
                        neuron[(master_retina[neuron[(0, LGN)]], master_retina[neuron[(1, LGN)]])
                               ] += enf if sample_clazz == clazz else sup if sample_clazz != "N" else 0

    def rank_samples(self):
        """Rank each sample against per-class score histograms.

        Returns a dict: sample name -> [labelled class, top-ranked class].
        """
        histo_classes = {clazz: [] for clazz in self.clazzes}
        res = self.classify_samples()
        [histo_classes[cl].append((s, name)) for name, _, line in res for cl, s in line.items()]
        ordered_histos = {}
        ordered_notes = {}
        ordered_cutter = {}
        for clazz, histo in histo_classes.items():
            histo.sort()
            minh = histo[0][0]
            ordered_histos[clazz] = [name for _, name in histo]
            # Relative jump between consecutive scores; the largest jump marks
            # the cut point for that class.
            ordered_notes[clazz] = [abs(10*(noteh - note)/max(1, note))
                                    for (noteh, _), (note, _) in zip(histo, histo[1:])]
            print(clazz, [name for _, name in histo], [note - minh for note, _ in histo], ordered_notes[clazz])
            ordered_cutter[clazz] = ordered_notes[clazz].index(max(ordered_notes[clazz]))
        ranker = {}
        for sample, clazz, _ in self.data:
            rank = [(histo.index(sample) if histo.index(sample) > ordered_cutter[clazz] else histo.index(sample)//4,
                     clazz) for clazz, histo in ordered_histos.items()]
            rank.sort(reverse=True)
            ranker[sample] = [clazz, rank[0][1]]
            print(sample, rank)
        return ranker

    def classify_samples(self):
        """Score every sample against every cortex.

        Returns a list of (name, labelled_class, {class: score}) triples —
        one entry per (cortex, sample) pair.
        """
        bleach, retinasize, samples = self.bleach, self.retinasize, self.data
        print("classify_samples", samples[0])
        cortices = [(cortex.clazz, cortex.bleacher, cortex.cortex) for cortex in self.cortex]
        return [
            (name, sample_clazz,
             {clazz: sum(
                 neuron[(retina[neuron[(0, LGN)]], retina[neuron[(1, LGN)]])]
                 for neuron in cortex) - retinasize * (bleach + bleacher)}
             ) for clazz, bleacher, cortex in cortices
            for name, sample_clazz, retina in samples]

    def run(self):
        """Reset, train, then classify; returns classify_samples() output."""
        self.reset_brain()
        self.learn_samples()  # [:8])
        # self.update_balance()
        res = self.classify_samples()
        return res

    def main(self, namer=-1):
        """Repeated-run driver printing per-sample confidence diagnostics.

        Mutates the module-level RND from PRIMES between runs.
        """
        global RND
        histo_classes = {clazz: [] for clazz in self.clazzes}
        clazzes = self.clazzes + ["U"]
        # 'U' slot stores "name label" text; other slots accumulate scores.
        tot = {u[0]: {key: 0 if key != "U" else str(u[0]) + " " + str(u[1]) for key in clazzes} for u in
               self.data}
        primes = PRIMES[:]
        for _ in range(1):
            # shuffle(data)
            RND = primes.pop()
            res = self.run()
            [histo_classes[cl].append((s, name)) for name, _, line in res for cl, s in line.items()]
            [tot[name].update({cl: tot[name][cl] + s for cl, s in line.items()}) for name, _, line in res]
        total = list(tot.keys())
        total.sort()
        total_conf = 0
        total_sec = 0
        for line in total:
            val = dict(tot[line])
            user = val.pop("U")[namer:] if "U" in val else ""
            val = list(val.items())
            # print(val)
            val.sort(key=operator.itemgetter(1), reverse=True)
            # 'third' is extracted but unused below.
            first, sec, third = val[0][1], val[1][1], val[2][1]
            confidence = min(100 * abs(first - sec) // max(abs(first), 1), 100)
            conf = confidence if (user == val[0][0][namer:]) or ("e" == user) else -2*confidence
            secd = min(abs(sec // max(abs(first), 1)) * conf, 100)  # if (user == val[0][0]) or ("e" == user) else 0
            # conf = 100 * abs(first-sec) // max(abs(first), abs(sec))
            # conf = 100 * (max(first, 0)-max(sec, 0)) // first
            total_conf += conf
            total_sec += secd
            # print(tot[line]["U"] + " " + "".join(["%s:%8.0f " % (a[-3:], b) for a, b in val]), "conf: %d" % conf)
            print("{name: >42} {val} conf: {conf}".format(name=tot[line]["U"] if "U" in tot[line] else "",
                                                          val="".join(["%s:%8.0f " % (a[-3:], b) for a, b in val]),
                                                          conf=conf))
        print("total confidence %f" % (1.0 * total_conf / len(total)))
        ordered_histos = {}
        ordered_notes = {}
        ordered_cutter = {}
        for clazz, histo in histo_classes.items():
            histo.sort()
            minh = histo[0][0]
            ordered_histos[clazz] = [name for _, name in histo]
            ordered_notes[clazz] = [abs(10*(noteh - note)/note) for (noteh, _), (note, _) in zip(histo, histo[1:])]
            print(clazz, [name for _, name in histo], [note - minh for note, _ in histo], ordered_notes[clazz])
            ordered_cutter[clazz] = ordered_notes[clazz].index(max(ordered_notes[clazz]))
        # NOTE(review): 148 looks hard-coded to the iris data set size — confirm.
        for sample in range(148):
            rank = [(histo.index(sample) if histo.index(sample) > ordered_cutter[clazz] else histo.index(sample)//4,
                     clazz) for clazz, histo in ordered_histos.items()]
            rank.sort(reverse=True)
            print(sample, rank)
        return

    def unsupervised_class(self, namer=-1):
        """Reset, train (random labels when unsupervised), and rank samples."""
        self.reset_brain()
        self.learn_samples()
        self.rank_samples()

    def retinify_samples(self, samples):
        # Results are discarded; appears to be a warm-up/validation pass.
        [self.retinify(sample[2:]) for sample in samples]

    @staticmethod
    def retinify(retina, threshold=32, band=8, zoom=4):
        """Thermometer-style binary encoding of a numeric feature vector.

        Each value becomes a run of 0s, then a band of 1s, then trailing 0s,
        truncated to 'threshold' bits; the per-value chunks are concatenated.
        """
        def retinate(value, pix=0, bnd=0):
            return [pix] * int(bnd + (1 - pix) * float(value) * zoom // ZFATOR)

        def deretinate(value, pix=0):
            return [pix] * (TOP - (band + int(float(value) * zoom // ZFATOR)))
        # print(retina, [(int(float(ZO * v) // ZFATOR), (TOP - (2 * int(float(ZO * v) // ZFATOR)))) for v in retina])
        retina = [
            (retinate(value) + retinate(value, 1, band) + deretinate(value))[:threshold]
            for value in retina]
        return [pix for line in retina for pix in line]

    @staticmethod
    def sense_domain(data):
        """Build a two-input 'lobe' histogram over CSV samples and split it.

        Prints domain statistics, then returns split_classes()'s {"l":..., "h":...}.
        """
        def updater(lobe, index, off):
            return {index: lobe[index] + off}
        data = [[float(p) for p in line.split(",")[:-1]] for i, line in enumerate(data)]
        retina = Wisard.retinify(data[0])
        lobe = [{(a, b): 0 for a in [0, 1] for b in [0, 1]} for _ in range(len(retina) // 2)]
        master_retina = [0 for line in range(len(retina))]
        for sample in data:
            retina = Wisard.retinify(sample)
            # Accumulate per-pixel activation counts across all samples.
            [master_retina.__setitem__(pix, master_retina[pix]+retina[pix]) for pix in range(len(master_retina))]
            # Each neuron consumes (destructively pops) two pseudo-random pixels.
            [neuron.update(
                updater(neuron, (retina.pop(RND % len(retina)), retina.pop(RND % len(retina))), 1))
             for neuron in lobe]
        domain = list(set(master_retina[:]))
        domain.sort()
        domain = [(tre, sum(1 for pix in master_retina if tre == pix)) for tre in domain]
        print(domain, len(master_retina), len(data), len(data[0]), sum(dm[1] for dm in domain[1:-1]))
        domain = list(set([val for neuron in lobe for val in neuron.values()]))
        domain.sort()
        domain = [(tre, sum(1 for neuron in lobe for val in neuron.values() if tre == val)) for tre in domain]
        # NOTE(review): the trailing ", sum(...)" makes this line a discarded
        # tuple expression — it is computed but never printed or used.
        print(domain, len(lobe), sum(dm[1] for dm in domain[1:-1])), sum(dm[0] for dm in domain[1:-1])
        return Wisard.split_classes(domain, lobe, master_retina)

    @staticmethod
    def split_classes(domain, lobe, master_retina):
        """Partition lobe neurons into low/high halves by weighted counts."""
        cutter = sum(dm[0]*dm[1] for dm in domain[1:-1])//2
        lower_half = []
        higher_half = []
        wheighted_sum = 0
        for wheight, count in domain[1:-1]:
            if wheighted_sum > cutter:
                break
            wheighted_sum += wheight * count
        # NOTE(review): 'wheight' below is the loop variable's last value from
        # the for-loop above, not re-bound per neuron — confirm intent.
        [lower_half.append(neuron) if wheighted_sum < cutter else higher_half.append(neuron) for neuron in lobe
         if any(neuron[(a, b)] == wheight for a in [0, 1] for b in [0, 1])]
        print(cutter, len(lower_half), len(higher_half), wheighted_sum)
        show([1 if pix else 0 for pix in master_retina])
        return {"l": lower_half, "h": higher_half}

    def unsupervised_learn(self, data):
        """Two-pass unsupervised split: sense the domain, classify, refine."""
        clazzes = self.sense_domain(data)
        self.cortex = clazzes
        self.bleacher = {key: 0 for key in clazzes.keys()}
        # NOTE(review): 'samples' is built but classify_samples() reads
        # self.data, so this local appears unused — confirm.
        samples = [[i, line.split(",")[-1]] + [float(p) for p in line.split(",")[:-1]] for i, line in enumerate(data)]
        result = self.classify_samples()
        for line in result:
            print(line)
        print ("##################################################################")
        # Keep only samples whose 'h' score equals 253, then redo the split.
        data = [dt for dt, rs in zip(data, result) if rs[2]["h"] == 253]
        clazzes = self.sense_domain(data)
        self.cortex = clazzes
        self.bleacher = {key: 0 for key in clazzes.keys()}
        data = [[i, line.split(",")[-1]] + [float(p) for p in line.split(",")[:-1]] for i, line in enumerate(data)]
        result = self.classify_samples()
        for line in result:
            print(line)
def show(retina):
    """Print a 32x32 retina, one row of concatenated cell values per line."""
    for row in range(32):
        cells = (str(retina[32 * row + col]) for col in range(32))
        print("".join(cells))
    return
def plot(data):
    """Draw up to four polar plots, one subplot per (title, case_data) pair.

    Each case_data entry is a sequence of 125-point radial curves.
    NOTE(review): 'COLORS' is not defined in this module as far as visible —
    this would raise NameError unless it is injected elsewhere; confirm.
    """
    import matplotlib.pyplot as plt
    from math import pi
    # 125 equally spaced angles covering the full circle.
    step = 2*pi/125
    theta = [ang*step for ang in range(125)]
    fig = plt.figure(figsize=(9, 9))
    fig.subplots_adjust(wspace=0.25, hspace=0.20, top=0.85, bottom=0.05)
    for n, (title, case_data) in enumerate(data):
        print("plot(data)", title, len(case_data))
        ax = fig.add_subplot(2, 2, n + 1, projection='polar')
        # plt.rgrids([0.2, 0.4, 0.6, 0.8])
        ax.set_title(title, weight='bold', size='medium', position=(0.5, 1.1),
                     horizontalalignment='center', verticalalignment='center')
        for color, line in zip(COLORS, case_data):
            ax.plot(theta, line, color=color, linewidth=2)
        ax.set_rmax(15.0)
        ax.grid(True)
    # add legend relative to top-left plot
    # plt.subplot(2, 2, 1)
    # labels = ('Factor 1', 'Factor 2', 'Factor 3', 'Factor 4', 'Factor 5')
    # legend = plt.legend(labels, loc=(0.9, .95), labelspacing=0.1)
    # plt.setp(legend.get_texts(), fontsize='small')
    plt.figtext(0.5, 0.965, 'Classes de aluno segundo a transitividade',
                ha='center', color='black', weight='bold', size='large')
    plt.show()
def main(data, unsupervised=False):
    """Turn iris CSV lines into retina samples and run the Wisard network.

    data: iterable of comma-separated strings, last field being the label.
    unsupervised: forwarded to Wisard; when True, labels are drawn at random.
    """
    global RND
    # Known iris class labels (kept for reference; not used below).
    cls = "Iris-setosa Iris-versicolor Iris-virginica".split()
    samples = [(index, row.split(",")[-1],
                Wisard.retinify([float(field) for field in row.split(",")[:-1]]))
               for index, row in enumerate(data)]
    bleach_map = {"Iris-setosa": 0, "Iris-versicolor": 0, "Iris-virginica": 0}
    network = Wisard(samples, 22 * 4, bleach=0, mapper=bleach_map, enf=1, sup=0,
                     unsupervised=unsupervised)
    network.unsupervised_class()
# Script entry point: run the unsupervised classification over the bundled
# DATA constant. The commented calls are alternative manual experiments.
if __name__ == '__main__':
    main(DATA, unsupervised=True)
    # Wisard.sense_domain(DATA)
    # Wisard().unsupervised_learn(DATA)
| cetoli/enplicaw | src/enplicaw/enplicaw.py | Python | gpl-2.0 | 15,050 | [
"NEURON"
] | 3d7d9373dc8a2ef10c963c4340266730eb8caca9ad2f0b183977b609a3bc66c6 |
"""
Traits View definition file.
The view trait of the parent class is extracted from the model definition
file. This file can either be exec()ed or imported. See
core/base.py:Base.trait_view() for what is currently used. Using exec()
allows view changes without needing to restart Mayavi, but is slower than
importing.
"""
# Authors: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Judah De Paula <judah@enthought.com>
# Copyright (c) 2005-2015, Enthought, Inc.
# License: BSD Style.
from traits.etsconfig.api import ETSConfig
from traitsui.api \
import Item, Group, View, ImageEnumEditor, InstanceEditor, HGroup
from mayavi.core.lut_manager import lut_mode_list, \
lut_image_dir
def _number_of_lut_cols():
    """Columns for the LUT-mode image grid: one under Qt4, six otherwise."""
    if ETSConfig.toolkit == 'qt4':
        return 1
    return 6
# The view of the LUT Manager object.
# Declarative TraitsUI layout: LUT selection controls, the legend (scalar bar)
# group, and the data-range group, followed by window-level options.
view = View(Group(
                  # LUT selection: colormap mode, optional file, size, reversal.
                  Item(name='lut_mode',
                       editor=ImageEnumEditor(values=lut_mode_list(),
                                              cols=_number_of_lut_cols(),
                                              path=lut_image_dir)),
                  Item(name='file_name', visible_when="lut_mode=='file'"),
                  Item(name='number_of_colors'),
                  Item(name='reverse_lut'),
                  Item(name='lut',
                       show_label=False,
                       editor=InstanceEditor(label='Edit LUT properties',
                                             id='mayavi.core.lut_manager.edit_lut')),
                  Item(name='scalar_bar_representation',
                       show_label=False,
                       visible_when='scalar_bar_representation is not None',
                       editor=InstanceEditor(label='Edit Legend representation',
                                             id='mayavi.core.lut_manager.edit_represetation')),
                  Item(name='create_lut', show_label=False),
                  # Legend (scalar bar) controls; sub-groups are only enabled
                  # while the bar is shown.
                  Group(Item(name='show_legend'),
                        Group(
                            Item(name='number_of_labels'),
                            enabled_when='show_scalar_bar==True',
                        ),
                        Group(
                            Item(name='shadow'),
                            Item(name='use_default_name'),
                            Item(name='data_name',
                                 enabled_when='not object.use_default_name'),
                            HGroup(
                                Item(name='_title_text_property',
                                     show_label=False,
                                     editor=InstanceEditor(label='Edit bar Title',
                                                           id='mayavi.core.lut_manager.bar_title_text')),
                                Item(name='_label_text_property',
                                     show_label=False,
                                     editor=InstanceEditor(label='Edit bar Text',
                                                           id='mayavi.core.lut_manager.bar_label_text'),
                                     label='Edit bar Text'),
                            ),
                            HGroup(
                                Item(name='scalar_bar',
                                     show_label=False,
                                     editor=InstanceEditor(label='Edit bar Actor',
                                                           id='mayavi.core.lut_manager.bar_actor'),
                                     ),
                                Item(name='scalar_bar_widget',
                                     show_label=False,
                                     editor=InstanceEditor(label='Edit bar Widget',
                                                           id='mayavi.core.lut_manager.bar_widget'),
                                     ),
                            ),
                            enabled_when='show_scalar_bar==True',
                        ),
                        show_border=True,
                        ),
                  # Scalar data range controls.
                  Group(
                      Item(name='use_default_range'),
                      Item(name='data_range',
                           enabled_when='not object.use_default_range'),
                      show_border=True,
                  ),
                  label='LUT (Look Up Table) Manager',
                  ),
            # Delete this once we're sure we want to keep the new integrated format.
            # Group(Item(name='_title_text_property',
            #            style='custom',
            #            resizable=True),
            #       show_labels=False,
            #       defined_when='show_scalar_bar==True',
            #       label='Title'),
            # Group(Item(name='_label_text_property',
            #            style='custom',
            #            resizable=True),
            #       enabled_when='show_scalar_bar==True',
            #       show_labels=False,
            #       label='Labels'),
            resizable=True,
            )
| dmsurti/mayavi | mayavi/core/ui/lut_manager.py | Python | bsd-3-clause | 4,874 | [
"Mayavi"
] | 5877b6742142dba315f820be51c1483178b4a20181a44086009132eb90089ec5 |
from AritVisitor import AritVisitor
from AritParser import AritParser
class UnknownIdentifier(Exception):
    """Raised when an expression references an identifier with no stored value."""
    pass
class MyAritVisitor(AritVisitor):
    """ANTLR tree visitor evaluating arithmetic expressions with a variable store."""

    def __init__(self):
        # Maps identifier name -> last assigned numeric value.
        self._memory = dict()

    def visitNumberAtom(self, ctx):
        """Parse a numeric literal, preferring int over float."""
        text = ctx.getText()
        try:
            return int(text)
        except ValueError:
            return float(text)

    def visitIdAtom(self, ctx):
        """Look up an identifier's value, failing loudly when unknown."""
        name = ctx.getText()
        if name not in self._memory:
            raise UnknownIdentifier(name)
        return self._memory[name]

    def visitMultiplicationExpr(self, ctx):
        """Evaluate both operands, then apply '*' or '/' per the mdop token."""
        lhs = self.visit(ctx.expr(0))
        rhs = self.visit(ctx.expr(1))
        return lhs * rhs if ctx.mdop.type == AritParser.MULT else lhs / rhs

    def visitAdditiveExpr(self, ctx):
        """Evaluate both operands, then apply '+' or '-' per the pmop token."""
        lhs = self.visit(ctx.expr(0))
        rhs = self.visit(ctx.expr(1))
        return lhs + rhs if ctx.pmop.type == AritParser.PLUS else lhs - rhs

    def visitExprInstr(self, ctx):
        """Evaluate a bare expression statement and print its value."""
        val = self.visit(ctx.expr())
        print('The value is {0}'.format(val))

    def visitParens(self, ctx):
        """Parentheses only group: evaluate the inner expression."""
        return self.visit(ctx.expr())

    def visitAssignInstr(self, ctx):
        """Evaluate the right-hand side and store it under the identifier."""
        val = self.visit(ctx.expr())
        name = ctx.ID().getText()
        print('now {0} has value {1}'.format(name, val))
        self._memory[name] = val
| lauregonnord/cap-labs | TP03/arith-visitor/MyAritVisitor.py | Python | gpl-3.0 | 1,520 | [
"VisIt"
] | 3e1af03c08bf91718ddfaa72f6f5316ba8be54f125977fc8934dd18e0ab1da69 |
# Author: Suyog Dutt Jain <suyog.jain@aero.iitb.ac.in>
# Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2008-2015, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import abspath
from io import BytesIO
import numpy
import unittest
from numpy import array
# Enthought library imports.
from tvtk.common import is_old_pipeline
from mayavi.core.null_engine import NullEngine
from mayavi.sources.builtin_image import BuiltinImage
from mayavi.modules.surface import Surface
from mayavi.modules.outline import Outline
class TestBuiltinImageSource(unittest.TestCase):
    """Exercises the BuiltinImage source (ellipsoid/gaussian) on a NullEngine."""

    def setUp(self):
        # Off-screen engine so the tests run without a display.
        e = NullEngine()
        # Uncomment to see visualization for debugging etc.
        #e = Engine()
        e.start()
        s=e.new_scene()

        image_data = BuiltinImage()
        e.add_source(image_data)

        outline = Outline()
        e.add_module(outline)
        surface = Surface()
        e.add_module(surface)

        # Configure the default ellipsoid shape and extent.
        image_data.data_source.radius = array([ 80., 80., 80.])
        image_data.data_source.center = array([ 150., 150., 0.])
        image_data.data_source.whole_extent = array([ 10, 245, 10, 245, 0, 0])
        # API differs between old and new VTK pipelines.
        if is_old_pipeline():
            image_data.data_source.update_whole_extent()
        else:
            image_data.data_source.set_update_extent_to_whole_extent()

        self.e=e
        self.scene = e.current_scene
        return

    def tearDown(self):
        self.e.stop()
        return

    def test_data_source(self):
        """The configured ellipsoid parameters survive into the source."""
        s = self.scene
        src = s.children[0]
        self.assertEqual(src.source,'ellipsoid')
        self.assertEqual(numpy.allclose(src.data_source.center,(150., 150., 0.)),True)
        self.assertEqual(numpy.allclose(src.data_source.radius,(80., 80., 80.)),True)
        self.assertEqual(numpy.allclose(src.data_source.whole_extent,(10, 245, 10, 245, 0, 0)),True)

    def check(self):
        """Shared assertions for the modified gaussian source state."""
        s = self.scene
        src = s.children[0]
        ot = src.children[0].children[0]
        ot.render()

        # Check with the default properties of gaussian image to verify
        # that the source has actually changed
        self.assertEqual(src.source,'gaussian')
        self.assertEqual(numpy.allclose(src.data_source.center,(0., 0., 0.)),True)
        self.assertEqual(src.data_source.maximum,2.0)
        self.assertEqual(src.data_source.standard_deviation,15)

        # Check the scalar ranges
        sc = src.outputs[0].point_data.scalars
        self.assertEqual(numpy.allclose(sc.range, (0, 2.0), atol=1.01e-03), True)

    def test_change(self):
        """Switching source kind resets its parameters to gaussian defaults."""
        s = self.scene
        src = s.children[0]
        ot = src.children[0].children[0]

        src.source = 'gaussian'
        # Check with the default properties of gaussian image to verify
        # that the source has actually changed
        self.assertEqual(src.source,'gaussian')
        self.assertEqual(numpy.allclose(src.data_source.center,(0., 0., 0.)),True)
        self.assertEqual(src.data_source.maximum,1.0)
        self.assertEqual(src.data_source.standard_deviation,100)

        #Check the scalar ranges
        self.assertEqual(numpy.allclose(src.outputs[0].point_data.scalars.range,(0.00149, 1.0),atol=1.01e-03),True)

        src.data_source.maximum = 2.0
        src.data_source.standard_deviation = 15
        if not is_old_pipeline():
            src.data_source.update()
        self.check()

    def test_save_and_restore(self):
        """Test if saving a visualization and restoring it works."""
        engine = self.e
        scene = self.scene
        src = scene.children[0]
        src.source = 'gaussian'
        src.data_source.maximum = 2.0
        src.data_source.standard_deviation = 15

        # Save visualization.
        f = BytesIO()
        f.name = abspath('test.mv2')  # We simulate a file.
        engine.save_visualization(f)
        f.seek(0)  # So we can read this saved data.

        # Remove existing scene.
        engine.close_scene(scene)

        # Load visualization
        engine.load_visualization(f)
        self.scene = engine.current_scene

        self.check()
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| dmsurti/mayavi | mayavi/tests/test_builtin_image.py | Python | bsd-3-clause | 4,187 | [
"Gaussian",
"Mayavi"
] | 2ae96f920c8a68a54c529ef0475d5d6a9ba1e8bb266852fd89e4c2fc4ca7a795 |
# Publisher and subscriber design pattern example.
# More information about this design pattern can be found at:
# http://wiki.wxpython.org/ModelViewController
# http://wiki.wxpython.org/PubSub
from invesalius.pubsub import pub as Publisher
# The maintainer of Pubsub module is Oliver Schoenborn.
# Since the end of 2006 Pubsub is now maintained separately on SourceForge at:
# http://pubsub.sourceforge.net/
class Student:
    """Subscriber: reacts to project/grade broadcasts keyed by student name.

    Python 2 code (print statements); uses the legacy pubsub API where the
    callback receives an event object carrying the payload in .data —
    NOTE(review): confirm against the pubsub version actually installed.
    """
    def __init__(self, name):
        self.name = name
        self.mood = ":|"  # neutral until a grade arrives
        self.__bind_events()

    def __bind_events(self):
        # Register callbacks on the two topics published by Teacher.
        Publisher.subscribe(self.ReceiveProject, "Set Student Project")
        Publisher.subscribe(self.ReceiveGrade, "Set Student Grade")

    def ReceiveProject(self, pubsub_evt):
        # Payload is a dict of student name -> project.
        projects_dict = pubsub_evt.data
        self.project = projects_dict[self.name]
        print "%s: I've received the project %s" % (self.name, self.project)

    def ReceiveGrade(self, pubsub_evt):
        # Payload is a dict of student name -> numeric grade; passing grade > 6.
        grades_dict = pubsub_evt.data
        self.grade = grades_dict[self.name]
        if self.grade > 6:
            self.mood = ":)"
        else:
            self.mood = ":("
        print "%s: I've received the grade %d %s" % (self.name, self.grade, self.mood)
class Teacher:
    """Publisher: broadcasts the course's projects and grades over pubsub."""
    def __init__(self, name, course):
        self.name = name
        self.course = course

    def SendMessage(self):
        # Each sendMessage fans out to every subscribed Student instance.
        print "%s: Telling students the projects" % (self.name)
        Publisher.sendMessage("Set Student Project", self.course.projects_dict)
        print "\n%s: Telling students the grades" % (self.name)
        Publisher.sendMessage("Set Student Grade", self.course.grades_dict)
class Course:
    """A taught subject plus per-student project and grade mappings.

    Both mappings are keyed by student name and filled in by the caller.
    """

    def __init__(self, subject):
        self.subject = subject
        self.grades_dict, self.projects_dict = {}, {}
# Demo wiring: three subscribers (students), one course with data, and a
# teacher whose SendMessage() publishes that data over the two topics above.
# Create students:
s1 = Student("Coelho")
s2 = Student("Victor")
s3 = Student("Thomaz")

# Create subject:
cs102 = Course("InVesalius")
cs102.projects_dict = {"Coelho": "wxPython", "Victor": "VTK", "Thomaz": "PIL"}
cs102.grades_dict = {"Coelho": 7, "Victor": 6.5, "Thomaz": 4}

# Create teacher:
andre = Teacher("Andre", cs102)
andre.SendMessage()
| paulojamorim/invesalius3 | docs/devel/example_pubsub.py | Python | gpl-2.0 | 2,140 | [
"VTK"
] | 19fa49816c5edd163ae7b40b6c16e6f5abc3436285ea92fde3b4c3b8c167ad8f |
########################################################################
# File : Watchdog.py
# Author: Stuart Paterson
########################################################################
""" The Watchdog class is used by the Job Wrapper to resolve and monitor
the system resource consumption. The Watchdog can determine if
a running job is stalled and indicate this to the Job Wrapper.
Furthermore, the Watchdog will identify when the Job CPU limit has been
exceeded and fail jobs meaningfully.
Information is returned to the WMS via the heart-beat mechanism. This
also interprets control signals from the WMS e.g. to kill a running
job.
- Still to implement:
- CPU normalization for correct comparison with job limit
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import re
import time
import resource
import errno
import socket
import getpass
import psutil
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities import MJF
from DIRAC.Core.Utilities.Profiler import Profiler
from DIRAC.Core.Utilities.Os import getDiskSpace
from DIRAC.Core.Utilities.Subprocess import getChildrenPIDs
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemInstance
from DIRAC.Resources.Computing.BatchSystems.TimeLeft.TimeLeft import TimeLeft
from DIRAC.WorkloadManagementSystem.Client.JobStateUpdateClient import JobStateUpdateClient
from DIRAC.WorkloadManagementSystem.Client import JobMinorStatus
class Watchdog(object):
#############################################################################
def __init__(self, pid, exeThread, spObject, jobCPUTime,
memoryLimit=0, processors=1, jobArgs={}):
""" Constructor, takes system flag as argument.
"""
self.stopSigStartSeconds = int(jobArgs.get('StopSigStartSeconds', 1800)) # 30 minutes
self.stopSigFinishSeconds = int(jobArgs.get('StopSigFinishSeconds', 1800)) # 30 minutes
self.stopSigNumber = int(jobArgs.get('StopSigNumber', 2)) # SIGINT
self.stopSigRegex = jobArgs.get('StopSigRegex', None)
self.stopSigSent = False
self.log = gLogger.getSubLogger("Watchdog")
self.exeThread = exeThread
self.wrapperPID = pid
self.appPID = self.exeThread.getCurrentPID()
self.spObject = spObject
self.jobCPUTime = jobCPUTime
self.memoryLimit = memoryLimit
self.calibration = 0
self.initialValues = {}
self.parameters = {}
self.peekFailCount = 0
self.peekRetry = 5
self.profiler = Profiler(pid)
self.checkError = ''
self.currentStats = {}
self.initialized = False
self.count = 0
# defaults
self.testWallClock = 1
self.testDiskSpace = 1
self.testLoadAvg = 1
self.maxWallClockTime = 3 * 24 * 60 * 60
self.testCPUConsumed = 1
self.testCPULimit = 0
self.testMemoryLimit = 0
self.testTimeLeft = 1
self.pollingTime = 10 # 10 seconds
self.checkingTime = 30 * 60 # 30 minute period
self.minCheckingTime = 20 * 60 # 20 mins
self.wallClockCheckSeconds = 5 * 60 # 5 minutes
self.maxWallClockTime = 3 * 24 * 60 * 60 # e.g. 4 days
self.jobPeekFlag = 1 # on / off
self.minDiskSpace = 10 # MB
self.loadAvgLimit = 1000 # > 1000 and jobs killed
self.sampleCPUTime = 30 * 60 # e.g. up to 20mins sample
self.jobCPUMargin = 20 # %age buffer before killing job
self.minCPUWallClockRatio = 5 # ratio %age
self.nullCPULimit = 5 # After 5 sample times return null CPU consumption kill job
self.checkCount = 0
self.wallClockCheckCount = 0
self.nullCPUCount = 0
self.grossTimeLeftLimit = 10 * self.checkingTime
self.timeLeftUtil = TimeLeft()
self.timeLeft = 0
self.littleTimeLeft = False
self.scaleFactor = 1.0
self.processors = processors
  #############################################################################
  def initialize(self):
    """ Watchdog initialization.

        Reads the check control flags and thresholds from the CS section
        /Systems/WorkloadManagement/<instance>/JobWrapper. Safe to call
        more than once: subsequent calls are no-ops returning S_OK().
    """
    if self.initialized:
      self.log.info('Watchdog already initialized')
      return S_OK()
    else:
      self.initialized = True
    setup = gConfig.getValue('/DIRAC/Setup', '')
    if not setup:
      return S_ERROR('Can not get the DIRAC Setup value')
    wms_instance = getSystemInstance("WorkloadManagement")
    if not wms_instance:
      return S_ERROR('Can not get the WorkloadManagement system instance')
    self.section = '/Systems/WorkloadManagement/%s/JobWrapper' % wms_instance
    self.log.verbose('Watchdog initialization')
    # Test control flags
    self.testWallClock = gConfig.getValue(self.section + '/CheckWallClockFlag', 1)
    self.testDiskSpace = gConfig.getValue(self.section + '/CheckDiskSpaceFlag', 1)
    self.testLoadAvg = gConfig.getValue(self.section + '/CheckLoadAvgFlag', 1)
    self.testCPUConsumed = gConfig.getValue(self.section + '/CheckCPUConsumedFlag', 1)
    self.testCPULimit = gConfig.getValue(self.section + '/CheckCPULimitFlag', 0)
    self.testMemoryLimit = gConfig.getValue(self.section + '/CheckMemoryLimitFlag', 0)
    self.testTimeLeft = gConfig.getValue(self.section + '/CheckTimeLeftFlag', 1)
    # Other parameters
    self.pollingTime = gConfig.getValue(self.section + '/PollingTime', 10)  # 10 seconds
    self.checkingTime = gConfig.getValue(self.section + '/CheckingTime', 30 * 60)  # 30 minute period
    self.minCheckingTime = gConfig.getValue(self.section + '/MinCheckingTime', 20 * 60)  # 20 mins
    self.maxWallClockTime = gConfig.getValue(self.section + '/MaxWallClockTime', 3 * 24 * 60 * 60)  # e.g. 4 days
    self.jobPeekFlag = gConfig.getValue(self.section + '/JobPeekFlag', 1)  # on / off
    self.minDiskSpace = gConfig.getValue(self.section + '/MinDiskSpace', 10)  # MB
    self.loadAvgLimit = gConfig.getValue(self.section + '/LoadAverageLimit', 1000)  # > 1000 and jobs killed
    self.sampleCPUTime = gConfig.getValue(self.section + '/CPUSampleTime', 30 * 60)  # e.g. up to 20mins sample
    self.jobCPUMargin = gConfig.getValue(self.section + '/JobCPULimitMargin', 20)  # %age buffer before killing job
    self.minCPUWallClockRatio = gConfig.getValue(self.section + '/MinCPUWallClockRatio', 5)  # ratio %age
    # After 5 sample times return null CPU consumption kill job
    self.nullCPULimit = gConfig.getValue(self.section + '/NullCPUCountLimit', 5)
    # Never check more often than the configured minimum period
    if self.checkingTime < self.minCheckingTime:
      self.log.info(
          'Requested CheckingTime of %s setting to %s seconds (minimum)' %
          (self.checkingTime, self.minCheckingTime))
      self.checkingTime = self.minCheckingTime
    # The time left is returned in seconds @ 250 SI00 = 1 HS06,
    # the self.checkingTime and self.pollingTime are in seconds,
    # thus they need to be multiplied by a large enough factor
    self.fineTimeLeftLimit = gConfig.getValue(self.section + '/TimeLeftLimit', 150 * self.pollingTime)
    self.scaleFactor = gConfig.getValue('/LocalSite/CPUScalingFactor', 1.0)
    return S_OK()
def run(self):
""" The main watchdog execution method
"""
result = self.initialize()
if not result['OK']:
self.log.always('Can not start watchdog for the following reason')
self.log.always(result['Message'])
return result
try:
while True:
self.log.debug('Starting watchdog loop # %d' % self.count)
start_cycle_time = time.time()
result = self.execute()
exec_cycle_time = time.time() - start_cycle_time
if not result['OK']:
self.log.error("Watchdog error during execution", result['Message'])
break
elif result['Value'] == "Ended":
break
self.count += 1
if exec_cycle_time < self.pollingTime:
time.sleep(self.pollingTime - exec_cycle_time)
return S_OK()
except Exception:
self.log.exception()
return S_ERROR('Exception')
  #############################################################################
  def execute(self):
    """ The main agent execution method of the Watchdog.

        Returns S_OK("Ended") once the monitored thread has finished;
        otherwise runs the periodic wall-clock / time-left / resource
        checks with their respective frequencies and returns S_OK().
    """
    if not self.exeThread.is_alive():
      self.__getUsageSummary()
      self.log.info('Process to monitor has completed, Watchdog will exit.')
      return S_OK("Ended")
    # WallClock checks every self.wallClockCheckSeconds, but only if StopSigRegex is defined in JDL
    if not self.stopSigSent and self.stopSigRegex is not None and (
        time.time() - self.initialValues['StartTime']) > self.wallClockCheckSeconds * self.wallClockCheckCount:
      self.wallClockCheckCount += 1
      self._performWallClockChecks()
    if self.littleTimeLeft:
      # if we have gone over enough iterations query again
      if self.littleTimeLeftCount == 0 and self.__timeLeft() == -1:
        self.checkError = JobMinorStatus.JOB_EXCEEDED_CPU
        self.log.error(self.checkError, self.timeLeft)
        self.__killRunningThread()
        return S_OK()
      else:
        self.littleTimeLeftCount -= 1
    # Note: need to poll regularly to see if the thread is alive
    # but only perform checks with a certain frequency
    if (time.time() - self.initialValues['StartTime']) > self.checkingTime * self.checkCount:
      self.checkCount += 1
      result = self._performChecks()
      if not result['OK']:
        self.log.warn('Problem during recent checks')
        self.log.warn(result['Message'])
      return S_OK()
    else:
      # self.log.debug('Application thread is alive: checking count is %s' %(self.checkCount))
      return S_OK()
#############################################################################
def _performWallClockChecks(self):
"""Watchdog performs the wall clock checks based on MJF. Signals are sent
to processes if we need to stop, but function always returns S_OK()
"""
mjf = MJF.MJF()
try:
wallClockSecondsLeft = mjf.getWallClockSecondsLeft()
except Exception as e:
# Just stop if we can't get the wall clock seconds left
return S_OK()
jobstartSeconds = mjf.getIntJobFeature('jobstart_secs')
if jobstartSeconds is None:
# Just stop if we don't know when the job started
return S_OK()
if (int(time.time()) > jobstartSeconds + self.stopSigStartSeconds) and \
(wallClockSecondsLeft < self.stopSigFinishSeconds + self.wallClockCheckSeconds):
# Need to send the signal! Assume it works to avoid sending the signal more than once
self.log.info('Sending signal to JobWrapper children', "(%s)" % self.stopSigNumber)
self.stopSigSent = True
try:
for childPid in getChildrenPIDs(self.wrapperPID):
try:
cmdline = open('/proc/%d/cmdline' % childPid, 'r').read().replace('\0', ' ').strip()
except IOError:
# Process gone away? Not running on Linux? Skip anyway
continue
if re.search(self.stopSigRegex, cmdline) is not None:
self.log.info(
'Sending signal %d to process ID %d, cmdline = "%s"' %
(self.stopSigNumber, childPid, cmdline))
os.kill(childPid, self.stopSigNumber)
except Exception as e:
self.log.error("Failed to send signals to JobWrapper children!", repr(e))
return S_OK()
  #############################################################################
  def _performChecks(self):
    """The Watchdog checks are performed at a different period to the checking of the
       application thread and correspond to the checkingTime.

       Collects a snapshot of load, memory, vsize/rss, disk space, CPU and
       wall-clock time; appends it to self.parameters; runs the progress
       checks; peeks at application output; and sends a heartbeat to the WMS.
    """
    self.log.verbose('------------------------------------')
    self.log.verbose('Checking loop starts for Watchdog')
    heartBeatDict = {}
    msg = ''

    # System load average (1-minute)
    loadAvg = float(os.getloadavg()[0])
    msg += 'LoadAvg: %d ' % loadAvg
    heartBeatDict['LoadAverage'] = loadAvg
    if 'LoadAverage' not in self.parameters:
      self.parameters['LoadAverage'] = []
    self.parameters['LoadAverage'].append(loadAvg)

    # Peak RSS of the wrapper + terminated children (kB, from getrusage)
    memoryUsed = self.getMemoryUsed()
    msg += 'MemUsed: %.1f kb ' % (memoryUsed)
    heartBeatDict['MemoryUsed'] = memoryUsed
    if 'MemoryUsed' not in self.parameters:
      self.parameters['MemoryUsed'] = []
    self.parameters['MemoryUsed'].append(memoryUsed)

    # Virtual memory size of the monitored process tree (profiler returns MB,
    # multiplied here by 1024 to report kB)
    result = self.profiler.vSizeUsage(withChildren=True)
    if not result['OK']:
      self.log.warn("Could not get vSize info from profiler", result['Message'])
    else:
      vsize = result['Value'] * 1024.
      heartBeatDict['Vsize'] = vsize
      self.parameters.setdefault('Vsize', [])
      self.parameters['Vsize'].append(vsize)
      msg += "Job Vsize: %.1f kb " % vsize

    # Resident set size of the monitored process tree
    result = self.profiler.memoryUsage(withChildren=True)
    if not result['OK']:
      self.log.warn("Could not get rss info from profiler", result['Message'])
    else:
      rss = result['Value'] * 1024.
      heartBeatDict['RSS'] = rss
      self.parameters.setdefault('RSS', [])
      self.parameters['RSS'].append(rss)
      msg += "Job RSS: %.1f kb " % rss

    if 'DiskSpace' not in self.parameters:
      self.parameters['DiskSpace'] = []
    # We exclude fuse so that mountpoints can be cleaned up by automount after a period unused
    # (specific request from CERN batch service).
    result = self.getDiskSpace(exclude='fuse')
    if not result['OK']:
      self.log.warn("Could not establish DiskSpace", result['Message'])
    else:
      msg += 'DiskSpace: %.1f MB ' % (result['Value'])
      self.parameters['DiskSpace'].append(result['Value'])
      heartBeatDict['AvailableDiskSpace'] = result['Value']

    # CPU consumed as HH:MM:SS; hmsCPU kept for the peek summary below
    cpu = self.__getCPU()
    if not cpu['OK']:
      msg += 'CPU: ERROR '
      hmsCPU = 0
    else:
      cpu = cpu['Value']
      msg += 'CPU: %s (h:m:s) ' % (cpu)
      if 'CPUConsumed' not in self.parameters:
        self.parameters['CPUConsumed'] = []
      self.parameters['CPUConsumed'].append(cpu)
      hmsCPU = cpu
      rawCPU = self.__convertCPUTime(hmsCPU)
      if rawCPU['OK']:
        heartBeatDict['CPUConsumed'] = rawCPU['Value']

    result = self.__getWallClockTime()
    if not result['OK']:
      self.log.warn("Failed determining wall clock time", result['Message'])
    else:
      msg += 'WallClock: %.2f s ' % (result['Value'])
      self.parameters.setdefault('WallClockTime', list()).append(result['Value'])
      # Heartbeat reports wall clock scaled by the number of processors
      heartBeatDict['WallClockTime'] = result['Value'] * self.processors
    self.log.info(msg)

    # Run all enabled progress checks; a failed check kills the job
    result = self._checkProgress()
    if not result['OK']:
      self.checkError = result['Message']
      self.log.warn(self.checkError)
      if self.jobPeekFlag:
        result = self.__peek()
        if result['OK']:
          outputList = result['Value']
          self.log.info('Last lines of available application output:')
          self.log.info('================START================')
          for line in outputList:
            self.log.info(line)
          self.log.info('=================END=================')
      self.__killRunningThread()
      return S_OK()

    # Peek at recent application stdout to report with the heartbeat
    recentStdOut = 'None'
    if self.jobPeekFlag:
      result = self.__peek()
      if result['OK']:
        outputList = result['Value']
        size = len(outputList)
        recentStdOut = 'Last %s lines of application output from Watchdog on %s [UTC]:' % (size, Time.dateTime())
        border = '=' * len(recentStdOut)
        cpuTotal = 'Last reported CPU consumed for job is %s (h:m:s)' % (hmsCPU)
        if self.timeLeft:
          cpuTotal += ', Batch Queue Time Left %s (s @ HS06)' % self.timeLeft
        recentStdOut = '\n%s\n%s\n%s\n%s\n' % (border, recentStdOut, cpuTotal, border)
        self.log.info(recentStdOut)
        for line in outputList:
          self.log.info(line)
          recentStdOut += line + '\n'
      else:
        recentStdOut = 'Watchdog is initializing and will attempt to obtain standard output from application thread'
        self.log.info(recentStdOut)
        self.peekFailCount += 1
        # Give up on peeking after peekRetry consecutive failures
        if self.peekFailCount > self.peekRetry:
          self.jobPeekFlag = 0
          self.log.warn('Turning off job peeking for remainder of execution')
    if 'JOBID' not in os.environ:
      self.log.info('Running without JOBID so parameters will not be reported')
      return S_OK()
    jobID = os.environ['JOBID']
    staticParamDict = {'StandardOutput': recentStdOut}
    self.__sendSignOfLife(int(jobID), heartBeatDict, staticParamDict)
    return S_OK('Watchdog checking cycle complete')
#############################################################################
def __getCPU(self):
"""Uses the profiler to get CPU time for current process, its child, and the terminated child,
and returns HH:MM:SS after conversion.
"""
result = self.profiler.cpuUsageUser(withChildren=True,
withTerminatedChildren=True)
if not result['OK']:
self.log.warn("Issue while checking consumed CPU for user", result['Message'])
if result['Errno'] == errno.ESRCH:
self.log.warn("The main process does not exist (anymore). This might be correct.")
return result
cpuUsageUser = result['Value']
result = self.profiler.cpuUsageSystem(withChildren=True,
withTerminatedChildren=True)
if not result['OK']:
self.log.warn("Issue while checking consumed CPU for system", result['Message'])
if result['Errno'] == errno.ESRCH:
self.log.warn("The main process does not exist (anymore). This might be correct.")
return result
cpuUsageSystem = result['Value']
cpuTimeTotal = cpuUsageUser + cpuUsageSystem
if cpuTimeTotal:
self.log.verbose("Raw CPU time consumed (s) =", cpuTimeTotal)
return self.__getCPUHMS(cpuTimeTotal)
self.log.error("CPU time consumed found to be 0")
return S_ERROR()
#############################################################################
def __getCPUHMS(self, cpuTime):
mins, secs = divmod(cpuTime, 60)
hours, mins = divmod(mins, 60)
humanTime = '%02d:%02d:%02d' % (hours, mins, secs)
self.log.verbose('Human readable CPU time is: %s' % humanTime)
return S_OK(humanTime)
#############################################################################
def __interpretControlSignal(self, signalDict):
"""This method is called whenever a signal is sent via the result of
sending a sign of life.
"""
self.log.info('Received control signal')
if isinstance(signalDict, dict):
if 'Kill' in signalDict:
self.log.info('Received Kill signal, stopping job via control signal')
self.checkError = JobMinorStatus.RECEIVED_KILL_SIGNAL
self.__killRunningThread()
else:
self.log.info('The following control signal was sent but not understood by the watchdog:')
self.log.info(signalDict)
else:
self.log.info("Expected dictionary for control signal", "received:\n%s" % (signalDict))
return S_OK()
  #############################################################################
  def _checkProgress(self):
    """This method calls specific tests to determine whether the job execution
       is proceeding normally. CS flags can easily be added to add or remove
       tests via central configuration.

       Returns the S_ERROR of the first failing check (which causes the job
       to be killed by the caller), except for the load-average check which
       only logs a warning.
    """
    report = ''
    if self.testWallClock:
      result = self.__checkWallClockTime()
      if not result['OK']:
        self.log.warn(result['Message'])
        return result
      report += 'WallClock: OK, '
    else:
      report += 'WallClock: NA,'

    if self.testDiskSpace:
      result = self.__checkDiskSpace()
      if not result['OK']:
        self.log.warn(result['Message'])
        return result
      report += 'DiskSpace: OK, '
    else:
      report += 'DiskSpace: NA,'

    if self.testLoadAvg:
      result = self.__checkLoadAverage()
      if not result['OK']:
        # A high load average is only reported, never fails the job; note that
        # the remaining checks are skipped for this cycle
        self.log.warn("Check of load average failed, but won't fail because of that",
                      ": %s" % result['Message'])
        report += 'LoadAverage: ERROR, '
        return S_OK()
      report += 'LoadAverage: OK, '
    else:
      report += 'LoadAverage: NA,'

    if self.testCPUConsumed:
      result = self.__checkCPUConsumed()
      if not result['OK']:
        return result
      report += 'CPUConsumed: OK, '
    else:
      report += 'CPUConsumed: NA, '

    if self.testCPULimit:
      result = self.__checkCPULimit()
      if not result['OK']:
        self.log.warn(result['Message'])
        return result
      report += 'CPULimit OK, '
    else:
      report += 'CPULimit: NA, '

    if self.testTimeLeft:
      # Updates self.timeLeft / self.littleTimeLeft as a side effect
      self.__timeLeft()
      if self.timeLeft:
        report += 'TimeLeft: OK'
    else:
      report += 'TimeLeft: NA'

    if self.testMemoryLimit:
      result = self.__checkMemoryLimit()
      if not result['OK']:
        self.log.warn(result['Message'])
        return result
      report += 'MemoryLimit OK, '
    else:
      report += 'MemoryLimit: NA, '

    self.log.info(report)
    return S_OK('All enabled checks passed')
  #############################################################################
  def __checkCPUConsumed(self):
    """ Checks whether the CPU consumed by application process is reasonable. This
        method will report stalled jobs to be killed.

        Compares CPU consumed against wall clock over the last sample window;
        a ratio below minCPUWallClockRatio marks the job as stalled, unless
        the payload has disabled the check via file or environment variable.
    """
    self.log.info("Checking CPU Consumed")

    if 'WallClockTime' not in self.parameters:
      return S_ERROR('Missing WallClockTime info')
    if 'CPUConsumed' not in self.parameters:
      return S_ERROR('Missing CPUConsumed info')

    wallClockTime = self.parameters['WallClockTime'][-1]
    if wallClockTime < self.sampleCPUTime:
      self.log.info("Stopping check, wallclock time is still smaller than sample time",
                    "(%s) < (%s)" % (wallClockTime, self.sampleCPUTime))
      return S_OK()

    # Number of snapshots spanning one sample window
    intervals = max(1, int(self.sampleCPUTime / self.checkingTime))
    if len(self.parameters['CPUConsumed']) < intervals + 1:
      self.log.info("Not enough snapshots to calculate", "there are %s and we need %s" %
                    (len(self.parameters['CPUConsumed']), intervals + 1))
      return S_OK()

    # Deltas over the sample window, not since job start
    wallClockTime = self.parameters['WallClockTime'][-1] - self.parameters['WallClockTime'][-1 - intervals]
    try:
      cpuTime = self.__convertCPUTime(self.parameters['CPUConsumed'][-1])['Value']
      # For some reason, some times the CPU consumed estimation returns 0
      # if cpuTime == 0:
      #   return S_OK()
      cpuTime -= self.__convertCPUTime(self.parameters['CPUConsumed'][-1 - intervals])['Value']
      if cpuTime < 0:
        self.log.warn('Consumed CPU time negative, something wrong may have happened, ignore')
        return S_OK()
      if wallClockTime <= 0:
        self.log.warn('Wallclock time should not be negative or zero, Ignore')
        return S_OK()

      ratio = (cpuTime / wallClockTime) * 100

      self.log.info("CPU/Wallclock ratio is %.2f%%" % ratio)
      # in case of error cpuTime might be 0, exclude this
      if ratio < self.minCPUWallClockRatio:
        if os.path.exists('DISABLE_WATCHDOG_CPU_WALLCLOCK_CHECK') or \
           'DISABLE_WATCHDOG_CPU_WALLCLOCK_CHECK' in os.environ:
          self.log.warn('N.B. job would be declared as stalled but CPU / WallClock check is disabled by payload')
          return S_OK()
        self.log.info("Job is stalled!")
        return S_ERROR(JobMinorStatus.WATCHDOG_STALLED)
    except Exception as e:
      # Malformed CPU strings are tolerated: log and treat the check as passed
      self.log.error("Cannot convert CPU consumed from string to int", str(e))

    return S_OK()
#############################################################################
def __convertCPUTime(self, cputime):
""" Method to convert the CPU time as returned from the Watchdog
instances to the equivalent DIRAC normalized CPU time to be compared
to the Job CPU requirement.
"""
cpuValue = 0
cpuHMS = cputime.split(':')
# for i in range( len( cpuHMS ) ):
# cpuHMS[i] = cpuHMS[i].replace( '00', '0' )
try:
hours = float(cpuHMS[0]) * 60 * 60
mins = float(cpuHMS[1]) * 60
secs = float(cpuHMS[2])
cpuValue = float(hours + mins + secs)
except Exception as x:
self.log.warn(str(x))
return S_ERROR('Could not calculate CPU time')
# Normalization to be implemented
normalizedCPUValue = cpuValue
result = S_OK()
result['Value'] = normalizedCPUValue
self.log.debug('CPU value %s converted to %s' % (cputime, normalizedCPUValue))
return result
#############################################################################
def __checkCPULimit(self):
""" Checks that the job has consumed more than the job CPU requirement
(plus a configurable margin) and kills them as necessary.
"""
consumedCPU = 0
if 'CPUConsumed' in self.parameters:
consumedCPU = self.parameters['CPUConsumed'][-1]
consumedCPUDict = self.__convertCPUTime(consumedCPU)
if consumedCPUDict['OK']:
currentCPU = consumedCPUDict['Value']
else:
return S_OK('Not possible to determine current CPU consumed')
if consumedCPU:
limit = int(self.jobCPUTime + self.jobCPUTime * (self.jobCPUMargin / 100))
cpuConsumed = float(currentCPU)
if cpuConsumed > limit:
self.log.info(
'Job has consumed more than the specified CPU limit', 'with an additional %s%% margin' %
(self.jobCPUMargin))
return S_ERROR('Job has exceeded maximum CPU time limit')
return S_OK('Job within CPU limit')
if not currentCPU:
self.log.verbose('Both initial and current CPU consumed are null')
return S_OK('CPU consumed is not measurable yet')
return S_OK('Not possible to determine CPU consumed')
def __checkMemoryLimit(self):
""" Checks that the job memory consumption is within a limit
"""
vsize = 0
if 'Vsize' in self.parameters:
vsize = self.parameters['Vsize'][-1]
if vsize and self.memoryLimit:
if vsize > self.memoryLimit:
vsize = vsize
# Just a warning for the moment
self.log.warn("Job has consumed %f.2 KB of memory with the limit of %f.2 KB" % (vsize, self.memoryLimit))
return S_OK()
#############################################################################
def __checkDiskSpace(self):
"""Checks whether the CS defined minimum disk space is available.
"""
if 'DiskSpace' in self.parameters:
availSpace = self.parameters['DiskSpace'][-1]
if availSpace >= 0 and availSpace < self.minDiskSpace:
self.log.info('Not enough local disk space for job to continue, defined in CS as %s MB' % (self.minDiskSpace))
return S_ERROR(JobMinorStatus.JOB_INSUFFICIENT_DISK)
else:
return S_OK('Job has enough disk space available')
else:
return S_ERROR('Available disk space could not be established')
#############################################################################
def __checkWallClockTime(self):
"""Checks whether the job has been running for the CS defined maximum
wall clock time.
"""
if 'StartTime' in self.initialValues:
startTime = self.initialValues['StartTime']
if time.time() - startTime > self.maxWallClockTime:
self.log.info('Job has exceeded maximum wall clock time of %s seconds' % (self.maxWallClockTime))
return S_ERROR(JobMinorStatus.JOB_EXCEEDED_WALL_CLOCK)
else:
return S_OK('Job within maximum wall clock time')
else:
return S_ERROR('Job start time could not be established')
#############################################################################
def __checkLoadAverage(self):
"""Checks whether the CS defined maximum load average is exceeded.
"""
if 'LoadAverage' in self.parameters:
loadAvg = self.parameters['LoadAverage'][-1]
if loadAvg > float(self.loadAvgLimit):
self.log.info('Maximum load average exceeded, defined in CS as %s ' % (self.loadAvgLimit))
return S_ERROR('Job exceeded maximum load average')
return S_OK('Job running with normal load average')
return S_ERROR('Job load average not established')
#############################################################################
def __peek(self):
""" Uses ExecutionThread.getOutput() method to obtain standard output
from running thread via subprocess callback function.
"""
result = self.exeThread.getOutput()
if not result['OK']:
self.log.warn('Could not obtain output from running application thread')
self.log.warn(result['Message'])
return result
  #############################################################################
  def calibrate(self):
    """ The calibrate method obtains the initial values for system memory and load
        and calculates the margin for error for the rest of the Watchdog cycle.

        Populates self.initialValues with the baseline measurements and
        resets the per-cycle lists in self.parameters; also reports static
        node information to the WMS.
    """
    # First call records StartTime in initialValues as a side effect
    self.__getWallClockTime()
    self.parameters['WallClockTime'] = []

    cpuConsumed = self.__getCPU()
    if not cpuConsumed['OK']:
      self.log.warn("Could not establish CPU consumed, setting to 0.0")
      cpuConsumed = 0.0
    else:
      cpuConsumed = cpuConsumed['Value']

    self.initialValues['CPUConsumed'] = cpuConsumed
    self.parameters['CPUConsumed'] = []

    self.initialValues['LoadAverage'] = float(os.getloadavg()[0])
    self.parameters['LoadAverage'] = []

    memUsed = self.getMemoryUsed()

    self.initialValues['MemoryUsed'] = memUsed
    self.parameters['MemoryUsed'] = []

    # Profiler returns MB; stored as kB
    result = self.profiler.vSizeUsage(withChildren=True)
    if not result['OK']:
      self.log.warn("Could not get vSize info from profiler", result['Message'])
    else:
      vsize = result['Value'] * 1024.
      self.initialValues['Vsize'] = vsize
      self.log.verbose("Vsize(kb)", "%.1f" % vsize)

    self.parameters['Vsize'] = []

    result = self.profiler.memoryUsage(withChildren=True)
    if not result['OK']:
      self.log.warn("Could not get rss info from profiler", result['Message'])
    else:
      rss = result['Value'] * 1024.
      self.initialValues['RSS'] = rss
      self.log.verbose("RSS(kb)", "%.1f" % rss)

    self.parameters['RSS'] = []

    # We exclude fuse so that mountpoints can be cleaned up by automount after a period unused
    # (specific request from CERN batch service).
    result = self.getDiskSpace(exclude='fuse')
    self.log.verbose('DiskSpace: %s' % (result))
    if not result['OK']:
      self.log.warn("Could not establish DiskSpace")
    else:
      self.initialValues['DiskSpace'] = result['Value']
    self.parameters['DiskSpace'] = []

    result = self.getNodeInformation()
    self.log.verbose('NodeInfo', result)

    # Pick up the local batch system job ID from whichever variable is set;
    # later matches override earlier ones
    if 'LSB_JOBID' in os.environ:
      result['LocalJobID'] = os.environ['LSB_JOBID']
    if 'PBS_JOBID' in os.environ:
      result['LocalJobID'] = os.environ['PBS_JOBID']
    if 'QSUB_REQNAME' in os.environ:
      result['LocalJobID'] = os.environ['QSUB_REQNAME']
    if 'JOB_ID' in os.environ:
      result['LocalJobID'] = os.environ['JOB_ID']

    self.__reportParameters(result, 'NodeInformation', True)
    self.__reportParameters(self.initialValues, 'InitialValues')
    return S_OK()
def __timeLeft(self):
"""
return Normalized CPU time left in the batch system
0 if not available
update self.timeLeft and self.littleTimeLeft
"""
# Get CPU time left in the batch system
result = self.timeLeftUtil.getTimeLeft(0.0)
if not result['OK']:
# Could not get CPU time left, we might need to wait for the first loop
# or the Utility is not working properly for this batch system
# or we are in a batch system
timeLeft = 0
else:
timeLeft = result['Value']
self.timeLeft = timeLeft
if not self.littleTimeLeft:
if timeLeft and timeLeft < self.grossTimeLeftLimit:
self.log.info('TimeLeft bellow %s, now checking with higher frequency' % timeLeft)
self.littleTimeLeft = True
# TODO: better configurable way of doing this to be coded
self.littleTimeLeftCount = 15
else:
if self.timeLeft and self.timeLeft < self.fineTimeLeftLimit:
timeLeft = -1
return timeLeft
  #############################################################################
  def __getUsageSummary(self):
    """ Returns average load, memory etc. over execution of job thread

        Builds a summary dict from the collected parameter histories,
        reports it to the WMS, and caches it in self.currentStats.
    """
    summary = {}
    # CPUConsumed
    if 'CPUConsumed' in self.parameters:
      cpuList = self.parameters['CPUConsumed']
      if cpuList:
        hmsCPU = cpuList[-1]
        rawCPU = self.__convertCPUTime(hmsCPU)
        if rawCPU['OK']:
          summary['LastUpdateCPU(s)'] = rawCPU['Value']
      else:
        summary['LastUpdateCPU(s)'] = 'Could not be estimated'
    # DiskSpace
    if 'DiskSpace' in self.parameters:
      space = self.parameters['DiskSpace']
      if space:
        # Absolute change with respect to the calibrated baseline
        value = abs(float(space[-1]) - float(self.initialValues['DiskSpace']))
        if value < 0:
          value = 0
        summary['DiskSpace(MB)'] = value
      else:
        summary['DiskSpace(MB)'] = 'Could not be estimated'
    # MemoryUsed
    if 'MemoryUsed' in self.parameters:
      memory = self.parameters['MemoryUsed']
      if memory:
        summary['MemoryUsed(kb)'] = abs(float(memory[-1]) - float(self.initialValues['MemoryUsed']))
      else:
        summary['MemoryUsed(kb)'] = 'Could not be estimated'
    # LoadAverage
    if 'LoadAverage' in self.parameters:
      laList = self.parameters['LoadAverage']
      if laList:
        summary['LoadAverage'] = sum(laList) / len(laList)
      else:
        summary['LoadAverage'] = 'Could not be estimated'

    result = self.__getWallClockTime()
    if not result['OK']:
      self.log.warn("Failed determining wall clock time", result['Message'])
      summary['WallClockTime(s)'] = 0
      summary['ScaledCPUTime(s)'] = 0
    else:
      wallClock = result['Value']
      # Both values are scaled by the number of allocated processors
      summary['WallClockTime(s)'] = wallClock * self.processors
      summary['ScaledCPUTime(s)'] = wallClock * self.scaleFactor * self.processors

    self.__reportParameters(summary, 'UsageSummary', True)
    self.currentStats = summary
  #############################################################################
  def __reportParameters(self, params, title=None, report=False):
    """Will report parameters for job.

       :param dict params: parameters to log (an S_OK-style dict with a
                           truthy 'Value' is unwrapped first)
       :param str title: optional heading for the log section
       :param bool report: when True, also sends the parameters to the WMS
    """
    try:
      parameters = []
      self.log.info('', '==========================================================')
      if title:
        self.log.info('Watchdog will report', title)
      else:
        self.log.info('Watchdog will report parameters')
      self.log.info('', '==========================================================')
      vals = params
      if 'Value' in params:
        if vals['Value']:
          vals = params['Value']
      # Only truthy values are logged/reported
      for k, v in vals.items():
        if v:
          self.log.info(str(k) + ' = ' + str(v))
          parameters.append([k, v])
      if report:
        self.__setJobParamList(parameters)

      self.log.info('', '==========================================================')
    except Exception as x:
      # Reporting must never break the watchdog cycle
      self.log.warn('Problem while reporting parameters')
      self.log.warn(repr(x))
#############################################################################
def __getWallClockTime(self):
""" Establishes the Wall Clock time spent since the Watchdog initialization"""
result = S_OK()
if 'StartTime' in self.initialValues:
currentTime = time.time()
wallClock = currentTime - self.initialValues['StartTime']
result['Value'] = wallClock
else:
self.initialValues['StartTime'] = time.time()
result['Value'] = 0.0
return result
#############################################################################
def __killRunningThread(self):
""" Will kill the running thread process and any child processes."""
self.log.info('Sending kill signal to application PID %s' % (self.spObject.getChildPID()))
result = self.spObject.killChild()
self.applicationKilled = True
self.log.info('Subprocess.killChild() returned:%s ' % (result))
return S_OK('Thread killed')
#############################################################################
def __sendSignOfLife(self, jobID, heartBeatDict, staticParamDict):
""" Sends sign of life 'heartbeat' signal and triggers control signal
interpretation.
"""
result = JobStateUpdateClient().sendHeartBeat(jobID, heartBeatDict, staticParamDict)
if not result['OK']:
self.log.warn('Problem sending sign of life')
self.log.warn(result)
if result['OK'] and result['Value']:
self.__interpretControlSignal(result['Value'])
return result
#############################################################################
def __setJobParamList(self, value):
"""Wraps around setJobParameters of state update client
"""
# job wrapper template sets the jobID variable
if 'JOBID' not in os.environ:
self.log.info('Running without JOBID so parameters will not be reported')
return S_OK()
jobID = os.environ['JOBID']
jobParam = JobStateUpdateClient().setJobParameters(int(jobID), value)
self.log.verbose('setJobParameters(%s,%s)' % (jobID, value))
if not jobParam['OK']:
self.log.warn(jobParam['Message'])
return jobParam
  #############################################################################
  def getNodeInformation(self):
    """ Retrieves all static system information

        Returns a plain dict (not S_OK) of host name, CPU frequency,
        memory, local account and CPU model/cache parsed from /proc/cpuinfo.
    """
    result = {}
    result["HostName"] = socket.gethostname()
    result["CPU(MHz)"] = psutil.cpu_freq()[0]
    # psutil.virtual_memory()[1] is the 'available' field, converted to kB here
    result["Memory(kB)"] = int(psutil.virtual_memory()[1] / 1024)
    result["LocalAccount"] = getpass.getuser()
    # NOTE(review): Linux-specific; assumes 'model name' is the 5th line of
    # /proc/cpuinfo -- may break on some kernels/architectures. Confirm.
    with open("/proc/cpuinfo", "r") as cpuinfo:
      info = cpuinfo.readlines()
      result["ModelName"] = info[4].split(':')[1].replace(' ', '').replace('\n', '')
      result["CacheSize(kB)"] = [x.strip().split(":")[1] for x in info if "cache size" in x][0].strip()
    return result
#############################################################################
def getMemoryUsed(self):
"""Obtains the memory used.
"""
mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + \
resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss
return float(mem)
#############################################################################
def getDiskSpace(self, exclude=None):
    """Obtains the available disk space.

    Returns an S_OK structure whose 'Value' is the available space as a
    float, or an S_ERROR structure with 'Value' set to -1.0 on failure.
    """
    result = S_OK()
    # NOTE: inside a method body, this name resolves to the *module-level*
    # getDiskSpace helper (imported elsewhere in the file), not to this
    # method — Python method scopes do not see class attributes.
    diskSpace = getDiskSpace(exclude=exclude)
    if diskSpace == -1:
        # The helper signals failure with -1; convert to S_ERROR but
        # still attach a float 'Value' so callers can read it uniformly.
        result = S_ERROR('Could not obtain disk usage')
        self.log.warn(' Could not obtain disk usage')
        result['Value'] = float(-1)
        return result
    result['Value'] = float(diskSpace)
    return result
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| yujikato/DIRAC | src/DIRAC/WorkloadManagementSystem/JobWrapper/Watchdog.py | Python | gpl-3.0 | 38,791 | [
"DIRAC"
] | c069e947d8e64055fff3c9d22a357284f99858250deb5a0a6c15bbe61428e1d1 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import sys
import itertools
import json
import platform
import re
import warnings
from time import sleep
from monty.json import MontyDecoder, MontyEncoder
from copy import deepcopy
from pymatgen import SETTINGS, __version__ as pmg_version
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
from pymatgen.entries.exp_entries import ExpEntry
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.sequence import get_chunks, PBar
"""
This module provides classes to interface with the Materials Project REST
API v2 to enable the creation of data structures and pymatgen objects using
Materials Project data.
To make use of the Materials API, you need to be a registered user of the
Materials Project, and obtain an API key by going to your dashboard at
https://www.materialsproject.org/dashboard.
"""
__author__ = "Shyue Ping Ong, Shreyas Cholia"
__credits__ = "Anubhav Jain"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Feb 22, 2013"
class MPRester:
    """
    A class to conveniently interface with the Materials Project REST
    interface. The recommended way to use MPRester is with the "with" context
    manager to ensure that sessions are properly closed after usage::

        with MPRester("API_KEY") as m:
            do_something

    MPRester uses the "requests" package, which provides for HTTP connection
    pooling. All connections are made via https for security.

    For more advanced uses of the Materials API, please consult the API
    documentation at https://github.com/materialsproject/mapidoc.

    Args:
        api_key (str): A String API key for accessing the MaterialsProject
            REST interface. Please obtain your API key at
            https://www.materialsproject.org/dashboard. If this is None,
            the code will check if there is a "PMG_MAPI_KEY" setting.
            If so, it will use that environment variable. This makes
            easier for heavy users to simply add this environment variable to
            their setups and MPRester can then be called without any arguments.
        endpoint (str): Url of endpoint to access the MaterialsProject REST
            interface. Defaults to the standard Materials Project REST
            address at "https://materialsproject.org/rest/v2", but
            can be changed to other urls implementing a similar interface.
        include_user_agent (bool): If True, will include a user agent with the
            HTTP request including information on pymatgen and system version
            making the API request. This helps MP support pymatgen users, and
            is similar to what most web browsers send with each page request.
            Set to False to disable the user agent.
    """

    # Property names accepted by /materials document queries (get_data etc.).
    supported_properties = ("energy", "energy_per_atom", "volume",
                            "formation_energy_per_atom", "nsites",
                            "unit_cell_formula", "pretty_formula",
                            "is_hubbard", "elements", "nelements",
                            "e_above_hull", "hubbards", "is_compatible",
                            "spacegroup", "task_ids", "band_gap", "density",
                            "icsd_id", "icsd_ids", "cif", "total_magnetization",
                            "material_id", "oxide_type", "tags", "elasticity")

    # Property names accepted by /tasks document queries (get_task_data).
    supported_task_properties = ("energy", "energy_per_atom", "volume",
                                 "formation_energy_per_atom", "nsites",
                                 "unit_cell_formula", "pretty_formula",
                                 "is_hubbard",
                                 "elements", "nelements", "e_above_hull",
                                 "hubbards",
                                 "is_compatible", "spacegroup",
                                 "band_gap", "density", "icsd_id", "cif")
def __init__(self, api_key=None, endpoint=None, include_user_agent=True):
    """Set up the API key, endpoint and HTTP session.

    See the class docstring for the meaning of the arguments.
    """
    if api_key is not None:
        self.api_key = api_key
    else:
        # Fall back to the PMG_MAPI_KEY setting (environment / config file).
        self.api_key = SETTINGS.get("PMG_MAPI_KEY", "")
    if endpoint is not None:
        self.preamble = endpoint
    else:
        self.preamble = SETTINGS.get("PMG_MAPI_ENDPOINT",
                                     "https://materialsproject.org/rest/v2")
    if self.preamble != "https://materialsproject.org/rest/v2":
        warnings.warn("Non-default endpoint used: {}".format(self.preamble))
    import requests
    # On Python 2, pybtex is an optional dependency required to decode
    # SNL-formatted query results; warn early if it is missing.
    if sys.version_info[0] < 3:
        try:
            from pybtex import __version__
        except ImportError:
            warnings.warn("If you query for structure data encoded using MP's "
                          "Structure Notation Language (SNL) format and you use "
                          "`mp_decode=True` (the default) for MPRester queries, "
                          "you should install dependencies via "
                          "`pip install pymatgen[matproj.snl]`.")
    # One pooled session for all requests; the API key travels in a header.
    self.session = requests.Session()
    self.session.headers = {"x-api-key": self.api_key}
    if include_user_agent:
        # Browser-style user agent: pymatgen, Python and OS versions.
        pymatgen_info = "pymatgen/"+pmg_version
        python_info = "Python/{}.{}.{}".format(
            sys.version_info.major, sys.version_info.minor, sys.version_info.micro)
        platform_info = "{}/{}".format(platform.system(), platform.release())
        self.session.headers["user-agent"] = "{} ({} {})".format(
            pymatgen_info, python_info, platform_info)
def __enter__(self):
    """Enter the runtime context of a "with" statement; the rester
    itself is the context object."""
    return self
def __exit__(self, exc_type, exc_val, exc_tb):
    """Leave the "with" context, closing the pooled HTTP session.

    Exceptions are never suppressed (implicitly returns None).
    """
    self.session.close()
def _make_request(self, sub_url, payload=None, method="GET",
                  mp_decode=True):
    """Issue a request against ``self.preamble + sub_url`` and return
    the "response" field of the decoded JSON reply.

    Args:
        sub_url (str): Path appended to the endpoint preamble.
        payload: Query params (GET) or form data (POST).
        method (str): "GET" or "POST".
        mp_decode (bool): If True, decode with MontyDecoder so pymatgen
            objects are reconstructed from the JSON.

    Raises:
        MPRestError: on any HTTP, decoding or API-reported error.
    """
    response = None
    url = self.preamble + sub_url
    try:
        if method == "POST":
            response = self.session.post(url, data=payload, verify=True)
        else:
            response = self.session.get(url, params=payload, verify=True)
        # The API reports application-level errors with HTTP 400 but a
        # valid JSON body, so 400 is parsed just like 200.
        if response.status_code in [200, 400]:
            if mp_decode:
                data = json.loads(response.text, cls=MontyDecoder)
            else:
                data = json.loads(response.text)
            if data["valid_response"]:
                if data.get("warning"):
                    warnings.warn(data["warning"])
                return data["response"]
            else:
                raise MPRestError(data["error"])
        raise MPRestError("REST query returned with error status code {}"
                          .format(response.status_code))
    except Exception as ex:
        # Wrap every failure in MPRestError, attaching the raw response
        # body when one was received (helps diagnose server-side issues).
        msg = "{}. Content: {}".format(str(ex), response.content) \
            if hasattr(response, "content") else str(ex)
        raise MPRestError(msg)
def get_materials_id_from_task_id(self, task_id):
    """Look up the current MP materials id for a task id (task ids can
    be old materials ids).

    Args:
        task_id (str): A task id.

    Returns:
        materials_id (str)
    """
    sub_url = "/materials/mid_from_tid/%s" % task_id
    return self._make_request(sub_url)
def get_materials_id_references(self, material_id):
    """Fetch all literature references for a materials id.

    Args:
        material_id (str): A material id.

    Returns:
        BibTeX (str)
    """
    sub_url = "/materials/%s/refs" % material_id
    return self._make_request(sub_url)
def get_data(self, chemsys_formula_id, data_type="vasp", prop=""):
    """Generic data fetch through the /materials endpoint; most other
    convenience methods delegate to this one.

    The REST reply is always a list of dicts of the form
    [{"material_id": material_id, "property_name": value}, ...].

    Args:
        chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
            formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
        data_type (str): "vasp" (computed) or "exp" (experimental).
        prop (str): One of MPRester.supported_task_properties, or ""
            for the default set of useful properties.
    """
    sub_url = "/materials/%s/%s" % (chemsys_formula_id, data_type)
    if prop:
        sub_url = sub_url + "/" + prop
    return self._make_request(sub_url)
def get_materials_ids(self, chemsys_formula):
    """List every materials id matching a formula or chemical system.

    Args:
        chemsys_formula (str): A chemical system (e.g., Li-Fe-O) or
            formula (e.g., Fe2O3).

    Returns:
        ([str]) List of all materials ids.
    """
    sub_url = "/materials/%s/mids" % chemsys_formula
    # Plain strings come back; no pymatgen decoding needed.
    return self._make_request(sub_url, mp_decode=False)
def get_doc(self, materials_id):
    """Fetch the entire materials-details document for one id.
    Use this judiciously — the document is large.

    Args:
        materials_id (str): E.g., mp-1143 for Al2O3

    Returns:
        Dict of the full JSON document shown on a materials details page.
    """
    sub_url = "/materials/%s/doc" % materials_id
    # Raw dict is wanted here, so skip MontyDecoder.
    return self._make_request(sub_url, mp_decode=False)
def get_task_data(self, chemsys_formula_id, prop=""):
    """Generic data fetch through the /tasks endpoint. Unlike
    :func:`get_data`, this queries the task collection for
    run-specific information.

    The REST reply is always a list of dicts of the form
    [{"material_id": material_id, "property_name": value}, ...].

    Args:
        chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
            formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
        prop (str): One of MPRester.supported_properties, or "" for the
            default set of useful properties.
    """
    sub_url = "/tasks/%s" % chemsys_formula_id
    if prop:
        sub_url = sub_url + "/" + prop
    return self._make_request(sub_url)
def get_structures(self, chemsys_formula_id, final=True):
    """Fetch all Structures matching a chemical system, formula or
    materials id.

    Args:
        chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
            formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
        final (bool): True for relaxed structures, False for the
            initial (pre-relaxation) ones.

    Returns:
        List of Structure objects.
    """
    if final:
        prop = "final_structure"
    else:
        prop = "initial_structure"
    structures = []
    for doc in self.get_data(chemsys_formula_id, prop=prop):
        structures.append(doc[prop])
    return structures
def find_structure(self, filename_or_structure):
    """
    Finds matching structures on the Materials Project site.

    Args:
        filename_or_structure: filename or Structure object

    Returns:
        A list of matching structures.

    Raises:
        MPRestError
    """
    try:
        if isinstance(filename_or_structure, str):
            s = Structure.from_file(filename_or_structure)
        elif isinstance(filename_or_structure, Structure):
            s = filename_or_structure
        else:
            raise MPRestError("Provide filename or Structure object.")
        # MontyEncoder makes the pymatgen Structure JSON-serializable.
        payload = {'structure': json.dumps(s.as_dict(), cls=MontyEncoder)}
        response = self.session.post(
            '{}/find_structure'.format(self.preamble), data=payload
        )
        # 400 replies still carry a JSON body with an "error" field.
        if response.status_code in [200, 400]:
            resp = json.loads(response.text, cls=MontyDecoder)
            if resp['valid_response']:
                return resp['response']
            else:
                raise MPRestError(resp["error"])
        raise MPRestError("REST error with status code {} and error {}"
                          .format(response.status_code, response.text))
    except Exception as ex:
        # Any failure (file I/O, JSON, HTTP) is re-raised as MPRestError.
        raise MPRestError(str(ex))
def get_entries(self, chemsys_formula_id_criteria, compatible_only=True,
                inc_structure=None, property_data=None,
                conventional_unit_cell=False, sort_by_e_above_hull=False):
    """
    Get a list of ComputedEntries or ComputedStructureEntries corresponding
    to a chemical system, formula, or materials_id or full criteria.

    Args:
        chemsys_formula_id_criteria (str/dict): A chemical system
            (e.g., Li-Fe-O), or formula (e.g., Fe2O3) or materials_id
            (e.g., mp-1234) or full Mongo-style dict criteria.
        compatible_only (bool): Whether to return only "compatible"
            entries, i.e. entries processed by the
            MaterialsProjectCompatibility class, which adjusts energies
            to allow mixing of GGA and GGA+U calculations.
        inc_structure (str): If None, ComputedEntries are returned. If
            "initial", ComputedStructureEntries with initial structures
            are returned. Otherwise, ComputedStructureEntries with
            final structures are returned.
        property_data (list): Additional properties to include in
            entry.data. If None, no data. Should be a subset of
            supported_properties.
        conventional_unit_cell (bool): Whether to get the standard
            conventional unit cell.
        sort_by_e_above_hull (bool): Whether to sort the entries by
            e_above_hull (queried as property_data if True).

    Returns:
        List of ComputedEntry or ComputedStructureEntry objects.
    """
    # TODO: This is a very hackish way of doing this. It should be fixed
    # on the REST end.
    params = ["run_type", "is_hubbard", "pseudo_potential", "hubbards",
              "potcar_symbols", "oxide_type"]
    props = ["energy", "unit_cell_formula", "task_id"] + params
    if sort_by_e_above_hull:
        # Sorting requires e_above_hull, so make sure it gets queried.
        if property_data and "e_above_hull" not in property_data:
            property_data.append("e_above_hull")
        elif not property_data:
            property_data = ["e_above_hull"]
    if property_data:
        props += property_data
    if inc_structure:
        if inc_structure == "initial":
            props.append("initial_structure")
        else:
            props.append("structure")

    if not isinstance(chemsys_formula_id_criteria, dict):
        criteria = MPRester.parse_criteria(chemsys_formula_id_criteria)
    else:
        criteria = chemsys_formula_id_criteria
    # Fix: previously this list was named "data" and then shadowed by a
    # per-entry dict of the same name inside the loop below; distinct
    # names remove the hazard. Loop var "l" also renamed (shadows look-alike).
    docs = self.query(criteria, props)

    entries = []
    for d in docs:
        d["potcar_symbols"] = [
            "%s %s" % (d["pseudo_potential"]["functional"], label)
            for label in d["pseudo_potential"]["labels"]]
        entry_data = {"oxide_type": d["oxide_type"]}
        if property_data:
            entry_data.update({k: d[k] for k in property_data})
        if not inc_structure:
            e = ComputedEntry(d["unit_cell_formula"], d["energy"],
                              parameters={k: d[k] for k in params},
                              data=entry_data,
                              entry_id=d["task_id"])
        else:
            prim = d["initial_structure"] if inc_structure == "initial" \
                else d["structure"]
            if conventional_unit_cell:
                # The conventional cell may hold more sites than the
                # returned cell; scale the total energy accordingly.
                s = SpacegroupAnalyzer(prim).get_conventional_standard_structure()
                energy = d["energy"] * (len(s) / len(prim))
            else:
                s = prim.copy()
                energy = d["energy"]
            e = ComputedStructureEntry(
                s, energy,
                parameters={k: d[k] for k in params},
                data=entry_data,
                entry_id=d["task_id"])
        entries.append(e)
    if compatible_only:
        from pymatgen.entries.compatibility import \
            MaterialsProjectCompatibility
        entries = MaterialsProjectCompatibility().process_entries(entries)
    if sort_by_e_above_hull:
        entries = sorted(entries, key=lambda entry: entry.data["e_above_hull"])
    return entries
def get_pourbaix_entries(self, chemsys):
    """
    A helper function to get all entries necessary to generate
    a pourbaix diagram from the rest interface.

    Args:
        chemsys ([str]): A list of elements comprising the chemical
            system, e.g. ['Li', 'Fe']

    Returns:
        List of PourbaixEntry objects (ion and solid entries).
    """
    from pymatgen.analysis.pourbaix_diagram import PourbaixEntry, IonEntry
    from pymatgen.analysis.phase_diagram import PhaseDiagram
    from pymatgen.core.ion import Ion
    from pymatgen.entries.compatibility import \
        MaterialsProjectAqueousCompatibility

    pbx_entries = []
    # Get ion entries first, because certain ions have reference
    # solids that aren't necessarily in the chemsys (Na2SO4)
    url = '/pourbaix_diagram/reference_data/' + '-'.join(chemsys)
    ion_data = self._make_request(url)
    ion_ref_comps = [Composition(d['Reference Solid']) for d in ion_data]
    # NOTE: may contain duplicate elements; de-duplicated via set() below.
    ion_ref_elts = list(itertools.chain.from_iterable(
        i.elements for i in ion_ref_comps))
    # O and H are always included: pourbaix stability is aqueous.
    ion_ref_entries = self.get_entries_in_chemsys(
        list(set([str(e) for e in ion_ref_elts] + ['O', 'H'])),
        property_data=['e_above_hull'], compatible_only=False)
    compat = MaterialsProjectAqueousCompatibility("Advanced")
    ion_ref_entries = compat.process_entries(ion_ref_entries)
    ion_ref_pd = PhaseDiagram(ion_ref_entries)

    # position the ion energies relative to most stable reference state
    for n, i_d in enumerate(ion_data):
        ion_entry = IonEntry(Ion.from_formula(i_d['Name']), i_d['Energy'])
        refs = [e for e in ion_ref_entries
                if e.composition.reduced_formula == i_d['Reference Solid']]
        if not refs:
            raise ValueError("Reference solid not contained in entry list")
        # Most stable polymorph of the reference solid = lowest e_above_hull.
        stable_ref = sorted(refs, key=lambda x: x.data['e_above_hull'])[0]
        rf = stable_ref.composition.get_reduced_composition_and_factor()[1]
        solid_diff = ion_ref_pd.get_form_energy(stable_ref) \
            - i_d['Reference solid energy'] * rf
        elt = i_d['Major_Elements'][0]
        # Scale the solid energy shift by the element count ratio between
        # the ion and its reference solid.
        correction_factor = ion_entry.ion.composition[elt] \
            / stable_ref.composition[elt]
        ion_entry.energy += solid_diff * correction_factor
        pbx_entries.append(PourbaixEntry(ion_entry, 'ion-{}'.format(n)))

    # Construct the solid pourbaix entries from filtered ion_ref entries
    extra_elts = set(ion_ref_elts) - {Element(s) for s in chemsys} \
        - {Element('H'), Element('O')}
    for entry in ion_ref_entries:
        entry_elts = set(entry.composition.elements)
        # Ensure no OH chemsys or extraneous elements from ion references
        if not (entry_elts <= {Element('H'), Element('O')} or \
                extra_elts.intersection(entry_elts)):
            # replace energy with formation energy, use dict to
            # avoid messing with the ion_ref_pd and to keep all old params
            form_e = ion_ref_pd.get_form_energy(entry)
            new_entry = deepcopy(entry)
            new_entry.uncorrected_energy = form_e
            new_entry.correction = 0.0
            pbx_entry = PourbaixEntry(new_entry)
            pbx_entries.append(pbx_entry)
    return pbx_entries
def get_structure_by_material_id(self, material_id, final=True,
                                 conventional_unit_cell=False):
    """Fetch the Structure for a single material_id.

    Args:
        material_id (str): Materials Project material_id (e.g., mp-1234).
        final (bool): True for the relaxed structure, False for the
            initial (pre-relaxation) one.
        conventional_unit_cell (bool): Whether to convert to the
            standard conventional unit cell.

    Returns:
        Structure object.
    """
    if final:
        prop = "final_structure"
    else:
        prop = "initial_structure"
    docs = self.get_data(material_id, prop=prop)
    structure = docs[0][prop]
    if conventional_unit_cell:
        structure = SpacegroupAnalyzer(structure). \
            get_conventional_standard_structure()
    return structure
def get_entry_by_material_id(self, material_id, compatible_only=True,
                             inc_structure=None, property_data=None,
                             conventional_unit_cell=False):
    """Fetch the single ComputedEntry for a material_id; a thin wrapper
    around :func:`get_entries` returning the first result.

    Args:
        material_id (str): Materials Project material_id (e.g., mp-1234).
        compatible_only (bool): Whether to return only "compatible"
            entries (processed with MaterialsProjectCompatibility so
            GGA and GGA+U calculations can be mixed).
        inc_structure (str): If None, a ComputedEntry is returned. If
            "final", a ComputedStructureEntry with the final structure;
            otherwise one with the initial structure.
        property_data (list): Additional properties for entry.data;
            subset of supported_properties. None for no data.
        conventional_unit_cell (bool): Whether to get the standard
            conventional unit cell.

    Returns:
        ComputedEntry or ComputedStructureEntry object.
    """
    entries = self.get_entries(material_id, compatible_only=compatible_only,
                               inc_structure=inc_structure,
                               property_data=property_data,
                               conventional_unit_cell=conventional_unit_cell)
    return entries[0]
def get_dos_by_material_id(self, material_id):
    """Fetch the density of states for a material_id.

    REST Endpoint: https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/dos

    Args:
        material_id (str): Materials Project material_id (e.g., mp-1234).

    Returns:
        A Dos object.
    """
    docs = self.get_data(material_id, prop="dos")
    return docs[0]["dos"]
def get_bandstructure_by_material_id(self, material_id, line_mode=True):
    """Fetch the band structure for a material_id.

    REST Endpoint: https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure or
    https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure_uniform

    Args:
        material_id (str): Materials Project material_id.
        line_mode (bool): True (default) for a BandStructureSymmLine
            along high-symmetry lines; False for the uniform band
            structure.

    Returns:
        A BandStructure object.
    """
    if line_mode:
        prop = "bandstructure"
    else:
        prop = "bandstructure_uniform"
    docs = self.get_data(material_id, prop=prop)
    return docs[0][prop]
def get_phonon_dos_by_material_id(self, material_id):
    """Fetch the phonon density of states for a material_id.

    Args:
        material_id (str): Materials Project material_id.

    Returns:
        CompletePhononDos: A phonon DOS object.
    """
    sub_url = "/materials/{}/phonondos".format(material_id)
    return self._make_request(sub_url)
def get_phonon_bandstructure_by_material_id(self, material_id):
    """Fetch the phonon dispersion for a material_id.

    Args:
        material_id (str): Materials Project material_id.

    Returns:
        PhononBandStructureSymmLine: A phonon band structure.
    """
    sub_url = "/materials/{}/phononbs".format(material_id)
    return self._make_request(sub_url)
def get_phonon_ddb_by_material_id(self, material_id):
    """Fetch the ABINIT Derivative Data Base (DDB) output of the phonon
    calculation for a material_id.

    Args:
        material_id (str): Materials Project material_id.

    Returns:
        str: ABINIT DDB file as a string.
    """
    sub_url = "/materials/{}/abinit_ddb".format(material_id)
    return self._make_request(sub_url)
def get_entries_in_chemsys(self, elements, compatible_only=True,
                           inc_structure=None, property_data=None,
                           conventional_unit_cell=False):
    """Fetch all ComputedEntries in a chemical system, including every
    sub-system. For example, elements = ["Li", "Fe", "O"] returns all
    entries in Li-Fe-O: LixOy, FexOy, LixFey, LixFeyOz, Li, Fe and O
    phases. Extremely useful for building phase diagrams.

    Args:
        elements ([str]): List of element symbols, e.g., ["Li", "Fe", "O"].
        compatible_only (bool): Whether to return only "compatible"
            entries (processed with MaterialsProjectCompatibility so
            GGA and GGA+U calculations can be mixed).
        inc_structure (str): If None, ComputedEntries are returned. If
            "final", ComputedStructureEntries with final structures;
            otherwise ones with initial structures.
        property_data (list): Additional properties for entry.data;
            subset of supported_properties. None for no data.
        conventional_unit_cell (bool): Whether to get the standard
            conventional unit cell.

    Returns:
        List of ComputedEntries.
    """
    all_entries = []
    # Query every non-empty subset of the element list, grouped by size.
    for n_elts in range(1, len(elements) + 1):
        for subsystem in itertools.combinations(elements, n_elts):
            all_entries.extend(
                self.get_entries(
                    "-".join(subsystem), compatible_only=compatible_only,
                    inc_structure=inc_structure,
                    property_data=property_data,
                    conventional_unit_cell=conventional_unit_cell))
    return all_entries
def get_exp_thermo_data(self, formula):
    """Fetch experimental thermochemical data for a formula.

    Args:
        formula (str): A formula to search for.

    Returns:
        List of ThermoData objects.
    """
    # The "exp" data type routes the request to the experimental store.
    return self.get_data(formula, data_type="exp")
def get_exp_entry(self, formula):
    """Build an ExpEntry — the experimental analogue of a
    ComputedEntry — for analyses based on experimental data.

    Args:
        formula (str): A formula to search for.

    Returns:
        An ExpEntry object.
    """
    thermo_data = self.get_exp_thermo_data(formula)
    return ExpEntry(Composition(formula), thermo_data)
def query(self, criteria, properties, chunk_size=500, max_tries_per_chunk=5,
          mp_decode=True):
    """
    Performs an advanced query using MongoDB-like syntax for directly
    querying the Materials Project database. This allows one to perform
    queries which are otherwise too cumbersome to perform using the standard
    convenience methods.

    Please consult the Materials API documentation at
    https://github.com/materialsproject/mapidoc, which provides a
    comprehensive explanation of the document schema used in the Materials
    Project (supported criteria and properties) and guidance on how best to
    query for the relevant information you need.

    For queries that request data on more than CHUNK_SIZE materials at once,
    this method will chunk a query by first retrieving a list of material
    IDs that satisfy CRITERIA, and then merging the criteria with a
    restriction to one chunk of materials at a time of size CHUNK_SIZE. You
    can opt out of this behavior by setting CHUNK_SIZE=0. To guard against
    intermittent server errors in the case of many chunks per query,
    possibly-transient server errors will result in re-trying a given chunk
    up to MAX_TRIES_PER_CHUNK times.

    Args:
        criteria (str/dict): Criteria of the query as a string or
            mongo-style dict. If string, it supports a simple string
            criteria: "Fe2O3" means search for materials with reduced
            formula Fe2O3; wild cards like "\\*2O" are supported
            (Li2O, K2O, etc.); "mp-1234" is a Materials ID; "Li-Fe-O"
            is a chemical system; space-separated terms are OR-ed.
            Full dict syntax allows arbitrary Mongo-style queries,
            e.g. {"elements": {"$in": ["Li", "Na", "K"], "$all": ["O"]},
            "nelements": 2} or {"band_gap": {"$gt": 1}}.
        properties (list): Properties to request, e.g.
            ["formula", "formation_energy_per_atom"].
        chunk_size (int): Number of materials to fetch per request.
            More data-intensive properties may require smaller chunks.
            chunk_size=0 forces a single unchunked request.
        max_tries_per_chunk (int): How many times to re-try fetching a
            given chunk when the server gives a 5xx error.
        mp_decode (bool): Whether to decode results into pymatgen
            objects where possible; set False for raw python dicts.

    Returns:
        List of result dicts.
    """
    if not isinstance(criteria, dict):
        criteria = self.parse_criteria(criteria)
    payload = {"criteria": json.dumps(criteria),
               "properties": json.dumps(properties)}
    if chunk_size == 0:
        return self._make_request(
            "/query", payload=payload, method="POST", mp_decode=mp_decode)

    # Ask for the match count first; small result sets are fetched in a
    # single request without the id-listing round trip.
    count_payload = payload.copy()
    count_payload["options"] = json.dumps({"count_only": True})
    num_results = self._make_request(
        "/query", payload=count_payload, method="POST")
    if num_results <= chunk_size:
        return self._make_request(
            "/query", payload=payload, method="POST", mp_decode=mp_decode)

    # Chunked fetch: list all matching ids, then query them in batches,
    # retrying transient (5xx) server errors per chunk.
    data = []
    mids = [d["material_id"] for d in
            self.query(criteria, ["material_id"], chunk_size=0)]
    chunks = get_chunks(mids, size=chunk_size)
    progress_bar = PBar(total=len(mids))
    for chunk in chunks:
        chunk_criteria = criteria.copy()
        chunk_criteria.update({"material_id": {"$in": chunk}})
        num_tries = 0
        while num_tries < max_tries_per_chunk:
            try:
                data.extend(self.query(chunk_criteria, properties,
                                       chunk_size=0, mp_decode=mp_decode))
                break
            except MPRestError as e:
                # Bug fix: Exception.message does not exist on Python 3;
                # inspect str(e) instead (previously this raised
                # AttributeError instead of retrying).
                match = re.search(r"error status code (\d+)", str(e))
                if match:
                    if not match.group(1).startswith("5"):
                        raise e
                    else:  # 5xx error. Try again
                        num_tries += 1
                        print(
                            "Unknown server error. Trying again in five "
                            "seconds (will try at most {} times)...".format(
                                max_tries_per_chunk))
                        sleep(5)
        progress_bar.update(len(chunk))
    return data
def submit_structures(self, structures, authors, projects=None,
                      references='', remarks=None, data=None,
                      histories=None, created_at=None):
    """
    Submits a list of structures to the Materials Project as SNL files.
    The argument list mirrors the arguments for the StructureNL object,
    except that a list of structures with the same metadata is used as an
    input.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        structures: A list of Structure objects
        authors (list): List of {"name":'', "email":''} dicts,
            *list* of Strings as 'John Doe <johndoe@gmail.com>',
            or a single String with commas separating authors
        projects ([str]): List of Strings ['Project A', 'Project B'].
            This applies to all structures.
        references (str): A String in BibTeX format. Again, this applies to
            all structures.
        remarks ([str]): List of Strings ['Remark A', 'Remark B']
        data ([dict]): A list of free form dict. Namespaced at the root
            level with an underscore, e.g. {"_materialsproject":<custom
            data>}. The length of data should be the same as the list of
            structures if not None.
        histories: List of list of dicts - [[{'name':'', 'url':'',
            'description':{}}], ...] The length of histories should be the
            same as the list of structures if not None.
        created_at (datetime): A datetime object

    Returns:
        A list of inserted submission ids.
    """
    from pymatgen.util.provenance import StructureNL
    snl_list = StructureNL.from_structures(structures, authors, projects,
                                           references, remarks, data,
                                           histories, created_at)
    # Bug fix: the inserted submission ids produced by submit_snl were
    # discarded, contradicting the documented return value.
    return self.submit_snl(snl_list)
def submit_snl(self, snl):
    """
    Submits a list of StructureNL to the Materials Project site.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        snl (StructureNL/[StructureNL]): A single StructureNL, or a list
            of StructureNL objects

    Returns:
        A list of inserted submission ids.

    Raises:
        MPRestError
    """
    try:
        # Normalize the argument to a list so single SNLs are accepted.
        snl = snl if isinstance(snl, list) else [snl]
        jsondata = [s.as_dict() for s in snl]
        # MontyEncoder handles pymatgen objects inside the SNL dicts.
        payload = {"snl": json.dumps(jsondata, cls=MontyEncoder)}
        response = self.session.post("{}/snl/submit".format(self.preamble),
                                     data=payload)
        # 400 replies still carry a JSON body with an "error" field.
        if response.status_code in [200, 400]:
            resp = json.loads(response.text, cls=MontyDecoder)
            if resp["valid_response"]:
                if resp.get("warning"):
                    warnings.warn(resp["warning"])
                return resp['inserted_ids']
            else:
                raise MPRestError(resp["error"])
        raise MPRestError("REST error with status code {} and error {}"
                          .format(response.status_code, response.text))
    except Exception as ex:
        # Any failure (serialization, HTTP, JSON) becomes MPRestError.
        raise MPRestError(str(ex))
def delete_snl(self, snl_ids):
    """
    Delete earlier submitted SNLs.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        snl_ids: List of SNL ids.

    Returns:
        The decoded server response dict.

    Raises:
        MPRestError
    """
    try:
        payload = {"ids": json.dumps(snl_ids)}
        response = self.session.post(
            "{}/snl/delete".format(self.preamble), data=payload)
        # 400 replies still carry a JSON body with an "error" field.
        if response.status_code in [200, 400]:
            resp = json.loads(response.text, cls=MontyDecoder)
            if resp["valid_response"]:
                if resp.get("warning"):
                    warnings.warn(resp["warning"])
                return resp
            else:
                raise MPRestError(resp["error"])
        raise MPRestError("REST error with status code {} and error {}"
                          .format(response.status_code, response.text))
    except Exception as ex:
        # Any failure (HTTP, JSON) is re-raised as MPRestError.
        raise MPRestError(str(ex))
def query_snl(self, criteria):
    """
    Query for submitted SNLs.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        criteria (dict): Query criteria.

    Returns:
        A dict, with a list of submitted SNLs in the "response" key.

    Raises:
        MPRestError
    """
    try:
        payload = {"criteria": json.dumps(criteria)}
        response = self.session.post("{}/snl/query".format(self.preamble),
                                     data=payload)
        # 400 replies still carry a JSON body with an "error" field.
        if response.status_code in [200, 400]:
            # Plain json.loads here (no MontyDecoder) — raw dicts returned.
            resp = json.loads(response.text)
            if resp["valid_response"]:
                if resp.get("warning"):
                    warnings.warn(resp["warning"])
                return resp["response"]
            else:
                raise MPRestError(resp["error"])
        raise MPRestError("REST error with status code {} and error {}"
                          .format(response.status_code, response.text))
    except Exception as ex:
        # Any failure (HTTP, JSON) is re-raised as MPRestError.
        raise MPRestError(str(ex))
    def submit_vasp_directory(self, rootdir, authors, projects=None,
                              references='', remarks=None, master_data=None,
                              master_history=None, created_at=None,
                              ncpus=None):
        """
        Assimilates all vasp run directories beneath a particular
        directory using BorgQueen to obtain structures, and then submits them
        to the Materials Project as SNL files. VASP related meta data like
        initial structure and final energies are automatically incorporated.
        .. note::
            As of now, this MP REST feature is open only to a select group of
            users. Opening up submissions to all users is being planned for
            the future.
        Args:
            rootdir (str): Rootdir to start assimilating VASP runs from.
            authors: *List* of {"name":'', "email":''} dicts,
                *list* of Strings as 'John Doe <johndoe@gmail.com>',
                or a single String with commas separating authors. The same
                list of authors should apply to all runs.
            projects ([str]): List of Strings ['Project A', 'Project B'].
                This applies to all structures.
            references (str): A String in BibTeX format. Again, this applies to
                all structures.
            remarks ([str]): List of Strings ['Remark A', 'Remark B']
            master_data (dict): A free form dict. Namespaced at the root
                level with an underscore, e.g. {"_materialsproject":<custom
                data>}. This data is added to all structures detected in the
                directory, in addition to other vasp data on a per structure
                basis.
            master_history: A master history to be added to all entries.
            created_at (datetime): A datetime object
            ncpus (int): Number of cpus to use in using BorgQueen to
                assimilate. Defaults to None, which means serial.
        Returns:
            Whatever submit_structures returns for the assimilated runs.
        """
        # Lazy imports: borg machinery is only needed for this one method.
        from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
        from pymatgen.apps.borg.queen import BorgQueen
        drone = VaspToComputedEntryDrone(inc_structure=True,
                                         data=["filename",
                                               "initial_structure"])
        queen = BorgQueen(drone, number_of_drones=ncpus)
        queen.parallel_assimilate(rootdir)
        structures = []
        metadata = []
        histories = []
        for e in queen.get_data():
            structures.append(e.structure)
            # Per-structure VASP provenance, namespaced under "_vasp".
            m = {
                "_vasp": {
                    "parameters": e.parameters,
                    "final_energy": e.energy,
                    "final_energy_per_atom": e.energy_per_atom,
                    "initial_structure": e.data["initial_structure"].as_dict()
                }
            }
            if "history" in e.parameters:
                histories.append(e.parameters["history"])
            if master_data is not None:
                m.update(master_data)
            metadata.append(m)
        if master_history is not None:
            # NOTE(review): this repeats the *elements* of master_history; if
            # a single history should be attached to each structure,
            # [master_history] * len(structures) may be intended -- confirm
            # against submit_structures' expected `histories` shape.
            histories = master_history * len(structures)
        return self.submit_structures(
            structures, authors, projects=projects, references=references,
            remarks=remarks, data=metadata, histories=histories,
            created_at=created_at)
def get_stability(self, entries):
"""
Returns the stability of all entries.
"""
try:
payload = {"entries": json.dumps(entries, cls=MontyEncoder)}
response = self.session.post("{}/phase_diagram/calculate_stability"
.format(self.preamble), data=payload)
if response.status_code in [200, 400]:
resp = json.loads(response.text, cls=MontyDecoder)
if resp["valid_response"]:
if resp.get("warning"):
warnings.warn(resp["warning"])
return resp["response"]
else:
raise MPRestError(resp["error"])
raise MPRestError("REST error with status code {} and error {}"
.format(response.status_code, response.text))
except Exception as ex:
raise MPRestError(str(ex))
    def get_cohesive_energy(self, material_id, per_atom=False):
        """
        Gets the cohesive energy for a material (eV per formula unit).
        Cohesive energy is defined as the difference between the bulk energy
        and the sum of total DFT energy of isolated atoms for atom elements
        in the bulk.
        Args:
            material_id (str): Materials Project material_id, e.g. 'mp-123'.
            per_atom (bool): Whether or not to return cohesive energy per atom
        Returns:
            Cohesive energy (eV).
        """
        entry = self.get_entry_by_material_id(material_id)
        # Normalize the bulk energy to one reduced formula unit.
        ebulk = entry.energy / \
            entry.composition.get_integer_formula_and_factor()[1]
        comp_dict = entry.composition.reduced_composition.as_dict()
        isolated_atom_e_sum, n = 0, 0
        for el in comp_dict.keys():
            # Isolated-atom reference energies come from dedicated MP tasks.
            # NOTE(review): assumes the first task in the returned list is the
            # correct reference -- confirm the endpoint's ordering guarantee.
            e = self._make_request("/element/%s/tasks/isolated_atom" % (el),
                                  mp_decode=False)[0]
            isolated_atom_e_sum += e['output']["final_energy"] * comp_dict[el]
            n += comp_dict[el]
        ecoh_per_formula = isolated_atom_e_sum - ebulk
        # n is the number of atoms in the reduced formula unit.
        return ecoh_per_formula/n if per_atom else ecoh_per_formula
def get_reaction(self, reactants, products):
"""
Gets a reaction from the Materials Project.
Args:
reactants ([str]): List of formulas
products ([str]): List of formulas
Returns:
rxn
"""
return self._make_request("/reaction",
payload={"reactants[]": reactants,
"products[]": products}, mp_decode=False)
def get_substrates(self, material_id, number=50, orient=None):
"""
Get a substrate list for a material id. The list is in order of
increasing elastic energy if a elastic tensor is available for
the material_id. Otherwise the list is in order of increasing
matching area.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
orient (list) : substrate orientation to look for
number (int) : number of substrates to return;
n=0 returns all available matches
Returns:
list of dicts with substrate matches
"""
req = "/materials/{}/substrates?n={}".format(material_id, number)
if orient:
req += "&orient={}".format(" ".join(map(str, orient)))
return self._make_request(req)
def get_all_substrates(self):
"""
Gets the list of all possible substrates considered in the
Materials Project substrate database
Returns:
list of material_ids corresponding to possible substrates
"""
return self._make_request("/materials/all_substrate_ids")
def get_surface_data(self, material_id, inc_structures=False):
"""
Gets surface data for a material. Useful for Wulff shapes.
Reference for surface data:
Tran, R., Xu, Z., Radhakrishnan, B., Winston, D., Sun, W., Persson, K.
A., & Ong, S. P. (2016). Data Descripter: Surface energies of elemental
crystals. Scientific Data, 3(160080), 1–13.
http://dx.doi.org/10.1038/sdata.2016.80
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
inc_structures (bool): Include final surface slab structures.
These are unnecessary for Wulff shape construction.
Returns:
Surface data for material. Energies are given in SI units (J/m^2).
"""
req = "/materials/{}/surfaces".format(material_id)
if inc_structures:
req += "?include_structures=true"
return self._make_request(req)
def get_wulff_shape(self, material_id):
"""
Constructs a Wulff shape for a material.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
Returns:
pymatgen.analysis.wulff.WulffShape
"""
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.wulff import WulffShape, hkl_tuple_to_str
structure = self.get_structure_by_material_id(material_id)
surfaces = self.get_surface_data(material_id)["surfaces"]
lattice = (SpacegroupAnalyzer(structure)
.get_conventional_standard_structure().lattice)
miller_energy_map = {}
for surf in surfaces:
miller = tuple(surf["miller_index"])
# Prefer reconstructed surfaces, which have lower surface energies.
if (miller not in miller_energy_map) or surf["is_reconstructed"]:
miller_energy_map[miller] = surf["surface_energy"]
millers, energies = zip(*miller_energy_map.items())
return WulffShape(lattice, millers, energies)
def get_interface_reactions(self, reactant1, reactant2,
open_el=None, relative_mu=None,
use_hull_energy=False):
"""
Gets critical reactions between two reactants.
Get critical reactions ("kinks" in the mixing ratio where
reaction products change) between two reactants. See the
`pymatgen.analysis.interface_reactions` module for more info.
Args:
reactant1 (str): Chemical formula for reactant
reactant2 (str): Chemical formula for reactant
open_el (str): Element in reservoir available to system
relative_mu (float): Relative chemical potential of element in
reservoir with respect to pure substance. Must be non-positive.
use_hull_energy (bool): Whether to use the convex hull energy for a
given composition for the reaction energy calculation. If false,
the energy of the ground state structure will be preferred; if a
ground state can not be found for a composition, the convex hull
energy will be used with a warning message.
Returns:
list: list of dicts of form {ratio,energy,rxn} where `ratio` is the
reactant mixing ratio, `energy` is the reaction energy
in eV/atom, and `rxn` is a
`pymatgen.analysis.reaction_calculator.Reaction`.
"""
payload = {"reactants": " ".join([reactant1, reactant2]),
"open_el": open_el,
"relative_mu": relative_mu,
"use_hull_energy": use_hull_energy}
return self._make_request("/interface_reactions",
payload=payload, method="POST")
    @staticmethod
    def parse_criteria(criteria_string):
        """
        Parses a powerful and simple string criteria and generates a proper
        mongo syntax criteria.
        Args:
            criteria_string (str): A string representing a search criteria.
                Also supports wild cards. E.g.,
                something like "*2O" gets converted to
                {'pretty_formula': {'$in': [u'B2O', u'Xe2O', u"Li2O", ...]}}
                Other syntax examples:
                mp-1234: Interpreted as a Materials ID.
                Fe2O3 or *2O3: Interpreted as reduced formulas.
                Li-Fe-O or *-Fe-O: Interpreted as chemical systems.
                You can mix and match with spaces, which are interpreted as
                "OR". E.g., "mp-1234 FeO" means query for all compounds with
                reduced formula FeO or with materials_id mp-1234.
        Returns:
            A mongo query dict.
        """
        toks = criteria_string.split()
        def parse_sym(sym):
            # "*" expands to every known element; "{A,B,...}" to an explicit
            # set; anything else is kept verbatim as a single symbol.
            if sym == "*":
                return [el.symbol for el in Element]
            else:
                m = re.match(r"\{(.*)\}", sym)
                if m:
                    return [s.strip() for s in m.group(1).split(",")]
                else:
                    return [sym]
        def parse_tok(t):
            # Word-dash-digits (e.g. "mp-1234") is treated as a task id.
            if re.match(r"\w+-\d+", t):
                return {"task_id": t}
            elif "-" in t:
                # Chemical system: expand any wildcard per dash-separated
                # slot, keep only combinations of distinct elements, and
                # canonicalize each chemsys by sorting its symbols.
                elements = [parse_sym(sym) for sym in t.split("-")]
                chemsyss = []
                for cs in itertools.product(*elements):
                    if len(set(cs)) == len(cs):
                        # Check for valid symbols
                        cs = [Element(s).symbol for s in cs]
                        chemsyss.append("-".join(sorted(cs)))
                return {"chemsys": {"$in": chemsyss}}
            else:
                # Formula, possibly with wildcards: first count how many
                # distinct elements the pattern implies, then expand every
                # wildcard and keep compositions with that element count.
                all_formulas = set()
                explicit_els = []
                wild_card_els = []
                for sym in re.findall(
                        r"(\*[\.\d]*|\{.*\}[\.\d]*|[A-Z][a-z]*)[\.\d]*", t):
                    if ("*" in sym) or ("{" in sym):
                        wild_card_els.append(sym)
                    else:
                        m = re.match(r"([A-Z][a-z]*)[\.\d]*", sym)
                        explicit_els.append(m.group(1))
                nelements = len(wild_card_els) + len(set(explicit_els))
                parts = re.split(r"(\*|\{.*\})", t)
                parts = [parse_sym(s) for s in parts if s != ""]
                for f in itertools.product(*parts):
                    c = Composition("".join(f))
                    if len(c) == nelements:
                        # Check for valid Elements in keys.
                        for e in c.keys():
                            Element(e.symbol)
                        all_formulas.add(c.reduced_formula)
                return {"pretty_formula": {"$in": list(all_formulas)}}
        if len(toks) == 1:
            return parse_tok(toks[0])
        else:
            # Multiple whitespace-separated tokens are OR'ed together.
            return {"$or": list(map(parse_tok, toks))}
class MPRestError(Exception):
    """
    Exception raised for errors during Materials Project REST requests,
    e.g. a bad query format or an unexpected HTTP response.
    """
    pass
| dongsenfo/pymatgen | pymatgen/ext/matproj.py | Python | mit | 55,884 | [
"ABINIT",
"VASP",
"pymatgen"
] | a4cf6aa8e4eb528c6026ed2661f77cf0ee421afec9a40ee04d8cbb7c2dacde63 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import re
# Boolean-ish user-input matchers. Note the placement of ``$``: it binds only
# to the last (numeric) alternative, so "yes"/"true"/"on" are matched as
# prefixes while "1"/"0" must be the entire string.
yes = re.compile(r'^(yes|true|on|1$)', re.IGNORECASE)
no = re.compile(r'^(no|false|off|0$)', re.IGNORECASE)
# Derivative-level matchers: 0th = energy, 1st = gradient, 2nd = hessian.
# These match as prefixes (no trailing anchor).
der0th = re.compile(r'^(0|none|energy)', re.IGNORECASE)
der1st = re.compile(r'^(1|first|gradient)', re.IGNORECASE)
der2nd = re.compile(r'^(2|second|hessian)', re.IGNORECASE)
| CDSherrill/psi4 | psi4/driver/p4util/p4regex.py | Python | lgpl-3.0 | 1,207 | [
"Psi4"
] | 9291304be731477462987fd776fb9ce4140dc4fd3cc7fbfa6473b18d974d9ae4 |
from grid_exper import *
from plastk.plot import GPlot
from Scientific.IO.NetCDF import NetCDFFile as CDF
# NOTE(review): ``exp`` and ``pkl`` come from the wildcard import of
# grid_exper above -- presumably an experiment object and a file-matching
# utility; confirm against grid_exper.py. (Python 2 code: ``iteritems``.)
for c in exp.conditions:
    names = pkl.files_matching('*%s*%s*-episodes.cdf'%(c['agent_name'],c['grid_name']))
    files = [CDF(f) for f in names]
    # One episode-length series per run: NetCDF variable "length", column 0.
    c['data'] = [f.variables['length'][:,0] for f in files]
plots = dict(grid1 = GPlot(), grid2=GPlot())
for k,p in plots.iteritems():
    # Log-scaled y axis so large early episode lengths stay readable.
    p('set logscale y')
    p.xlabel('Episodes')
    p.ylabel('Steps')
    p.title('Episode length for %s'%k.capitalize())
for c in exp.conditions:
    grid_name = c['grid_name']
    agent_name = c['agent_name']
    data = c['data']
    # Average across runs, subsampling every 10 episodes; replot=1 overlays
    # each agent's curve on the same grid's figure.
    plots[grid_name].plot_avg(y = data,title=agent_name,replot=1,step=10)
| ronaldahmed/robot-navigation | neural-navigation-with-lstm/MARCO/plastk/examples/grid_exper_analyze.py | Python | mit | 721 | [
"NetCDF"
] | bf3570d40d0c1f3fa6ab443b4c78f9bedbfbabcc8089ae924c657968eb9490ff |
"""
Utility functions for working with bam files.
"""
import os
import shutil
import subprocess
from django.conf import settings
import pysam
import numpy as np
from utils import convert_fasta_to_fastq
# Absolute path to the bundled bwa executable under the project's tools dir.
BWA_BINARY = os.path.join(settings.TOOLS_DIR, 'bwa/bwa')
def clipping_stats(bam_path, sample_size=1000):
    """Estimate read-end clipping statistics for a BAM file.

    Samples up to ``sample_size`` mapped reads and, for each, records the
    larger of the soft/hard clip lengths at either end of the CIGAR string.

    Returns:
        dict with the 'mean' and 'std' of the sampled clip lengths.
    """
    # CIGAR operation codes: 4 = soft clip, 5 = hard clip.
    CLIP_OPS = (4, 5)
    alignment = pysam.AlignmentFile(bam_path)
    limit = min(sample_size, alignment.mapped)
    clip_lengths = []
    seen = 0
    for read in alignment:
        if read.is_unmapped:
            continue
        head_op, head_len = read.cigartuples[0]
        tail_op, tail_len = read.cigartuples[-1]
        head_clip = head_len if head_op in CLIP_OPS else 0
        tail_clip = tail_len if tail_op in CLIP_OPS else 0
        clip_lengths.append(max(head_clip, tail_clip))
        seen += 1
        if seen == limit:
            break
    return {'mean': np.mean(clip_lengths),
            'std': np.std(clip_lengths)}
def index_bam(bam):
    """Build a BAM index (.bai) for ``bam`` via ``samtools index``."""
    command = "{samtools} index {bam}".format(
        bam=bam,
        samtools=settings.SAMTOOLS_BINARY)
    subprocess.call(command, shell=True, executable=settings.BASH_PATH)
def sort_bam_by_name(input_bam, output_bam=None):
    """Sort a BAM by read name (``samtools sort -n``).

    If ``output_bam`` is omitted, the input is sorted in place (same path).
    """
    target = input_bam if output_bam is None else output_bam
    # samtools 0.x takes an output *prefix* and appends .bam itself.
    prefix = os.path.splitext(target)[0]
    command = "{samtools} sort -n {input_bam} {output_bam_prefix}".format(
        samtools=settings.SAMTOOLS_BINARY,
        input_bam=input_bam,
        output_bam_prefix=prefix)
    subprocess.call(command, shell=True, executable=settings.BASH_PATH)
def sort_bam_by_coordinate(input_bam, output_bam=None):
    """Sort a BAM by coordinate (``samtools sort``).

    If ``output_bam`` is omitted, the input is sorted in place (same path).
    """
    target = input_bam if output_bam is None else output_bam
    # samtools 0.x takes an output *prefix* and appends .bam itself.
    prefix = os.path.splitext(target)[0]
    command = "{samtools} sort {input_bam} {output_bam_prefix}".format(
        samtools=settings.SAMTOOLS_BINARY,
        input_bam=input_bam,
        output_bam_prefix=prefix)
    subprocess.call(command, shell=True, executable=settings.BASH_PATH)
def make_sam(bam, sam_filename=None):
    """Convert a BAM to SAM, header included (``samtools view -h``).

    Defaults the output path to the input path with a .sam extension.
    """
    if sam_filename is None:
        base, _ = os.path.splitext(bam)
        sam_filename = base + ".sam"
    command = "{samtools} view -h {bam} > {sam}".format(
        samtools=settings.SAMTOOLS_BINARY,
        sam=sam_filename,
        bam=bam)
    subprocess.call(command, shell=True, executable=settings.BASH_PATH)
def make_bam(sam, bam_filename=None):
    """Convert a SAM to BAM (``samtools view -b -S``).

    Defaults the output path to the input path with a .bam extension.
    """
    if bam_filename is None:
        base, _ = os.path.splitext(sam)
        bam_filename = base + ".bam"
    command = "{samtools} view -b -S {sam} > {bam}".format(
        samtools=settings.SAMTOOLS_BINARY,
        bam=bam_filename,
        sam=sam)
    subprocess.call(command, shell=True, executable=settings.BASH_PATH)
def concatenate_bams(bam_list, output):
    """Concatenate several BAM files into one with ``samtools cat``."""
    joined_paths = " ".join(bam_list)
    command = "{samtools} cat -o {output} {bam_files}".format(
        samtools=settings.SAMTOOLS_BINARY,
        output=output,
        bam_files=joined_paths)
    subprocess.call(command, shell=True, executable=settings.BASH_PATH)
def rmdup(input_bam_file, output_bam_file):
    """ Remove duplicate lines from a SAM file, implemented with sort/uniq
        as samtools 0.1.20 has known bugs with its rmdup function

        NOTE(review): this removes byte-for-byte identical alignment records
        (sort | uniq), not positional duplicates the way ``samtools rmdup``
        would -- confirm that is the intended semantics for callers.
    """
    # Store input sam header
    output_sam = os.path.splitext(output_bam_file)[0] + ".sam"
    subprocess.check_call(
        ' '.join(
            [settings.SAMTOOLS_BINARY, 'view', '-H', '-o',
             output_sam, input_bam_file]),
        shell=True, executable=settings.BASH_PATH)
    # Convert input bam to sam, sort, remove duplicate adjacent lines,
    # and append to header
    cmd = ' | '.join([
        settings.SAMTOOLS_BINARY + ' view ' + input_bam_file,
        'sort',
        'uniq'
    ]) + ' >> ' + output_sam
    subprocess.check_call(cmd, shell=True, executable=settings.BASH_PATH)
    make_bam(output_sam, output_bam_file)
    # delete SAM intermediate file.
    os.remove(output_sam)
def filter_bam_file_by_row(input_bam_path, filter_fn, output_bam_path):
    """Filters rows out of a bam file that don't pass a given filter function.
    This function keeps all header lines.
    Args:
        input_bam_path: Absolute path to input bam file.
        filter_fn: Function applied to each row of the input bam and returns a
            Boolean. If True, keeps the row.
        output_bam_path: Absolute path to the output bam file.
    """
    base = os.path.splitext(output_bam_path)[0]
    raw_sam = base + '.sam'
    kept_sam = base + '.filtered.sam'
    kept_bam = base + '.filtered.bam'
    # Dump to SAM, keeping the header (-h).
    with open(raw_sam, 'w') as out:
        subprocess.call(
            [settings.SAMTOOLS_BINARY, 'view', '-h', input_bam_path],
            stdout=out)
    # Header lines ('@' prefix) pass unconditionally; alignment rows only
    # when filter_fn accepts them.
    with open(kept_sam, 'w') as out, open(raw_sam) as src:
        for row in src:
            if row[0] == '@' or filter_fn(row):
                out.write(row)
    # Re-encode the filtered SAM as BAM.
    with open(kept_bam, 'w') as out:
        subprocess.call(
            [settings.SAMTOOLS_BINARY, 'view', '-bS', kept_sam],
            stdout=out)
    # Replace the target path with the result and drop intermediates.
    shutil.move(kept_bam, output_bam_path)
    os.remove(raw_sam)
    os.remove(kept_sam)
def minimal_bwa_align(reads, ref_fasta, data_dir):
    """Align reads to a reference with bwa mem; return a sorted, indexed BAM.

    Args:
        reads: List of read file paths; all must end in .fa (converted to
            fastq first) or all in .fq.
        ref_fasta: Path to the reference fasta (bwa-indexed in place).
        data_dir: Directory for intermediate and output files.

    Returns:
        Path to the coordinate-sorted, indexed alignment BAM.
    """
    # 0. Interpret reads file type
    if all([r.endswith(".fa") for r in reads]):
        # NOTE(review): os.path.splitext(r)[0] keeps r's directory
        # components, so the .fq only lands directly under data_dir when
        # reads are bare filenames -- confirm callers.
        reads_fq = [os.path.join(data_dir, os.path.splitext(r)[0] + ".fq")
                    for r in reads]
        for i, r in enumerate(reads):
            convert_fasta_to_fastq(r, reads_fq[i])
    elif all([r.endswith(".fq") for r in reads]):
        reads_fq = reads
    else:
        raise(Exception("All reads must have file extension .fq or .fa"))
    filename_prefix = os.path.join(data_dir, "bwa_align.alignment")
    # 1. bwa index ref.fa #TODO: Check if already indexed
    cmd = "{bwa} index {ref_path}".format(
        bwa=BWA_BINARY,
        ref_path=ref_fasta)
    subprocess.call(cmd, shell=True, executable=settings.BASH_PATH)
    # 2. bwa mem ref.fa contigs.fq > alignment.sam
    alignment_sam = filename_prefix + ".sam"
    cmd = "{bwa} mem {ref_fasta} {contigs_fastq} > {alignment_sam}".format(
        bwa=BWA_BINARY,
        contigs_fastq=" ".join(reads_fq),
        ref_fasta=ref_fasta,
        alignment_sam=alignment_sam)
    subprocess.call(cmd, shell=True, executable=settings.BASH_PATH)
    # 3. samtools view -b alignment.sam > alignment.bam
    alignment_bam = filename_prefix + ".bam"
    cmd = "{samtools} view -b -S {alignment_sam} > {alignment_bam}".format(
        samtools=settings.SAMTOOLS_BINARY,
        alignment_sam=alignment_sam,
        alignment_bam=alignment_bam)
    subprocess.call(cmd, shell=True, executable=settings.BASH_PATH)
    # 4. samtools sort alignment.bam
    sort_bam_by_coordinate(alignment_bam)
    # 5. Index it
    index_bam(alignment_bam)
    return alignment_bam
| churchlab/millstone | genome_designer/utils/bam_utils.py | Python | mit | 7,468 | [
"BWA",
"pysam"
] | a7cecc7143bb94c7794bb636819eda3397ad7072a0ddabec4e8e4292372d9fcb |
# import vtk wrapped version that will raise exceptions for error events
import vtkwithexceptions as vtk
import imp
import tempfile
from visomics.vtk.common import LoadInput, SerializeOutput, parse_json
from celery import Celery
from celery import task, current_task
from celery.result import AsyncResult
# Module-level Celery application; broker/backend settings are read from the
# ``celeryconfig`` module on the import path.
celery = Celery()
celery.config_from_object('celeryconfig')
@celery.task
def run(input):
    """Celery task: execute a user-supplied VTK processing script.

    ``input`` is JSON with "inputs" (a list of (name, type, format, data)
    entries) and "script" (Python source that must define an
    ``execute(inputs)`` function). Returns {"output": [...]} where each
    output object is serialized along with its VTK class name.
    (Python 2 code: ``exec ... in`` statement, ``iteritems``.)
    """
    task_description = parse_json(input);
    # load inputs into a dictionary
    inputs = {}
    for name, type, format, data in task_description['inputs']:
        inputs[name] = LoadInput(type, format, data)
    # load incoming Python script code into a custom module
    module_code = task_description['script']
    custom = imp.new_module("custom")
    # SECURITY: this executes arbitrary incoming Python with the worker's
    # full privileges; only accept scripts from trusted clients.
    exec module_code in custom.__dict__
    # call the custom Python code and get its output
    outputs = custom.execute(inputs)
    # wrap the output up into a dictionary & return
    output_list = []
    for name, object in outputs.iteritems():
        type = object.GetClassName()
        data = SerializeOutput(object)
        d = {"name": name, "type": type, "data": data }
        output_list.append(d)
    output_json = {"output": output_list}
    return output_json
| Visomics/Visomics | AnalysisServer/visomics/vtk/python.py | Python | apache-2.0 | 1,183 | [
"VTK"
] | 5ae5fc935eba6ef03536193daad6d9792199bc6bf4a8d1cc528d7dc1e05b4086 |
#!/usr/bin/env python
"""Compile tests."""
from bowtie import App
from bowtie.control import Nouislider
from bowtie.visual import Plotly
from bowtie.tests.utils import reset_uuid
def callback(*_):
    """Dummy subscriber; accepts and ignores any event arguments."""
def test_build(build_reset, monkeypatch):
    """Build a minimal slider + chart app and verify _build() succeeds."""
    reset_uuid()
    slider = Nouislider()
    chart = Plotly()
    app = App(__name__, sidebar=True)
    app.add_sidebar(slider)
    app.add(chart)
    app.subscribe(slider.on_change)(callback)
    # pylint: disable=protected-access
    app._build()
| jwkvam/bowtie | bowtie/tests/test_compile.py | Python | mit | 580 | [
"Bowtie"
] | cfefaa2a671b6bf8f904fc84d12d757eae0e29773c6d7d576188f0748f7d8bfa |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from conary_test import rephelp
from conary.local import schema
class ErrorOutputTest(rephelp.RepositoryHelper):
    # NOTE: Python 2 code ("except Type, name" syntax, octal 0400 literals).
    def testDatabaseSchemaErrors(self):
        """Exercise the three schema-mismatch error messages by editing the
        databaseVersion table directly: (1) too-new schema, (2) old schema on
        a read-only database file, (3) missing version table entirely."""
        db = self.openDatabase()
        db.writeAccess()
        cu = db.db.db.cursor()
        # (1) Pretend the database was written by a much newer conary.
        cu.execute('update databaseVersion set version=10000')
        db.db.db.commit()
        try:
            db2 = self.openDatabase()
            db2.writeAccess()
        except schema.NewDatabaseSchema, err:
            assert(str(err) == '''The conary database on this system is too new. You may have multiple versions of conary installed and be running the wrong one, or your conary may have been downgraded. Please visit http://wiki.rpath.com for information on how to get support.''')
        else:
            assert(0)
        # (2) Old schema + read-only file: conversion cannot run.
        cu.execute('update databaseVersion set version=1')
        db.db.db.commit()
        os.chmod(self.rootDir + self.cfg.dbPath + '/conarydb', 0400)
        try:
            db2 = self.openDatabase()
            db2.writeAccess()
        except schema.OldDatabaseSchema, err:
            assert(str(err) == '''\
The Conary database on this system is too old. It will be
automatically converted as soon as you run Conary with
write permissions for the database (which normally means
as root).
''')
        else:
            assert(0)
        # (3) No version table at all: manual conversion message.
        os.chmod(self.rootDir + self.cfg.dbPath + '/conarydb', 0600)
        cu.execute('drop table databaseVersion')
        db.db.db.commit()
        try:
            db2 = self.openDatabase()
            db2.writeAccess()
        except schema.OldDatabaseSchema, err:
            assert(str(err) == '''\
The Conary database on this system is too old. For information on how to
convert this database, please visit http://wiki.rpath.com/ConaryConversion.''')
        else:
            assert(0)
| fedora-conary/conary | conary_test/localtest/errorstest.py | Python | apache-2.0 | 2,422 | [
"VisIt"
] | 7b84433854ca6cce45ed99f8da77c267437609be378d1bb7dcd2368b81da4896 |
#-------------------------------------------------------------------------------
# Copyright (c) 2011 Anton Golubkov.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v2.1
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#
# Contributors:
# Anton Golubkov - initial API and implementation
#-------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import ipfdicttype
import cv
class IPFSmoothingType(ipfdicttype.IPFDictType):
    """ Smoothing type dict

        Maps human-readable smoothing names to the corresponding OpenCV
        smoothing-mode constants (cv.CV_*).
    """
    name = "IPFSmoothingType"
    dictionary = {"Blur" : cv.CV_BLUR,
                  "Gaussian" : cv.CV_GAUSSIAN,
                  "Median" : cv.CV_MEDIAN,
                  # "Bilateral" : cv.CV_BILATERAL, # TODO: Need understand algorithm and use param3 and param4
                  }
    def __init__(self):
        pass
    @classmethod
    def default_value(cls):
        """ Return default value for this type (the "Blur" mode constant). """
        return cls.dictionary["Blur"]
| anton-golubkov/Garland | src/ipf/ipftype/ipfsmoothingtype.py | Python | lgpl-2.1 | 1,155 | [
"Gaussian"
] | 665984de54dd25f72aa460aba945932dd97944f876d5d207b402e9eab0147584 |
# proxy module
from __future__ import absolute_import
from mayavi.filters.tube import *
| enthought/etsproxy | enthought/mayavi/filters/tube.py | Python | bsd-3-clause | 88 | [
"Mayavi"
] | 5539a483e312f97b78dc375dbf4affbe25da44067fe019792fa4b7decd899b3f |
# -*- coding: utf-8 -*-
"""
Acceptance tests for CMS Video Module.
"""
import os
from mock import patch
from nose.plugins.attrib import attr
from unittest import skipIf
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.video.video import VideoComponentPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import UniqueCourseTest, is_youtube_available, YouTubeStubConfig
@skipIf(is_youtube_available() is False, 'YouTube is not available!')
class CMSVideoBaseTest(UniqueCourseTest):
    """
    CMS Video Module Base Test Class
    """
    def setUp(self):
        """
        Initialization of pages and course fixture for tests
        """
        super(CMSVideoBaseTest, self).setUp()
        self.video = VideoComponentPage(self.browser)
        # This will be initialized later
        self.unit_page = None
        self.outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.course_fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # Extra course assets (e.g. transcript files) uploaded on install.
        self.assets = []
        self.addCleanup(YouTubeStubConfig.reset)
    def _create_course_unit(self, youtube_stub_config=None, subtitles=False):
        """
        Create a Studio Video Course Unit and Navigate to it.
        Arguments:
            youtube_stub_config (dict): settings applied to the YouTube stub
                server before the course is created.
            subtitles (bool): if True, upload a transcript asset as well.
        """
        if youtube_stub_config:
            YouTubeStubConfig.configure(youtube_stub_config)
        if subtitles:
            self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
        self.navigate_to_course_unit()
    def _create_video(self):
        """
        Create Xblock Video Component.
        """
        self.video.create_video()
        video_xblocks = self.video.xblocks()
        # Total video xblock components count should be equals to 2
        # Why 2? One video component is created by default for each test. Please see
        # test_studio_video_module.py:CMSVideoTest._create_course_unit
        # And we are creating second video component here.
        self.assertTrue(video_xblocks == 2)
    def _install_course_fixture(self):
        """
        Prepare for tests by creating a course with a section, subsection, and unit.
        Performs the following:
            Create a course with a section, subsection, and unit
            Create a user and make that user a course author
            Log the user into studio
        """
        if self.assets:
            self.course_fixture.add_asset(self.assets)
        # Create course with Video component
        self.course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('video', 'Video')
                    )
                )
            )
        ).install()
        # Auto login and register the course
        AutoAuthPage(
            self.browser,
            staff=False,
            username=self.course_fixture.user.get('username'),
            email=self.course_fixture.user.get('email'),
            password=self.course_fixture.user.get('password')
        ).visit()
    def _navigate_to_course_unit_page(self):
        """
        Open the course from the dashboard and expand the section and subsection and click on the Unit link
        The end result is the page where the user is editing the newly created unit
        """
        # Visit Course Outline page
        self.outline.visit()
        # Visit Unit page
        self.unit_page = self.outline.section('Test Section').subsection('Test Subsection').expand_subsection().unit(
            'Test Unit').go_to()
        self.video.wait_for_video_component_render()
    def navigate_to_course_unit(self):
        """
        Install the course with required components and navigate to course unit page
        """
        self._install_course_fixture()
        self._navigate_to_course_unit_page()
    def edit_component(self, xblock_index=1):
        """
        Open component Edit Dialog for first component on page.
        Arguments:
            xblock_index: number starting from 1 (0th entry is the unit page itself)
        """
        self.unit_page.xblocks[xblock_index].edit()
    def open_advanced_tab(self):
        """
        Open components advanced tab.
        """
        # The 0th entry is the unit page itself.
        self.unit_page.xblocks[1].open_advanced_tab()
    def open_basic_tab(self):
        """
        Open components basic tab.
        """
        # The 0th entry is the unit page itself.
        self.unit_page.xblocks[1].open_basic_tab()
    def save_unit_settings(self):
        """
        Save component settings.
        """
        # The 0th entry is the unit page itself.
        self.unit_page.xblocks[1].save_settings()
@attr('shard_4')
class CMSVideoTest(CMSVideoBaseTest):
"""
CMS Video Test Class
"""
def test_youtube_stub_proxy(self):
"""
Scenario: YouTube stub server proxies YouTube API correctly
Given youtube stub server proxies YouTube API
And I have created a Video component
Then I can see video button "play"
And I click video button "play"
Then I can see video button "pause"
"""
self._create_course_unit(youtube_stub_config={'youtube_api_blocked': False})
self.assertTrue(self.video.is_button_shown('play'))
self.video.click_player_button('play')
self.video.wait_for_state('playing')
self.assertTrue(self.video.is_button_shown('pause'))
def test_youtube_stub_blocks_youtube_api(self):
"""
Scenario: YouTube stub server can block YouTube API
Given youtube stub server blocks YouTube API
And I have created a Video component
Then I do not see video button "play"
"""
self._create_course_unit(youtube_stub_config={'youtube_api_blocked': True})
self.assertFalse(self.video.is_button_shown('play'))
def test_autoplay_is_disabled(self):
"""
Scenario: Autoplay is disabled in Studio
Given I have created a Video component
Then when I view the video it does not have autoplay enabled
"""
self._create_course_unit()
self.assertFalse(self.video.is_autoplay_enabled)
def test_video_creation_takes_single_click(self):
"""
Scenario: Creating a video takes a single click
And creating a video takes a single click
"""
self._create_course_unit()
# This will create a video by doing a single click and then ensure that video is created
self._create_video()
def test_captions_hidden_correctly(self):
"""
Scenario: Captions are hidden correctly
Given I have created a Video component with subtitles
And I have hidden captions
Then when I view the video it does not show the captions
"""
self._create_course_unit(subtitles=True)
self.video.hide_captions()
self.assertFalse(self.video.is_captions_visible())
def test_video_controls_shown_correctly(self):
"""
Scenario: Video controls for all videos show correctly
Given I have created two Video components
And first is private video
When I reload the page
Then video controls for all videos are visible
"""
self._create_course_unit(youtube_stub_config={'youtube_api_private_video': True})
self.video.create_video()
# change id of first default video
self.edit_component(1)
self.open_advanced_tab()
self.video.set_field_value('YouTube ID', 'sampleid123')
self.save_unit_settings()
# again open unit page and check that video controls show for both videos
self._navigate_to_course_unit_page()
self.assertTrue(self.video.is_controls_visible())
def test_captions_shown_correctly(self):
"""
Scenario: Captions are shown correctly
Given I have created a Video component with subtitles
Then when I view the video it does show the captions
"""
self._create_course_unit(subtitles=True)
self.assertTrue(self.video.is_captions_visible())
    def test_captions_toggling(self):
        """
        Scenario: Captions are toggled correctly
        Given I have created a Video component with subtitles
        And I have toggled captions
        Then when I view the video it does show the captions
        """
        self._create_course_unit(subtitles=True)
        # First click hides the captions, second click shows them again.
        self.video.click_player_button('transcript_button')
        self.assertFalse(self.video.is_captions_visible())
        self.video.click_player_button('transcript_button')
        self.assertTrue(self.video.is_captions_visible())
    def test_caption_line_focus(self):
        """
        Scenario: When enter key is pressed on a caption, an outline shows around it
        Given I have created a Video component with subtitles
        And Make sure captions are opened
        Then I focus on first caption line
        And I see first caption line has focused
        """
        self._create_course_unit(subtitles=True)
        self.video.show_captions()
        # NOTE(review): the docstring says "first caption line" but index 2 is
        # used; presumably the page object counts from a non-zero base -- confirm.
        self.video.focus_caption_line(2)
        self.assertTrue(self.video.is_caption_line_focused(2))
    def test_slider_range_works(self):
        """
        Scenario: When start and end times are specified, a range on slider is shown
        Given I have created a Video component with subtitles
        And Make sure captions are closed
        And I edit the component
        And I open tab "Advanced"
        And I set value "00:00:12" to the field "Video Start Time"
        And I set value "00:00:24" to the field "Video Stop Time"
        And I save changes
        And I click video button "play"
        Then I see a range on slider
        """
        self._create_course_unit(subtitles=True)
        self.video.hide_captions()
        self.edit_component()
        self.open_advanced_tab()
        self.video.set_field_value('Video Start Time', '00:00:12')
        self.video.set_field_value('Video Stop Time', '00:00:24')
        self.save_unit_settings()
        # NOTE(review): the scenario's final step ("Then I see a range on
        # slider") has no corresponding assertion after this click, so the
        # test cannot fail on the slider range itself -- confirm whether an
        # assertion was dropped here.
        self.video.click_player_button('play')
@attr('a11y')
class CMSVideoA11yTest(CMSVideoBaseTest):
    """
    CMS Video Accessibility Test Class

    Runs an accessibility audit against the video player in Studio.
    """
    def setUp(self):
        # Choose the browser from the environment, defaulting to firefox.
        browser = os.environ.get('SELENIUM_BROWSER', 'firefox')

        # the a11y tests run in CI under phantomjs which doesn't
        # support html5 video or flash player, so the video tests
        # don't work in it. We still want to be able to run these
        # tests in CI, so override the browser setting if it is
        # phantomjs.
        if browser == 'phantomjs':
            browser = 'firefox'

        # Patch the env var only for the duration of the parent setUp, which
        # is where the browser instance is actually created.
        with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}):
            super(CMSVideoA11yTest, self).setUp()

    def test_video_player_a11y(self):
        # Limit the scope of the audit to the video player only.
        self.outline.a11y_audit.config.set_scope(include=["div.video"])
        self.outline.a11y_audit.config.set_rules({
            "ignore": [
                'link-href',  # TODO: AC-223
            ],
        })

        self._create_course_unit()
        self.outline.a11y_audit.check_for_accessibility_errors()
| ahmadiga/min_edx | common/test/acceptance/tests/video/test_studio_video_module.py | Python | agpl-3.0 | 11,780 | [
"VisIt"
] | d1e7e0bf8795cb8ba8beedb75e38cb6bfd8e36f3b7829e48b5c985ac424225e9 |
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic model util functions."""
import numpy as np
import six
import tensorflow as tf
def dropout(input_tensor, dropout_prob):
  """Apply dropout to `input_tensor`.

  Args:
    input_tensor: float Tensor.
    dropout_prob: Python float, the probability of *dropping* a value (unlike
      the keep-probability convention of the legacy `tf.nn.dropout`).

  Returns:
    `input_tensor` with dropout applied, or the tensor unchanged when the
    rate is `None` or zero (i.e. dropout disabled, e.g. at eval time).
  """
  if dropout_prob is None or dropout_prob == 0.0:
    return input_tensor
  return tf.nn.dropout(input_tensor, rate=dropout_prob)
def create_look_ahead_mask(seq_length, batch_size=0):
  """Build a causal (look-ahead) attention mask.

  Disallowed (future) positions are marked 1, allowed positions 0.

  Args:
    seq_length: int, length of the sequence.
    batch_size: when > 0, the mask is tiled along a leading batch dimension.

  Returns:
    A float mask of shape (seq_length, seq_length), or
    (batch_size, seq_length, seq_length) when batch_size > 0.
  """
  lower_triangular = tf.linalg.band_part(
      tf.ones((seq_length, seq_length)), -1, 0)
  causal_mask = 1 - lower_triangular
  if batch_size <= 0:
    return causal_mask
  # Tile one copy of the mask per batch element.
  return tf.repeat(tf.expand_dims(causal_mask, axis=0), batch_size, axis=0)
def create_attention_mask_from_input_mask(from_tensor, to_mask):
  """Expand a 2D padding mask into a 3D attention mask.

  Args:
    from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
    to_mask: int32 Tensor of shape [batch_size, to_seq_length].

  Returns:
    float Tensor of shape [batch_size, from_seq_length, to_seq_length].
  """
  batch_size, from_seq_length = get_shape_list(from_tensor)[:2]
  to_seq_length = get_shape_list(to_mask)[1]

  # [batch_size, 1, to_seq_length], cast to float for the broadcast multiply.
  to_mask = tf.cast(
      tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

  # We don't assume that `from_tensor` is a mask (although it could be). We
  # don't actually care if we attend *from* padding tokens (only *to* padding
  # tokens), so the "from" side is simply all ones:
  # [batch_size, from_seq_length, 1].
  broadcast_ones = tf.ones(
      shape=[batch_size, from_seq_length, 1], dtype=tf.float32)

  # Broadcasting the two tensors against each other yields the full 3D mask.
  return broadcast_ones * to_mask
def create_initializer(initializer_range=0.02):
  """Creates a `truncated_normal_initializer` with the given range."""
  # `initializer_range` is used directly as the stddev of the distribution.
  return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
def gelu(x):
  """Gaussian Error Linear Unit (tanh approximation).

  A smoother version of the RELU; see https://arxiv.org/abs/1606.08415.

  Args:
    x: float Tensor to perform activation.

  Returns:
    `x` scaled by the (approximate) Gaussian CDF.
  """
  inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
  cdf = 0.5 * (1.0 + tf.tanh(inner))
  return x * cdf
def get_activation(activation_string):
  """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.

  Args:
    activation_string: String name of the activation function.

  Returns:
    A Python function corresponding to the activation function. If
    `activation_string` is None, empty, or "linear", this will return None.
    If `activation_string` is not a string, it will return `activation_string`.

  Raises:
    ValueError: The `activation_string` does not correspond to a known
      activation.
  """
  # Anything that's not a string is assumed to already be an activation
  # callable and is passed through untouched.
  if not isinstance(activation_string, six.string_types):
    return activation_string
  if not activation_string:
    return None

  known_activations = {
      "linear": None,
      "relu": tf.keras.activations.relu,
      "gelu": gelu,
      "tanh": tf.tanh,
  }
  act = activation_string.lower()
  if act not in known_activations:
    raise ValueError("Unsupported activation: %s" % act)
  return known_activations[act]
def get_shape_list(tensor):
  """Returns a list of the shape of tensor, preferring static dimensions.

  Args:
    tensor: A tf.Tensor object to find the shape of.

  Returns:
    A list of dimensions of the shape of tensor. All static dimensions will
    be returned as python integers, and dynamic dimensions will be returned
    as tf.Tensor scalars.
  """
  static_shape = tensor.shape.as_list()
  dynamic_axes = [
      axis for axis, size in enumerate(static_shape) if size is None
  ]
  if not dynamic_axes:
    return static_shape
  # Fill in the unknown dimensions from the runtime shape tensor.
  runtime_shape = tf.shape(tensor)
  for axis in dynamic_axes:
    static_shape[axis] = runtime_shape[axis]
  return static_shape
def gather_indexes(sequence_tensor, positions):
  """Gathers the vectors at the specific positions over a minibatch."""
  batch_size, seq_length, width = get_shape_list(sequence_tensor)[:3]

  # Offset each row's positions so they index into the flattened batch.
  row_offsets = tf.reshape(
      tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
  flat_positions = tf.reshape(positions + row_offsets, [-1])

  flat_sequences = tf.reshape(sequence_tensor,
                              [batch_size * seq_length, width])
  gathered = tf.gather(flat_sequences, flat_positions)
  return tf.reshape(gathered, [batch_size, -1, width])
def split_heads(x, batch_size, seq_length, num_joints, num_attention_heads,
                model_depth):
  """Split the embedding vector for different heads for the spatial attention.

  Args:
    x: the embedding vector (batch_size, seq_len, num_joints, model_depth) or
      (batch_size, seq_len, model_depth)
    batch_size: the batch_size
    seq_length: the sequence length
    num_joints: the number of joints (used only for 4D input)
    num_attention_heads: the number of attention heads
    model_depth: the model depth

  Returns:
    the split vector (batch_size, seq_len, num_heads, num_joints, depth) or
    (batch_size, num_heads, seq_len, depth)

  Raises:
    ValueError: if `x` is neither 3D nor 4D.
  """
  head_depth = model_depth // num_attention_heads
  rank = len(x.get_shape().as_list())
  if rank == 4:
    # (batch, seq, joints, model_depth): split heads, then move the head axis
    # in front of the joint axis.
    split = tf.reshape(
        x, (batch_size, seq_length, num_joints, num_attention_heads,
            head_depth))
    return tf.transpose(split, perm=[0, 1, 3, 2, 4])
  if rank == 3:
    # (batch, seq, model_depth) -> (batch, heads, seq, head_depth).
    split = tf.reshape(
        x, (batch_size, seq_length, num_attention_heads, head_depth))
    return tf.transpose(split, perm=[0, 2, 1, 3])
  raise ValueError("Unsupported input tensor dimension.")
def scaled_dot_product_attention(q, k, v, mask):
  """The scaled dot product attention mechanism.

  Attn(Q, K, V) = softmax((QK^T+mask)/sqrt(depth))V.

  Args:
    q: the query vectors matrix (..., attn_dim, d_model/num_heads)
    k: the key vector matrix (..., attn_dim, d_model/num_heads)
    v: the value vector matrix (..., attn_dim, d_model/num_heads)
    mask: a mask for attention, or None

  Returns:
    the updated encoding and the attention weights matrix
  """
  # (..., num_heads, attn_dim, attn_dim)
  attention_logits = tf.matmul(q, k, transpose_b=True)

  # Scale by sqrt of the key depth to keep logits in a stable range.
  dk = tf.cast(tf.shape(k)[-1], tf.float32)
  attention_logits = attention_logits / tf.math.sqrt(dk)

  if mask is not None:
    # Large negative logits drive masked positions to ~0 after the softmax.
    attention_logits += (mask * -1e9)

  # Normalize on the last axis (seq_len_k) so the scores add up to 1.
  attention_weights = tf.nn.softmax(attention_logits, axis=-1)

  # (..., num_heads, attn_dim, depth)
  output = tf.matmul(attention_weights, v)
  return output, attention_weights
| google-research/mint | mint/core/base_model_util.py | Python | apache-2.0 | 8,036 | [
"Gaussian"
] | 51871c993e46a57b75cea2026e41c5b1d595f1c44b71b7722e764487316486e4 |
from vtk import *
from math import *
# -----------------------------------------------------------------------------
# Set of helper functions
# -----------------------------------------------------------------------------
def normalize(vect, tolerance=0.00001):
    """Return `vect` scaled to unit length as a tuple.

    If the squared length is already within `tolerance` of 1, the input is
    returned unchanged (same object, possibly a list).
    """
    squared_length = sum(component * component for component in vect)
    if abs(squared_length - 1.0) <= tolerance:
        return vect
    length = sqrt(squared_length)
    return tuple(component / length for component in vect)
def q_mult(q1, q2):
    """Hamilton product of two quaternions given as (w, x, y, z) tuples."""
    aw, ax, ay, az = q1
    bw, bx, by, bz = q2
    return (
        aw * bw - ax * bx - ay * by - az * bz,
        aw * bx + ax * bw + ay * bz - az * by,
        aw * by + ay * bw + az * bx - ax * bz,
        aw * bz + az * bw + ax * by - ay * bx,
    )
def q_conjugate(q):
    """Conjugate of quaternion (w, x, y, z): keep the scalar, negate the vector part."""
    scalar, i, j, k = q
    return (scalar, -i, -j, -k)
def qv_mult(q1, v1):
    """Rotate vector `v1` by quaternion `q1` via q * (0, v) * conj(q).

    Returns the vector part of the resulting quaternion.
    """
    vector_as_quat = (0.0,) + v1
    rotated = q_mult(q_mult(q1, vector_as_quat), q_conjugate(q1))
    return rotated[1:]
def axisangle_to_q(v, theta):
    """Convert an axis/angle pair (theta in radians) to a unit quaternion."""
    ux, uy, uz = normalize(v)
    half_angle = theta / 2
    s = sin(half_angle)
    return cos(half_angle), ux * s, uy * s, uz * s
def vectProduct(axisA, axisB):
    """Return the normalized cross product of `axisA` and `axisB`."""
    ax, ay, az = axisA
    bx, by, bz = axisB
    cross = (ay * bz - az * by, az * bx - ax * bz, ax * by - ay * bx)
    return normalize(cross)
def dotProduct(vecA, vecB):
    """Return the dot product of the first three components of two vectors."""
    total = 0
    for axis in range(3):
        total += vecA[axis] * vecB[axis]
    return total
def rotate(axis, angle, center, point):
    """Rotate `point` by `angle` degrees about `axis` through `center`."""
    angle_rad = 3.141592654 * angle / 180.0
    rotation = axisangle_to_q(axis, angle_rad)
    # Translate to the origin, rotate, then translate back.
    centered = tuple((point[i] - center[i]) for i in range(3))
    rotated = qv_mult(rotation, centered)
    return tuple((rotated[i] + center[i]) for i in range(3))
# -----------------------------------------------------------------------------
# Spherical Camera
# -----------------------------------------------------------------------------
class SphericalCamera(object):
    """Camera that orbits a focal point on a sphere, parameterized by
    phi (azimuth about `phiAxis`) and theta (elevation) angles.

    Registers 'phi' and 'theta' as slider arguments on `dataHandler` and
    precomputes one camera setting per (theta, phi) pair.
    """
    def __init__(self, dataHandler, focalPoint, position, phiAxis, phiAngles, thetaAngles):
        self.dataHandler = dataHandler
        self.cameraSettings = []
        # Mouse-drag bindings: theta follows the vertical coordinate (1),
        # phi follows the horizontal coordinate (0).
        self.thetaBind = { "mouse" : { "drag" : { "modifier": 0, "coordinate": 1, "step": 30 , "orientation": 1} } }
        self.phiBind = { "mouse" : { "drag" : { "modifier": 0, "coordinate": 0, "step": 30 , "orientation": 1} } }
        # Convert to serializable type
        fp = tuple(i for i in focalPoint)
        # Register arguments to the data handler.  When the phi angles wrap a
        # full circle (first non-zero + last == 360) the slider loops modulo.
        if len(phiAngles) > 1 and phiAngles[-1] + phiAngles[1] == 360:
            self.dataHandler.registerArgument(priority=0, name='phi', values=phiAngles, ui='slider', loop='modulo', bind=self.phiBind)
        else:
            self.dataHandler.registerArgument(priority=0, name='phi', values=phiAngles, ui='slider', bind=self.phiBind)
        # Negative theta angles are shifted by +90 for display, and the slider
        # default index is set to the first non-negative angle.
        if thetaAngles[0] < 0 and thetaAngles[0] >= -90:
            idx = 0
            for theta in thetaAngles:
                if theta < 0:
                    idx += 1
            self.dataHandler.registerArgument(priority=0, name='theta', values=[ (x+90) for x in thetaAngles ], ui='slider', default=idx, bind=self.thetaBind)
        else:
            self.dataHandler.registerArgument(priority=0, name='theta', values=thetaAngles, ui='slider', bind=self.thetaBind)
        # Compute all camera settings: rotate about the phi axis first, then
        # about the derived theta axis (perpendicular to phiAxis and the view
        # direction), carrying the view-up vector along.
        for theta in thetaAngles:
            for phi in phiAngles:
                phiPos = rotate(phiAxis, -phi, fp, position)
                thetaAxis = vectProduct(phiAxis, tuple(fp[i]-phiPos[i] for i in range(3)))
                thetaPhiPos = rotate(thetaAxis, theta, fp, phiPos)
                viewUp = rotate(thetaAxis, theta, (0,0,0), phiAxis)
                self.cameraSettings.append({
                    'theta': theta,
                    'thetaIdx': thetaAngles.index(theta),
                    'phi': phi,
                    'phiIdx': phiAngles.index(phi),
                    'focalPoint': fp,
                    'position': thetaPhiPos,
                    'viewUp': viewUp
                })
        self.dataHandler.updateBasePattern()

    def updatePriority(self, priorityList):
        # Forward per-argument priorities (theta first, then phi).
        keyList = ['theta', 'phi']
        for idx in range(min(len(priorityList), len(keyList))):
            self.dataHandler.updatePriority(keyList[idx], priorityList[idx])

    def __iter__(self):
        # Yield each precomputed setting, updating the data handler's current
        # (phi, theta) argument indices as a side effect.
        for cameraData in self.cameraSettings:
            self.dataHandler.setArguments(phi=cameraData['phiIdx'], theta=cameraData['thetaIdx'])
            yield cameraData
# -----------------------------------------------------------------------------
# Cylindrical Camera
# -----------------------------------------------------------------------------
class CylindricalCamera(object):
    """Camera that moves on a cylinder around `rotationAxis`: phi rotates
    about the axis, n_pos translates both camera and focal point along it.
    """
    def __init__(self, dataHandler, focalPoint, position, rotationAxis, phiAngles, translationValues):
        self.dataHandler = dataHandler
        self.cameraSettings = []
        # Register arguments to the data handler
        self.dataHandler.registerArgument(priority=0, name='phi', values=phiAngles, ui='slider', loop='modulo')
        self.dataHandler.registerArgument(priority=0, name='n_pos', values=translationValues, ui='slider')
        # Compute all camera settings: rotate about the axis, then shift both
        # the position and the focal point by `translation` along the axis.
        for translation in translationValues:
            for phi in phiAngles:
                phiPos = rotate(rotationAxis, phi, focalPoint, position)
                newfocalPoint = tuple(focalPoint[i] + (translation*rotationAxis[i]) for i in range(3))
                transPhiPoint = tuple(phiPos[i] + (translation*rotationAxis[i]) for i in range(3))
                self.cameraSettings.append({
                    'n_pos': translation,
                    'n_posIdx': translationValues.index(translation),
                    'phi': phi,
                    'phiIdx': phiAngles.index(phi),
                    'focalPoint': newfocalPoint,
                    'position': transPhiPoint,
                    'viewUp': rotationAxis
                })
        self.dataHandler.updateBasePattern()

    def updatePriority(self, priorityList):
        # Forward per-argument priorities (n_pos first, then phi).
        keyList = ['n_pos', 'phi']
        for idx in range(min(len(priorityList), len(keyList))):
            self.dataHandler.updatePriority(keyList[idx], priorityList[idx])

    def __iter__(self):
        # Yield each precomputed setting, updating the data handler's current
        # (phi, n_pos) argument indices as a side effect.
        for cameraData in self.cameraSettings:
            self.dataHandler.setArguments(phi=cameraData['phiIdx'], n_pos=cameraData['n_posIdx'])
            yield cameraData
# -----------------------------------------------------------------------------
# MultiView Camera
# -----------------------------------------------------------------------------
class MultiViewCamera(object):
    """Camera driven by a fixed list of named viewpoints rather than angles."""
    def __init__(self, dataHandler):
        self.dataHandler = dataHandler
        self.cameraSettings = []
        self.positionNames = []

    def registerViewPoint(self, name, focalPoint, position, viewUp):
        # NOTE(review): registerArgument/updateBasePattern are re-invoked on
        # every call; presumably the handler overwrites the previous
        # 'multiView' registration -- confirm against the dataHandler API.
        self.cameraSettings.append({'name': name, 'nameIdx': len(self.positionNames), 'focalPoint': focalPoint, 'position': position, 'viewUp': viewUp})
        self.positionNames.append(name)
        self.dataHandler.registerArgument(priority=0, name='multiView', values=self.positionNames)
        self.dataHandler.updateBasePattern()

    def updatePriority(self, priorityList):
        keyList = ['multiView']
        for idx in range(min(len(priorityList), len(keyList))):
            self.dataHandler.updatePriority(keyList[idx], priorityList[idx])

    def __iter__(self):
        # Yield each viewpoint, selecting it in the data handler first.
        for cameraData in self.cameraSettings:
            self.dataHandler.setArguments(multiView=cameraData['nameIdx'])
            yield cameraData
# -----------------------------------------------------------------------------
# Helper methods
# -----------------------------------------------------------------------------
def update_camera(renderer, cameraData):
    """Apply a camera-settings dict (position/focalPoint/viewUp) to the
    renderer's active VTK camera."""
    camera = renderer.GetActiveCamera()
    camera.SetPosition(cameraData['position'])
    camera.SetFocalPoint(cameraData['focalPoint'])
    camera.SetViewUp(cameraData['viewUp'])
def create_spherical_camera(renderer, dataHandler, phiValues, thetaValues):
    """Build a SphericalCamera seeded from the renderer's current active camera."""
    camera = renderer.GetActiveCamera()
    return SphericalCamera(dataHandler, camera.GetFocalPoint(), camera.GetPosition(), camera.GetViewUp(), phiValues, thetaValues)
def create_cylindrical_camera(renderer, dataHandler, phiValues, translationValues):
    """Build a CylindricalCamera seeded from the renderer's current active camera.

    NOTE(review): the camera's view-up vector is passed as the cylinder's
    rotation axis; presumably intentional -- confirm.
    """
    camera = renderer.GetActiveCamera()
    return CylindricalCamera(dataHandler, camera.GetFocalPoint(), camera.GetPosition(), camera.GetViewUp(), phiValues, translationValues)
| keithroe/vtkoptix | Web/Python/vtk/web/camera.py | Python | bsd-3-clause | 8,442 | [
"VTK"
] | 3d148d3d5ab7225290587824c1d1d925bef3f5d3fe5deeb9821154e5ab06d2b2 |
import pysam
import sys
import gzip
import os
import logging
import argparse
import xml.etree.ElementTree as ET
import subprocess
from CountXmlUtils import readCountXmlQueryLocationInFeatures
DEBUG = False
# NOT_DEBUG doubles as the `required` flag for the argparse options below.
NOT_DEBUG= not DEBUG
# When DEBUG is set, hard-coded cluster paths replace command-line parsing.
if DEBUG:
  genomeListFile="/scratch/stein_lab/shengq2/20200226_4233_4263_michelle_smallRNA_human_v5_byTiger/data_visualization/bacteria_count/result/StaRRA_human_4233_4263__fileList1.list"
  databaseFile = "/scratch/stein_lab/shengq2/20200226_4233_4263_michelle_smallRNA_human_v5_byTiger/nonhost_library/bowtie1_rRNA_pm_table/result/rRNA_pm_StaRRA_human_4233_4263.count.xml"
  taskReadFile = "/scratch/stein_lab/shengq2/20200226_4233_4263_michelle_smallRNA_human_v5_byTiger/data_visualization/reads_in_tasks/result/StaRRA_human_4233_4263.NonParallel.TaskReads.csv"
  outputFile="/scratch/stein_lab/shengq2/20200226_4233_4263_michelle_smallRNA_human_v5_byTiger/data_visualization/bacteria_count/result/StaRRA_human_4233_4263.tsv"
else:
  parser = argparse.ArgumentParser(description="Generate smallRNA count from count xml.",
                                   formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument('-g', '--genomeListFile', action='store', nargs='?', help='Input bacteria genome count xml list file', required=NOT_DEBUG)
  parser.add_argument('-d', '--databaseFile', action='store', nargs='?', help="Original rRNA database count xml file", required=NOT_DEBUG)
  parser.add_argument('-t', '--taskReadFile', action='store', nargs='?', help="Task read count file", required=NOT_DEBUG)
  parser.add_argument('-o', '--output', action='store', nargs='?', help="Output count file", required=NOT_DEBUG)
  args = parser.parse_args()
  print(args)
  genomeListFile = args.genomeListFile
  databaseFile = args.databaseFile
  taskReadFile = args.taskReadFile
  outputFile = args.output
logger = logging.getLogger('getBacteriaCount')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
def readFileList(fileName):
  """Return the first tab-separated field of every line in `fileName`."""
  with open(fileName) as fh:
    return [line.strip().split('\t', 1)[0] for line in fh]
genomeFiles = readFileList(genomeListFile)
# result maps read sequence -> {sample name -> read count}, merged across all
# per-genome count xml files plus the Bacteria entries of the rRNA database.
sample_names = set()
result = {}
for genomeFile in genomeFiles:
  logger.info("Parsing " + genomeFile)
  # NOTE(review): queryMap is assigned but never used -- looks like dead code.
  queryMap = {}
  tree = ET.parse(genomeFile)
  root = tree.getroot()
  queries = root.find('queries')
  for query in queries.findall('query'):
    query_count = int(query.get("count"))
    query_seq = query.get("seq")
    sample_name = query.get("sample")
    sample_names.add(sample_name)
    result.setdefault(query_seq, {})[sample_name] = query_count
logger.info("Parsing " + databaseFile)
tree = ET.parse(databaseFile)
root = tree.getroot()
queries = root.find('queries')
for query in queries.findall('query'):
  # Keep only queries mapped to a location whose seqname is "Bacteria".
  is_bacteria = False
  for loc in query.findall('location'):
    seqname = loc.get("seqname")
    if seqname == "Bacteria":
      is_bacteria = True
      break
  if is_bacteria:
    query_count = int(query.get("count"))
    query_seq = query.get("seq")
    sample_name = query.get("sample")
    sample_names.add(sample_name)
    # NOTE(review): a sequence present in both a genome file and the database
    # is overwritten here, not summed -- confirm that is intended.
    result.setdefault(query_seq, {})[sample_name] = query_count
samples = sorted(sample_names)
# Pair each sequence with its total count across samples, for sorting.
seq_count = [ [seq, sum(result[seq].values())] for seq in result.keys()]
def sortSecond(val):
  # Sort key: the aggregated count (second element of each pair).
  return val[1]
seq_count.sort(key=sortSecond, reverse=True)
# Per-sequence count table: one row per sequence, one column per sample,
# ordered by descending total count.
with open(outputFile, "wt") as fout:
  fout.write("Sequence\t%s\n" % "\t".join(samples) )
  for query in seq_count:
    query_seq = query[0]
    fout.write(query_seq)
    count_map = result[query_seq]
    for sample in samples:
      fout.write("\t%d" % (count_map[sample] if sample in count_map.keys() else 0))
    fout.write("\n")
# Per-sample totals, used as input for the downstream R visualization.
summaryFile = outputFile + ".summary"
with open(summaryFile, "wt") as fout:
  fout.write("Sample\tCount\n")
  for sample in samples:
    sample_count = sum(result[seq][sample] if sample in result[seq].keys() else 0 for seq in result.keys())
    fout.write("%s\t%d\n" % (sample, sample_count))
# Materialize the companion R script (this file's path + ".R") with the
# output paths prepended as variables, then run it.
rscript = os.path.realpath(__file__) + ".R"
target_r = os.path.basename(rscript)
with open(target_r, "wt") as fout:
  fout.write("outFile='%s'\n" % summaryFile)
  fout.write("parFile1='%s'\n" % summaryFile)
  fout.write("parFile2='%s'\n" % taskReadFile)
  fout.write("setwd('%s')\n\n" % os.path.dirname(os.path.realpath(outputFile)))
  with open(rscript, "rt") as fin:
    for line in fin:
      fout.write(line)
subprocess.call("R --vanilla -f " + target_r, shell=True)
logger.info("done.")
| shengqh/ngsperl | lib/SmallRNA/getBacteriaCount.py | Python | apache-2.0 | 4,574 | [
"pysam"
] | 0b474d17aa2e226b72ae319287ec3578014149193935ff7dddcb368b33ae5ca5 |
import logging
import os
import threading
from mako.template import Template
from galaxy import web
from galaxy.util import json
from galaxy.util import rst_to_html
from galaxy.util import unicodify
import tool_shed.util.shed_util_common as suc
from tool_shed.util import basic_util
from tool_shed.util import common_util
from tool_shed.util import hg_util
log = logging.getLogger( __name__ )
def build_readme_files_dict( app, repository, changeset_revision, metadata, tool_path=None ):
    """
    Return a dictionary of valid readme file name <-> readme file content pairs for all readme files defined in the received metadata. Since the
    received changeset_revision (which is associated with the received metadata) may not be the latest installable changeset revision, the README
    file contents may not be available on disk. This method is used by both Galaxy and the Tool Shed.
    """
    # Galaxy always has the installed files on disk; the Tool Shed only does
    # when the requested revision is the latest downloadable one.
    if app.name == 'galaxy':
        can_use_disk_files = True
    else:
        repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
        latest_downloadable_changeset_revision = suc.get_latest_downloadable_changeset_revision( app, repository, repo )
        can_use_disk_files = changeset_revision == latest_downloadable_changeset_revision
    readme_files_dict = {}
    if metadata:
        if 'readme_files' in metadata:
            for relative_path_to_readme_file in metadata[ 'readme_files' ]:
                readme_file_name = os.path.split( relative_path_to_readme_file )[ 1 ]
                if can_use_disk_files:
                    if tool_path:
                        full_path_to_readme_file = os.path.abspath( os.path.join( tool_path, relative_path_to_readme_file ) )
                    else:
                        full_path_to_readme_file = os.path.abspath( relative_path_to_readme_file )
                    text = None
                    try:
                        f = open( full_path_to_readme_file, 'r' )
                        text = unicodify( f.read() )
                        f.close()
                    except Exception, e:
                        log.exception( "Error reading README file '%s' from disk: %s" % ( str( relative_path_to_readme_file ), str( e ) ) )
                        text = None
                    if text:
                        text_of_reasonable_length = basic_util.size_string( text )
                        if text_of_reasonable_length.find( '.. image:: ' ) >= 0:
                            # Handle image display for README files that are contained in repositories in the tool shed or installed into Galaxy.
                            # NOTE(review): this Lock is a fresh local object,
                            # so acquiring it serializes nothing across
                            # threads; a module-level lock was probably
                            # intended -- confirm.
                            lock = threading.Lock()
                            lock.acquire( True )
                            try:
                                text_of_reasonable_length = suc.set_image_paths( app,
                                                                                 app.security.encode_id( repository.id ),
                                                                                 text_of_reasonable_length )
                            except Exception, e:
                                log.exception( "Exception in build_readme_files_dict, so images may not be properly displayed:\n%s" % str( e ) )
                            finally:
                                lock.release()
                        # reStructuredText files are rendered to HTML via a
                        # Mako template; everything else is HTML-escaped.
                        if readme_file_name.endswith( '.rst' ):
                            text_of_reasonable_length = Template( rst_to_html( text_of_reasonable_length ),
                                                                  input_encoding='utf-8',
                                                                  output_encoding='utf-8',
                                                                  default_filters=[ 'decode.utf8' ],
                                                                  encoding_errors='replace' )
                            text_of_reasonable_length = text_of_reasonable_length.render( static_path=web.url_for( '/static' ),
                                                                                          host_url=web.url_for( '/', qualified=True ) )
                            text_of_reasonable_length = unicodify( text_of_reasonable_length )
                        else:
                            text_of_reasonable_length = basic_util.to_html_string( text_of_reasonable_length )
                        readme_files_dict[ readme_file_name ] = text_of_reasonable_length
                else:
                    # We must be in the tool shed and have an old changeset_revision, so we need to retrieve the file contents from the repository manifest.
                    ctx = hg_util.get_changectx_for_changeset( repo, changeset_revision )
                    if ctx:
                        fctx = hg_util.get_file_context_from_ctx( ctx, readme_file_name )
                        if fctx and fctx not in [ 'DELETED' ]:
                            try:
                                text = unicodify( fctx.data() )
                                readme_files_dict[ readme_file_name ] = basic_util.size_string( text )
                            except Exception, e:
                                log.exception( "Error reading README file '%s' from repository manifest: %s" % \
                                               ( str( relative_path_to_readme_file ), str( e ) ) )
    return readme_files_dict
def get_readme_files_dict_for_display( app, tool_shed_url, repo_info_dict ):
    """
    Return a dictionary of README files contained in the single repository being installed so they can be displayed on the tool panel section
    selection page.
    """
    # repo_info_dict is expected to hold exactly one repository entry.
    # NOTE(review): .keys()[ 0 ] is Python-2-only (dict views are not
    # subscriptable under Python 3).
    name = repo_info_dict.keys()[ 0 ]
    repo_info_tuple = repo_info_dict[ name ]
    description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, installed_td = \
        suc.get_repo_info_tuple_contents( repo_info_tuple )
    # Handle changing HTTP protocols over time.
    tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( app, tool_shed_url )
    # Ask the remote Tool Shed for the README contents as JSON.
    params = '?name=%s&owner=%s&changeset_revision=%s' % ( name, repository_owner, changeset_revision )
    url = common_util.url_join( tool_shed_url,
                                'repository/get_readme_files%s' % params )
    raw_text = common_util.tool_shed_get( app, tool_shed_url, url )
    readme_files_dict = json.loads( raw_text )
    return readme_files_dict
def get_readme_file_names( repository_name ):
    """Return a list of file names that will be categorized as README files for the received repository_name."""
    # The previous implementation called .extend()/.append() on the result of
    # map(), which is a list only under Python 2; under Python 3 map() returns
    # an iterator and the code raises AttributeError.  List comprehensions
    # produce the same names in the same order on both versions:
    # *.txt variants, *.rst variants, the bare names, then the
    # repository-specific .txt and .rst names.
    readme_files = [ 'readme', 'read_me', 'install' ]
    valid_filenames = [ '%s.txt' % f for f in readme_files ]
    valid_filenames.extend( [ '%s.rst' % f for f in readme_files ] )
    valid_filenames.extend( readme_files )
    valid_filenames.append( '%s.txt' % repository_name )
    valid_filenames.append( '%s.rst' % repository_name )
    return valid_filenames
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/tool_shed/util/readme_util.py | Python | gpl-3.0 | 7,015 | [
"Galaxy"
] | 3c0c6f5e7b17301885df2426f501516b0b0c47248e2ecdb6af9f6dc71ac281f4 |
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: SOAPBuilder.py,v 1.15 2004/02/18 04:15:05 warnes Exp $'
from version import __version__
import cgi
import copy
from wstools.XMLname import toXMLname, fromXMLname
import fpconst
# SOAPpy modules
from Config import Config
from NS import NS
from Types import *
# Test whether this Python version has Types.BooleanType
# If it doesn't have it, then False and True are serialized as integers
try:
    # BooleanType is expected to come from the wildcard Types import above;
    # NameError means this (very old) interpreter predates bool support.
    BooleanType
    pythonHasBooleanType = 1
except NameError:
    pythonHasBooleanType = 0
################################################################################
# SOAP Builder
################################################################################
class SOAPBuilder:
_xml_top = '<?xml version="1.0"?>\n'
_xml_enc_top = '<?xml version="1.0" encoding="%s"?>\n'
_env_top = '%(ENV_T)s:Envelope %(ENV_T)s:encodingStyle="%(ENC)s"' % \
NS.__dict__
_env_bot = '</%(ENV_T)s:Envelope>\n' % NS.__dict__
# Namespaces potentially defined in the Envelope tag.
_env_ns = {NS.ENC: NS.ENC_T, NS.ENV: NS.ENV_T,
NS.XSD: NS.XSD_T, NS.XSD2: NS.XSD2_T, NS.XSD3: NS.XSD3_T,
NS.XSI: NS.XSI_T, NS.XSI2: NS.XSI2_T, NS.XSI3: NS.XSI3_T}
    def __init__(self, args = (), kw = {}, method = None, namespace = None,
        header = None, methodattrs = None, envelope = 1, encoding = 'UTF-8',
        use_refs = 0, config = Config, noroot = 0):
        # NOTE(review): `kw = {}` is a shared mutable default; it appears to
        # only be read here, but verify no caller relies on mutating it.
        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)
        # Serialization inputs.
        self.args = args
        self.kw = kw
        self.envelope = envelope
        self.encoding = encoding
        self.method = method
        self.namespace = namespace
        self.header = header
        self.methodattrs= methodattrs
        self.use_refs = use_refs
        self.config = config
        # Serialization state: output fragments, counters for generated
        # type/namespace/id names, envelope namespaces, and multi-reference
        # bookkeeping used by build().
        self.out = []
        self.tcounter = 0
        self.ncounter = 1
        self.icounter = 1
        self.envns = {}
        self.ids = {}
        self.depth = 0
        self.multirefs = []
        self.multis = 0
        # A bodyType argument means the caller supplies its own Body element.
        self.body = not isinstance(args, bodyType)
        self.noroot = noroot
def build(self):
    """Serialize everything and return the complete SOAP document.

    Returns a str (encoded with ``self.encoding`` when one is set).
    On a RecursionError with ``use_refs == 0`` the build restarts once
    with multi-reference encoding enabled.
    """
    if Config.debug: print "In build."
    ns_map = {}
    # Cache whether typing is on or not
    typed = self.config.typed
    if self.header:
        # Create a header.
        self.dump(self.header, "Header", typed = typed)
        self.header = None # Wipe it out so no one is using it.
    if self.body:
        # Call genns to record that we've used SOAP-ENV.
        self.depth += 1
        body_ns = self.genns(ns_map, NS.ENV)[0]
        self.out.append("<%sBody>\n" % body_ns)
    if self.method:
        self.depth += 1
        a = ''
        if self.methodattrs:
            for (k, v) in self.methodattrs.items():
                a += ' %s="%s"' % (k, v)
        if self.namespace:  # Use the namespace info handed to us
            methodns, n = self.genns(ns_map, self.namespace)
        else:
            methodns, n = '', ''
        self.out.append('<%s%s%s%s%s>\n' % (
            methodns, self.method, n, a, self.genroot(ns_map)))
    try:
        if type(self.args) != TupleType:
            args = (self.args,)
        else:
            args = self.args
        for i in args:
            self.dump(i, typed = typed, ns_map = ns_map)
        # Honour an explicit argument ordering for this method if the
        # config provides one; otherwise dump keyword args as they come.
        if hasattr(self.config, "argsOrdering") and self.config.argsOrdering.has_key(self.method):
            for k in self.config.argsOrdering.get(self.method):
                self.dump(self.kw.get(k), k, typed = typed, ns_map = ns_map)
        else:
            for (k, v) in self.kw.items():
                self.dump(v, k, typed = typed, ns_map = ns_map)
    except RecursionError:
        if self.use_refs == 0:
            # restart
            b = SOAPBuilder(args = self.args, kw = self.kw,
                            method = self.method, namespace = self.namespace,
                            header = self.header, methodattrs = self.methodattrs,
                            envelope = self.envelope, encoding = self.encoding,
                            use_refs = 1, config = self.config)
            return b.build()
        raise
    if self.method:
        self.out.append("</%s%s>\n" % (methodns, self.method))
        self.depth -= 1
    if self.body:
        # dump may add to self.multirefs, but the for loop will keep
        # going until it has used all of self.multirefs, even those
        # entries added while in the loop.
        self.multis = 1
        for obj, tag in self.multirefs:
            self.dump(obj, tag, typed = typed, ns_map = ns_map)
        self.out.append("</%sBody>\n" % body_ns)
        self.depth -= 1
    if self.envelope:
        e = map (lambda ns: ' xmlns:%s="%s"' % (ns[1], ns[0]),
                 self.envns.items())
        self.out = ['<', self._env_top] + e + ['>\n'] + \
                   self.out + \
                   [self._env_bot]
    if self.encoding != None:
        self.out.insert(0, self._xml_enc_top % self.encoding)
        return ''.join(self.out).encode(self.encoding)
    self.out.insert(0, self._xml_top)
    return ''.join(self.out)
def gentag(self):
    """Return a fresh auto-generated element name ("v1", "v2", ...)."""
    if Config.debug: print "In gentag."
    self.tcounter += 1
    return "v%d" % self.tcounter
def genns(self, ns_map, nsURI):
    """Return a ``(prefix, xmlns-declaration)`` pair for ``nsURI``.

    ``ns_map`` caches URI -> prefix for the current element scope.
    URIs known to belong on the Envelope are recorded in ``self.envns``
    and declared there instead of inline.
    """
    if nsURI == None:
        return ('', '')
    if type(nsURI) == TupleType: # already a tuple
        if len(nsURI) == 2:
            ns, nsURI = nsURI
        else:
            ns, nsURI = None, nsURI[0]
    else:
        ns = None
    if ns_map.has_key(nsURI):
        # Already declared in this scope: reuse the prefix, no new decl.
        return (ns_map[nsURI] + ':', '')
    if self._env_ns.has_key(nsURI):
        ns = self.envns[nsURI] = ns_map[nsURI] = self._env_ns[nsURI]
        return (ns + ':', '')
    if not ns:
        ns = "ns%d" % self.ncounter
        self.ncounter += 1
    ns_map[nsURI] = ns
    if self.config.buildWithNamespacePrefix:
        return (ns + ':', ' xmlns:%s="%s"' % (ns, nsURI))
    else:
        return ('', ' xmlns="%s"' % (nsURI))
def genroot(self, ns_map):
    """Return the SOAP-ENC root attribute for top-level elements, or ''."""
    if self.noroot:  # messages don't need the root attribute
        return ''
    if self.depth != 2:
        return ''
    ns, n = self.genns(ns_map, NS.ENC)
    return ' %sroot="%d"%s' % (ns, not self.multis, n)
# checkref checks an element to see if it needs to be encoded as a
# multi-reference element or not. If it returns None, the element has
# been handled and the caller can continue with subsequent elements.
# If it returns a string, the string should be included in the opening
# tag of the marshaled element.
def checkref(self, obj, tag, ns_map):
    """Decide whether ``obj`` needs multi-reference encoding.

    Returns '' or an ``id="..."`` attribute string to include in the
    opening tag, or None when the element was fully handled here (an
    href was emitted, or the object was queued in ``self.multirefs``).
    """
    if self.depth < 2:
        return ''
    if not self.ids.has_key(id(obj)):
        # First time we see this object: assign it a multiref id.
        n = self.ids[id(obj)] = self.icounter
        self.icounter = n + 1
        if self.use_refs == 0:
            return ''
        if self.depth == 2:
            return ' id="i%d"' % n
        self.multirefs.append((obj, tag))
    else:
        # Seen before: without use_refs this is an unserializable cycle.
        if self.use_refs == 0:
            raise RecursionError, "Cannot serialize recursive object"
        n = self.ids[id(obj)]
        if self.multis and self.depth == 2:
            return ' id="i%d"' % n
    self.out.append('<%s href="#i%d"%s/>\n' %
                    (tag, n, self.genroot(ns_map)))
    return None
# dumpers
def dump(self, obj, tag = None, typed = 1, ns_map = {}):
    """Serialize ``obj``, dispatching to ``dump_<typename>`` if defined,
    otherwise falling back to the generic ``dumper``."""
    if Config.debug: print "In dump.", "obj=", obj
    ns_map = ns_map.copy()   # keep namespace declarations scoped locally
    self.depth += 1
    if type(tag) not in (NoneType, StringType, UnicodeType):
        raise KeyError, "tag must be a string or None"
    try:
        meth = getattr(self, "dump_" + type(obj).__name__)
    except AttributeError:
        if type(obj) == LongType:
            obj_type = "integer"
        elif pythonHasBooleanType and type(obj) == BooleanType:
            obj_type = "boolean"
        else:
            obj_type = type(obj).__name__
        self.out.append(self.dumper(None, obj_type, obj, tag, typed,
                                    ns_map, self.genroot(ns_map)))
    else:
        meth(obj, tag, typed, ns_map)
    self.depth -= 1
# generic dumper
def dumper(self, nsURI, obj_type, obj, tag, typed = 1, ns_map = {},
           rootattr = '', id = '',
           xml = '<%(tag)s%(type)s%(id)s%(attrs)s%(root)s>%(data)s</%(tag)s)s\n'.replace(')s\n', '>\n')):
    """Generic element serializer; returns the XML string for ``obj``."""
    if Config.debug: print "In dumper."
    if nsURI == None:
        nsURI = self.config.typesNamespaceURI
    tag = tag or self.gentag()
    tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
    a = n = t = ''
    if typed and obj_type:
        ns, n = self.genns(ns_map, nsURI)
        ins = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
        t = ' %stype="%s%s"%s' % (ins, ns, obj_type, n)
    try: a = obj._marshalAttrs(ns_map, self)
    except: pass
    try: data = obj._marshalData()
    except:
        if (obj_type != "string"): # strings are already encoded
            data = cgi.escape(str(obj))
        else:
            data = obj
    return xml % {"tag": tag, "type": t, "data": data, "root": rootattr,
                  "id": id, "attrs": a}
def dump_float(self, obj, tag, typed = 1, ns_map = {}):
    """Serialize a Python float as a SOAP double, mapping IEEE specials
    (infinities and NaN) to their XML Schema spellings."""
    if Config.debug: print "In dump_float."
    tag = tag or self.gentag()
    if Config.strict_range:
        doubleType(obj)   # raises if outside the xsd:double range
    if fpconst.isPosInf(obj):
        obj = "INF"
    elif fpconst.isNegInf(obj):
        obj = "-INF"
    elif fpconst.isNaN(obj):
        obj = "NaN"
    else:
        obj = str(obj)
    # Note: python 'float' is actually a SOAP 'double'.
    self.out.append(self.dumper(None, "double", obj, tag, typed, ns_map,
                                self.genroot(ns_map)))
def dump_string(self, obj, tag, typed = 0, ns_map = {}):
    """Serialize a (possibly multi-referenced) string, XML-escaped."""
    if Config.debug: print "In dump_string."
    tag = tag or self.gentag()
    id = self.checkref(obj, tag, ns_map)
    if id == None:
        return   # an href was emitted or the string was queued as multiref
    try: data = obj._marshalData()
    except: data = obj
    self.out.append(self.dumper(None, "string", cgi.escape(data), tag,
                                typed, ns_map, self.genroot(ns_map), id))
dump_str = dump_string # For Python 2.2+
dump_unicode = dump_string
def dump_None(self, obj, tag, typed = 0, ns_map = {}):
    """Serialize None as an empty element flagged with xsi null."""
    if Config.debug: print "In dump_None."
    tag = tag or self.gentag()
    tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
    ns = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
    self.out.append('<%s %snull="1"%s/>\n' %
                    (tag, ns, self.genroot(ns_map)))
dump_NoneType = dump_None # For Python 2.2+
def dump_list(self, obj, tag, typed = 1, ns_map = {}):
    """Serialize a list/tuple as a SOAP-ENC Array.

    Scans the elements first: a homogeneous list gets a concrete
    arrayType and untyped items, while a mixed list gets an 'ur-type'
    arrayType with each element typed individually.
    """
    if Config.debug: print "In dump_list.", "obj=", obj
    tag = tag or self.gentag()
    if type(obj) == InstanceType:
        data = obj.data
    else:
        data = obj
    id = self.checkref(obj, tag, ns_map)
    if id == None:
        return
    try:
        sample = data[0]
        empty = 0
    except:
        sample = structType()
        empty = 1
    # First scan list to see if all are the same type
    same_type = 1
    if not empty:
        for i in data[1:]:
            if type(sample) != type(i) or \
               (type(sample) == InstanceType and \
                sample.__class__ != i.__class__):
                same_type = 0
                break
    ndecl = ''
    if same_type:
        if (isinstance(sample, structType)) or \
           type(sample) == DictType: # force to urn struct
            try:
                tns = obj._ns or NS.URN
            except:
                tns = NS.URN
            ns, ndecl = self.genns(ns_map, tns)
            try:
                typename = sample._typename
            except:
                typename = "SOAPStruct"
            t = ns + typename
        elif isinstance(sample, anyType):
            ns = sample._validNamespaceURI(self.config.typesNamespaceURI,
                                           self.config.strictNamespaces)
            if ns:
                ns, ndecl = self.genns(ns_map, ns)
                t = ns + sample._type
            else:
                t = 'ur-type'
        else:
            typename = type(sample).__name__
            # For Python 2.2+
            if type(sample) == StringType: typename = 'string'
            # HACK: python 'float' is actually a SOAP 'double'.
            if typename=="float": typename="double"
            t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
                typename
    else:
        t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
            "ur-type"
    try: a = obj._marshalAttrs(ns_map, self)
    except: a = ''
    ens, edecl = self.genns(ns_map, NS.ENC)
    ins, idecl = self.genns(ns_map, self.config.schemaNamespaceURI)
    self.out.append(
        '<%s %sarrayType="%s[%d]" %stype="%sArray"%s%s%s%s%s%s>\n' %
        (tag, ens, t, len(data), ins, ens, ndecl, edecl, idecl,
         self.genroot(ns_map), id, a))
    # Homogeneous arrays carry the type on the array tag, so the
    # individual items are written untyped.
    typed = not same_type
    try: elemsname = obj._elemsname
    except: elemsname = "item"
    for i in data:
        self.dump(i, elemsname, typed, ns_map)
    self.out.append('</%s>\n' % tag)
dump_tuple = dump_list
def dump_dictionary(self, obj, tag, typed = 1, ns_map = {}):
    """Serialize a dict as a struct; keys starting with '_' are skipped."""
    if Config.debug: print "In dump_dictionary."
    tag = tag or self.gentag()
    id = self.checkref(obj, tag, ns_map)
    if id == None:
        return
    try: a = obj._marshalAttrs(ns_map, self)
    except: a = ''
    self.out.append('<%s%s%s%s>\n' %
                    (tag, id, a, self.genroot(ns_map)))
    for (k, v) in obj.items():
        if k[0] != "_":
            self.dump(v, k, 1, ns_map)
    self.out.append('</%s>\n' % tag)
dump_dict = dump_dictionary # For Python 2.2+
def dump_instance(self, obj, tag, typed = 1, ns_map = {}):
    """Serialize a class instance, special-casing the SOAPpy types
    (arrayType, faultType, voidType, structType, bodyType, anyType);
    any other class is written as a plain struct of its __dict__."""
    if Config.debug: print "In dump_instance.", "obj=", obj, "tag=", tag
    if not tag:
        # If it has a name use it.
        if isinstance(obj, anyType) and obj._name:
            tag = obj._name
        else:
            tag = self.gentag()
    if isinstance(obj, arrayType):      # Array
        self.dump_list(obj, tag, typed, ns_map)
        return
    if isinstance(obj, faultType):      # Fault
        cns, cdecl = self.genns(ns_map, NS.ENC)
        vns, vdecl = self.genns(ns_map, NS.ENV)
        self.out.append('''<%sFault %sroot="1"%s%s>
<faultcode>%s</faultcode>
<faultstring>%s</faultstring>
''' % (vns, cns, vdecl, cdecl, obj.faultcode, obj.faultstring))
        if hasattr(obj, "detail"):
            self.dump(obj.detail, "detail", typed, ns_map)
        self.out.append("</%sFault>\n" % vns)
        return
    r = self.genroot(ns_map)
    try: a = obj._marshalAttrs(ns_map, self)
    except: a = ''
    if isinstance(obj, voidType):       # void
        self.out.append("<%s%s%s></%s>\n" % (tag, a, r, tag))
        return
    id = self.checkref(obj, tag, ns_map)
    if id == None:
        return
    if isinstance(obj, structType):
        # Check for namespace
        ndecl = ''
        ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
                                    self.config.strictNamespaces)
        if ns:
            ns, ndecl = self.genns(ns_map, ns)
            tag = ns + tag
        self.out.append("<%s%s%s%s%s>\n" % (tag, ndecl, id, a, r))
        keylist = obj.__dict__.keys()
        # first write out items with order information
        for i in range(len(obj._keyord)):
            self.dump(obj._aslist(i), obj._keyord[i], 1, ns_map)
            keylist.remove(obj._keyord[i])
        # now write out the rest
        for k in keylist:
            if (k[0] != "_"):
                self.dump(getattr(obj,k), k, 1, ns_map)
        if isinstance(obj, bodyType):
            # Flush any multiref elements queued while dumping the body.
            self.multis = 1
            for v, k in self.multirefs:
                self.dump(v, k, typed = typed, ns_map = ns_map)
        self.out.append('</%s>\n' % tag)
    elif isinstance(obj, anyType):
        t = ''
        if typed:
            ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
                                        self.config.strictNamespaces)
            if ns:
                ons, ondecl = self.genns(ns_map, ns)
                ins, indecl = self.genns(ns_map,
                                         self.config.schemaNamespaceURI)
                t = ' %stype="%s%s"%s%s' % \
                    (ins, ons, obj._type, ondecl, indecl)
        self.out.append('<%s%s%s%s%s>%s</%s>\n' %
                        (tag, t, id, a, r, obj._marshalData(), tag))
    else:                               # Some Class
        self.out.append('<%s%s%s>\n' % (tag, id, r))
        for (k, v) in obj.__dict__.items():
            if k[0] != "_":
                self.dump(v, k, 1, ns_map)
        self.out.append('</%s>\n' % tag)
################################################################################
# SOAPBuilder's more public interface
################################################################################
def buildSOAP(args=(), kw={}, method=None, namespace=None, header=None,
              methodattrs=None, envelope=1, encoding='UTF-8', config=Config,
              noroot = 0):
    """Convenience wrapper: build and return a complete SOAP message."""
    t = SOAPBuilder(args=args, kw=kw, method=method, namespace=namespace,
                    header=header, methodattrs=methodattrs, envelope=envelope,
                    encoding=encoding, config=config, noroot=noroot)
    return t.build()
| intip/da-apps | plugins/da_centrallogin/modules/soappy/SOAPpy/SOAPBuilder.py | Python | gpl-2.0 | 20,377 | [
"Brian"
] | f074d9dd249fb56fac0242553b75b920d219538b195320bf46b1c0b08f6b24ad |
from __future__ import print_function
import argparse
import os
import random
from collections import defaultdict
import pysam
import mirtop.libs.logger as mylog
import mirtop.libs.do as runner
parser = argparse.ArgumentParser()
parser.add_argument("--fa",
                    help="File with mature sequences.", required=True)
parser.add_argument("-o", "--out", default="spikeins.fa",
                    help="Name used for output files.")
parser.add_argument("--seed", help="set up seed for reproducibility.",
                    default=42)
# type=int so a CLI-supplied value matches the int default: previously
# "--max_size 30" arrived as the *string* "30" and the later
# "len(sequence) < max" comparison raised TypeError under Python 3.
parser.add_argument("--max_size", help="maximum size allowed in the final output.",
                    default=25, type=int)
args = parser.parse_args()
random.seed(args.seed)
def _sam_to_bam(bam_fn):
    """Convert *bam_fn* (SAM) to BAM next to it; return the input name."""
    root = os.path.splitext(bam_fn)[0]
    bam_out = root + ".bam"
    runner.run("samtools view -Sbh {bam_fn} -o {bam_out}".format(
        bam_fn=bam_fn, bam_out=bam_out))
    return bam_fn
def _bam_sort(bam_fn):
    """Sort *bam_fn* by read name into <name>_sort.bam; return that path."""
    bam_sort_by_n = "%s_sort.bam" % os.path.splitext(bam_fn)[0]
    cmd = "samtools sort -n -o {bam_sort_by_n} {bam_fn}".format(
        bam_sort_by_n=bam_sort_by_n, bam_fn=bam_fn)
    runner.run(cmd)
    return bam_sort_by_n
def _read_fasta(fa):
source = dict()
with open(fa) as inh:
for line in inh:
if line.startswith(">"):
name = line.strip().split()[0].replace(">", "")
else:
source.update({name: line.strip()})
return source
def _write_fasta(sequences, filename, max=25):
with open(filename, 'w') as outh:
for name in sequences:
if sequences[name]:
if len(sequences[name]) < max:
print(">%s\n%s" % (name, sequences[name]), file=outh)
return filename
def _parse_hits(sam, source):
    """Blank out sequences in *source* that align too closely to another.

    Reads the all-vs-all alignment in *sam* and, for every pair of
    distinct sequences that align, records the edit distance (NM tag)
    against both names.  Any sequence whose closest hit is under 5
    edits is set to None in *source*.

    NOTE(review): the comment below says "score > 10 edit distance",
    which disagrees with the hard-coded 5 -- confirm the intended cutoff.
    """
    uniques = defaultdict(list)
    # bam_fn = _sam_to_bam(sam)
    # bam_fn = _bam_sort(bam_fn)
    # read sequences and score hits (ignore same sequence)
    handle = pysam.Samfile(sam, "rb")
    for line in handle:
        reference = handle.getrname(line.reference_id)
        name = line.query_name
        # sequence = line.query_sequence if not line.is_reverse else reverse_complement(line.query_sequence)
        if reference == name:
            continue
        # print([reference, name, line.get_tag("NM")])
        distance = line.get_tag("NM")
        uniques[name].append(distance)
        uniques[reference].append(distance)
    # read parsed data and keep the ones with score > 10 edit distance
    for name in uniques:
        if min(uniques[name]) < 5:
            if name in source:
                source[name] = None
    return source
# Map all vs all with razers3
source = _read_fasta(args.fa)
sam = os.path.join(os.path.dirname(args.out), "modified.bam")
# NOTE(review): flags look tuned for near-identity self-matching;
# see the razers3 documentation before changing them.
runner.run(("razers3 -dr 5 -i 75 -rr 80 -f -so 1 -o {output} {target} {query}").format(output=sam, target=args.fa, query=args.fa))
uniques = _parse_hits(sam, source)
# Write uniques to fasta
_write_fasta(uniques, args.out, args.max_size)
| miRTop/mirtop | scripts/make_unique.py | Python | mit | 2,935 | [
"pysam"
] | e633d7c2cad8d5c2edb9ff73735dd5db3d112f906b8775be8a4b4fb81b53031f |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2013 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import logging
import os
import platform
import tempfile
import threading
import gio
import gtk
import pango
import poppler
from stoqlib.gui.base.dialogs import get_current_toplevel
from stoqlib.gui.events import PrintReportEvent
from stoqlib.lib.message import warning
from stoqlib.lib.osutils import get_application_dir
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.template import render_template_string
from stoqlib.lib.threadutils import (schedule_in_main_thread,
terminate_thread)
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.reporting.report import HTMLReport
from stoqlib.reporting.labelreport import LabelReport
_ = stoqlib_gettext
_system = platform.system()
log = logging.Logger(__name__)
# https://github.com/Kozea/WeasyPrint/issues/130
# http://pythonhosted.org/cairocffi/cffi_api.html#converting-pycairo-wrappers-to-cairocffi
def _UNSAFE_pycairo_context_to_cairocffi(pycairo_context):
    """Reach into a pycairo Context's C struct and rewrap it for cairocffi.

    Needed because WeasyPrint >= 0.18 draws through cairocffi while GTK
    hands us a pycairo context.  CPython-only: relies on id() returning
    the object's memory address.
    """
    import cairocffi
    # Sanity check. Continuing with another type would probably segfault.
    if not isinstance(pycairo_context, gtk.gdk.CairoContext):
        raise TypeError('Expected a cairo.Context, got %r' % pycairo_context)
    # On CPython, id() gives the memory address of a Python object.
    # pycairo implements Context as a C struct:
    #   typedef struct {
    #     PyObject_HEAD
    #     cairo_t *ctx;
    #     PyObject *base;
    #   } PycairoContext;
    # Still on CPython, object.__basicsize__ is the size of PyObject_HEAD,
    # ie. the offset to the ctx field.
    # ffi.cast() converts the integer address to a cairo_t** pointer.
    # [0] dereferences that pointer, ie. read the ctx field.
    # The result is a cairo_t* pointer that cairocffi can use.
    return cairocffi.Context._from_pointer(
        cairocffi.ffi.cast('cairo_t **',
                           id(pycairo_context) + object.__basicsize__)[0],
        incref=True)
class PrintOperation(gtk.PrintOperation):
    """Base print operation for Stoq reports.

    Wires the gtk.PrintOperation signals onto a simple template:
    begin_print() -> render() (optionally on a worker thread) ->
    render_done() -> draw_page() per page -> done().
    """

    def __init__(self, report):
        gtk.PrintOperation.__init__(self)
        self.connect("begin-print", self._on_operation_begin_print)
        self.connect("draw-page", self._on_operation_draw_page)
        self.connect("done", self._on_operation_done)
        self.connect("paginate", self._on_operation_paginate)
        self.connect("status-changed", self._on_operation_status_changed)
        self._in_nested_main_loop = False
        self._threaded = False
        self._printing_complete = False
        self._report = report
        self._rendering_thread = None
        self.set_job_name(self._report.title)
        self.set_show_progress(True)
        self.set_track_print_status(True)

    # Public API

    def set_threaded(self):
        # Render on a worker thread; run() then blocks in a nested main
        # loop until the print status reports completion.
        self._threaded = True
        self.set_allow_async(True)

    def run(self):
        gtk.PrintOperation.run(self,
                               gtk.PRINT_OPERATION_ACTION_PRINT_DIALOG,
                               parent=get_current_toplevel())
        # GtkPrintOperation.run() is not blocking by default, as the rendering
        # is threaded we need to wait for the operation to finish before we can
        # return from here, since currently the rendering depends on state that
        # might be released just after exiting this function.
        if self._threaded:
            self._in_nested_main_loop = True
            gtk.main()
            self._in_nested_main_loop = False

    def begin_print(self):
        """This is called before printing is done.

        It can be used to fetch print settings that the user
        selected in the dialog
        """

    def render(self):
        """Renders the actual page.

        This might run in a separate thread, no glib/gtk+ calls are allowed
        here, they needs to be done in render_done() which is called when
        this is finished.
        """
        raise NotImplementedError

    def render_done(self):
        """Rendering of the printed page is done. This should call
        self.set_n_pages()
        """
        raise NotImplementedError

    def draw_page(self, cr, page_no):
        """Draws a page

        :param cr: a cairo context
        :param int page_no: the page to draw
        """
        raise NotImplementedError

    def done(self):
        """Called when rendering and drawing is complete,
        can be used to free resources created during printing.
        """

    # Private API

    def _threaded_render(self):
        # Runs on the worker thread; results are handed back to the
        # main loop via schedule_in_main_thread.
        self.render()
        schedule_in_main_thread(self._threaded_render_done)

    def _threaded_render_done(self):
        if self.get_status() == gtk.PRINT_STATUS_FINISHED_ABORTED:
            return
        self.render_done()
        self._printing_complete = True

    def _is_rendering_finished(self):
        return self.get_status() in [
            gtk.PRINT_STATUS_SENDING_DATA,
            gtk.PRINT_STATUS_FINISHED,
            gtk.PRINT_STATUS_FINISHED_ABORTED]

    # Callbacks

    def _on_operation_status_changed(self, operation):
        if (self._in_nested_main_loop and
            self._is_rendering_finished()):
            # Let run() return from its nested main loop.
            gtk.main_quit()
        if self.get_status() == gtk.PRINT_STATUS_FINISHED_ABORTED:
            terminate_thread(self._rendering_thread)

    def _on_operation_begin_print(self, operation, context):
        self.begin_print()
        if self._threaded:
            self._rendering_thread = threading.Thread(target=self._threaded_render)
            self._rendering_thread.start()
        else:
            self.render()
            self.render_done()
            self._printing_complete = True

    def _on_operation_paginate(self, operation, context):
        # Pagination is finished once rendering has completed.
        return self._printing_complete

    def _on_operation_draw_page(self, operation, context, page_no):
        cr = context.get_cairo_context()
        self.draw_page(cr, page_no)

    def _on_operation_done(self, operation, context):
        self.done()
class PrintOperationPoppler(PrintOperation):
    """PrintOperation that renders a PDF report through poppler."""

    def render(self):
        # FIXME: This is an specific fix for boleto printing in landscape
        # orientation. We should find a better fix for it or simply remove
        # PrintOperationPoppler when migrating the last reports using
        # reportlab to weasyprint
        if getattr(self._report, 'print_as_landscape', False):
            default_page_setup = gtk.PageSetup()
            default_page_setup.set_orientation(gtk.PAGE_ORIENTATION_LANDSCAPE)
            self.set_default_page_setup(default_page_setup)
        # Save the report to disk, then load it back through poppler.
        self._report.save()
        uri = gio.File(path=self._report.filename).get_uri()
        self._document = poppler.document_new_from_file(uri, password="")

    def render_done(self):
        self.set_n_pages(self._document.get_n_pages())

    def draw_page(self, cr, page_no):
        page = self._document.get_page(page_no)
        page.render_for_printing(cr)

    def done(self):
        # Remove the temporary PDF once printing has finished.
        if not os.path.isfile(self._report.filename):
            return
        os.unlink(self._report.filename)
class PrintOperationWEasyPrint(PrintOperation):
    """PrintOperation that renders an HTMLReport through WeasyPrint,
    adding a custom dialog tab for choosing the report font."""

    # Template expanded with the selected page size and font to build
    # the stylesheet handed to WeasyPrint.
    PRINT_CSS_TEMPLATE = """
@page {
size: ${ page_width }mm ${ page_height }mm;
font-family: "${ font_family }";
}
body {
font-family: "${ font_family }";
font-size: ${ font_size }pt;
}
"""
    # Filenames (inside the stoq config dir) used to persist the user's
    # page setup and print settings between runs.
    page_setup_name = 'page_setup.ini'
    print_settings_name = 'print_settings.ini'

    def __init__(self, report):
        PrintOperation.__init__(self, report)
        self._load_settings()
        self.connect('create-custom-widget',
                     self._on_operation_create_custom_widget)
        self.set_embed_page_setup(True)
        self.set_use_full_page(True)
        self.set_custom_tab_label(_('Stoq'))

    def _load_settings(self):
        # Restore the settings saved by _fetch_settings() on a
        # previous run, falling back to portrait defaults.
        self.config_dir = get_application_dir('stoq')
        settings = gtk.PrintSettings()
        filename = os.path.join(self.config_dir, self.print_settings_name)
        if os.path.exists(filename):
            settings.load_file(filename)
        self.set_print_settings(settings)
        default_page_setup = gtk.PageSetup()
        default_page_setup.set_orientation(gtk.PAGE_ORIENTATION_PORTRAIT)
        filename = os.path.join(self.config_dir, self.page_setup_name)
        if os.path.exists(filename):
            default_page_setup.load_file(filename)
        self.set_default_page_setup(default_page_setup)

    def begin_print(self):
        self._fetch_settings()

    def render(self):
        self._document = self._report.render(
            stylesheet=self.print_css)

    def render_done(self):
        self.set_n_pages(len(self._document.pages))

    def draw_page(self, cr, page_no):
        import weasyprint
        weasyprint_version = tuple(map(int, weasyprint.__version__.split('.')))
        if weasyprint_version >= (0, 18):
            # WeasyPrint >= 0.18 draws via cairocffi, so the pycairo
            # context GTK gives us has to be rewrapped.
            cr = _UNSAFE_pycairo_context_to_cairocffi(cr)
        # 0.75 is here because its also in weasyprint render_pdf()
        self._document.pages[page_no].paint(cr, scale=0.75)

    # Private

    def _fetch_settings(self):
        # Persist the dialog choices and build the print CSS from them.
        font_name = self.font_button.get_font_name()
        settings = self.get_print_settings()
        settings.set('stoq-font-name', font_name)
        settings.to_file(os.path.join(self.config_dir, self.print_settings_name))
        page_setup = self.get_default_page_setup()
        page_setup.to_file(os.path.join(self.config_dir, self.page_setup_name))
        orientation = page_setup.get_orientation()
        paper_size = page_setup.get_paper_size()
        width = paper_size.get_width(gtk.UNIT_MM)
        height = paper_size.get_height(gtk.UNIT_MM)
        if orientation in (gtk.PAGE_ORIENTATION_LANDSCAPE,
                           gtk.PAGE_ORIENTATION_REVERSE_LANDSCAPE):
            width, height = height, width
        descr = pango.FontDescription(font_name)
        # CSS expects fonts in pt, get_font_size() is scaled,
        # for screen display pango.SCALE should be used, it looks
        # okay for printed media again, since we're multiplying
        # with 0.75 at the easyprint level as well. At some point
        # we should probably align them.
        font_size = descr.get_size() / pango.SCALE
        self.print_css = render_template_string(
            self.PRINT_CSS_TEMPLATE,
            page_width=width,
            page_height=height,
            font_family=descr.get_family(),
            font_size=font_size)

    def _create_custom_tab(self):
        # TODO: Improve this code (maybe a slave)
        box = gtk.VBox()
        table = gtk.Table()
        table.set_row_spacings(6)
        table.set_col_spacings(6)
        table.set_border_width(6)
        table.attach(gtk.Label(_('Font:')), 0, 1, 0, 1,
                     yoptions=0,
                     xoptions=0)
        settings = self.get_print_settings()
        font_name = settings.get('stoq-font-name')
        self.font_button = gtk.FontButton(font_name)
        table.attach(self.font_button, 1, 2, 0, 1,
                     xoptions=0,
                     yoptions=0)
        box.pack_start(table, False, False)
        box.show_all()
        return box

    # Callbacks

    def _on_operation_create_custom_widget(self, operation):
        return self._create_custom_tab()
def describe_search_filters_for_reports(filters, **kwargs):
    """Collect the human-readable description of each search filter.

    :param filters: iterable of objects providing ``get_description()``
    :returns: *kwargs* with a ``filter_strings`` list added; filters
        whose description is empty/None are omitted.
    """
    filter_strings = []
    # Renamed the loop variable: it used to shadow the builtin ``filter``.
    for search_filter in filters:
        description = search_filter.get_description()
        if description:
            filter_strings.append(description)
    kwargs['filter_strings'] = filter_strings
    return kwargs
def print_report(report_class, *args, **kwargs):
    """Print a report of *report_class* (on Windows, open it instead).

    Recognised keyword argument: ``filters`` -- a list of search
    filters whose descriptions are forwarded to the report.
    Returns whatever a PrintReportEvent handler returned, if anything.
    """
    rv = PrintReportEvent.emit(report_class, *args, **kwargs)
    if rv:
        return rv
    filters = kwargs.pop('filters', None)
    if filters:
        kwargs = describe_search_filters_for_reports(filters, **kwargs)
    tmp = tempfile.mktemp(suffix='.pdf', prefix='stoqlib-reporting')
    report = report_class(tmp, *args, **kwargs)
    report.filename = tmp
    if _system == "Windows":
        report.save()
        log.info("Starting PDF reader for %r" % (report.filename, ))
        # Simply execute the file
        os.startfile(report.filename)
        return
    # HTML reports go through WeasyPrint (threaded); everything else
    # still uses the poppler path.
    if isinstance(report, HTMLReport):
        op = PrintOperationWEasyPrint(report)
        op.set_threaded()
    else:
        op = PrintOperationPoppler(report)
    rv = op.run()
    return rv
def print_labels(label_data, store, purchase=None, receiving=None):
    """Print product labels using the configured template.

    Label data is taken from *purchase*, from every purchase order of
    *receiving*, or from *label_data* itself, in that order.  Warns the
    user when LABEL_TEMPLATE_PATH is unset or does not exist.
    """
    path = sysparam.get_string('LABEL_TEMPLATE_PATH')
    if path and os.path.exists(path):
        if purchase:
            print_report(LabelReport, purchase.get_data_for_labels(),
                         label_data.skip, store=store)
        elif receiving:
            data = []
            for purchase in receiving.purchase_orders:
                data.extend(purchase.get_data_for_labels())
            print_report(LabelReport, data, label_data.skip, store=store)
        else:
            print_report(LabelReport, [label_data], label_data.skip, store=store)
    else:
        warning(_("It was not possible to print the labels. The "
                  "template file was not found."))
| andrebellafronte/stoq | stoqlib/gui/utils/printing.py | Python | gpl-2.0 | 14,009 | [
"VisIt"
] | a097201c052a984ec20a890f86050632fa2f9bb5ab97580818e5d4aef11529f3 |
#!/usr/bin/python
'''
Script to find trade routes (and LuvSats) from an SSW sector map
'''
# Copyright 2008, 2015-2016 Squiffle
# TODO: Figure out shortest trade and mining routes as well as most profitable.
# TODO: Add command-line options for max_trade_routes, max_mine_routes, min_buy_routes and routes_to_print.
# TODO: max_trade_routes and max_mining_routes are a bit wrong.
# They're the number of different *profits* that we'll list.
# While routes_to_print controls the number of routes we'll print for each profit
# Ideally, we'd print the shortest 10 routes that net the best profit.
# TODO: Change command-line to allow printing of just buy or sell prices
# TODO: I think the script needs to be split in two.
# luvsats, asteroids, sectors to probe and even shield ore are unrelated
# to trade routes, although it's useful to share some of the code.
# Should be do-able after moving some code to ssw_map_utils
# TODO: I think we might miss some trade routes when space is partially drone-filled.
# We exclude enemy sectors, but do we find all the best routes from what's left ?
from __future__ import absolute_import
from __future__ import print_function
import ssw_sector_map2 as ssw_sector_map
import ssw_map_utils, ssw_societies, ssw_utils
import operator, sys, getopt, datetime
import six
from six.moves import map
version = 1.02
class Invalid_Ore(Exception):
    """Raised when an ore name/value isn't recognised."""
    def __init__(self, value):
        # Keep the offending value for the str() representation.
        self.value = value
    def __str__(self):
        return repr(self.value)
fout = sys.stdout
def port_str(sector_info):
    '''
    Converts a (sector, alignment) tuple to a string suitable for printing.
    (Parameter renamed from "tuple", which shadowed the builtin.)
    '''
    return "%d (%s)" % sector_info
def ports_str(sectors):
    '''
    Converts a list of (sector, alignment) tuples to a string suitable
    for printing.  Returns 'no ports' for an empty list.
    '''
    # The old dead initial assignment ("port(s) ") has been dropped --
    # it was always overwritten before use.
    if not sectors:
        return 'no ports'
    return ', '.join(map(port_str, sectors))
def planet_str(planets):
    '''
    Converts a list of planets to a string suitable for printing.
    Returns 'no planets' when the list is empty.
    '''
    if not planets:
        return 'no planets'
    return ', '.join(planets)
def drone_str_for_sectors(sectors, drones_by_sector):
    '''
    Returns a string detailing who (if anyone) owns each sector in the list.
    Sectors with no known owner map to the empty string.
    '''
    # TODO: If we see any drones in the map, then no drones => neutral.
    # If we see no drones, probably want to suppress this altogether.
    # dict.get replaces the old bare "except:" which swallowed every
    # error, not just a missing key.
    drone_list = [drones_by_sector.get(sector, '') for sector in sectors]
    return '[' + ', '.join(drone_list) + ']'
def print_best_ore_prices(p, ore_best_sectors, indent, society, buy, sell, unexplored_sector_society=None):
    '''
    Prints the best routes for ore_best_sectors.
    Returns the number of routes printed.

    buy/sell select whether the GE/OC power impact is reported for
    buying from or selling to the destination port (exactly one should
    be true).
    '''
    assert (buy or sell)
    routes_printed = 0
    # TODO print the alignment info that's in ore_best_sectors
    best_sectors = [sector for sector, alignment in ore_best_sectors]
    routes = ssw_map_utils.best_routes(p, best_sectors, None, society, unexplored_sector_society)
    for dis, route, drones, src, dest, poss in routes:
        print("%s%s" % (indent, route), end=' ', file=fout)
        # Only count routes that can be taken
        if dis < ssw_sector_map.sectors_per_row:
            routes_printed += 1
            # Figure out the power impact of the trade(s)
            port = p.trading_port_in_sector(dest)
            assert port, "Trying to trade in sector %d" % dest
            if buy:
                good = port.good
                order = port.order
            else:
                # Selling inverts the power impact.
                good = -port.good
                order = -port.order
            print("[GE:%+d OC:%+d]" % (good, order), end=' ', file=fout)
            if len(p.drones):
                print(ssw_utils.drones_str(drones, poss), file=fout)
            else:
                print(file=fout)
        else:
            print(file=fout)
    return routes_printed
def print_routes(p, sources, destinations, society, trade_at_start, unexplored_sector_society=None, max=200):
    '''
    Prints the best routes from each source sector to each destination sector.
    Returns the number of routes printed.

    trade_at_start indicates the source sector is also a trading port,
    so its buy impact is included in the GE/OC figure.
    NOTE(review): the "max" parameter shadows the builtin; kept for
    interface compatibility.
    '''
    routes = []
    routes_printed = 0
    for src in sources:
        for x in ssw_map_utils.best_routes(p, destinations, src, society, unexplored_sector_society):
            routes.append(x)
    # Shortest routes first.
    routes = sorted(routes,key=operator.itemgetter(0))
    for route in routes[:max]:
        print(" %s " % (route[1]), end=' ', file=fout)
        # Is there actually a route between those sectors ?
        if route[0] < ssw_sector_map.sectors_per_row:
            # This one counts
            routes_printed += 1
            # Figure out the power impact of the trade(s)
            good = 0
            order = 0
            if trade_at_start:
                port = p.trading_port_in_sector(route[3])
                assert port, "Trying to buy from sector %d" % route[3]
                good = port.good
                order = port.order
            port = p.trading_port_in_sector(route[4])
            assert port, "Trying to sell to sector %d" % route[4]
            good -= port.good
            order -= port.order
            print("[GE:%+d OC:%+d]" % (good, order), end=' ', file=fout)
            if len(p.drones):
                print(ssw_utils.drones_str(route[2], route[5]), file=fout)
            else:
                print(file=fout)
        else:
            print(file=fout)
    print(file=fout)
    return routes_printed
def print_ore_buy_routes(p,
                         ore,
                         price_list,
                         indent,
                         min_buy_routes,
                         society,
                         unexplored_sector_society=None,
                         header_indent=''):
    '''
    Prints the list of routes to buy the ore, with an optional header line.
    Walks price_list (cheapest first) until at least min_buy_routes
    usable routes have been printed.
    '''
    buy_routes_printed = 0
    if len(price_list) > 0:
        for price, sectors in price_list:
            if (buy_routes_printed >= min_buy_routes):
                break
            print("%sBuy %s for %d:" % (header_indent, ore, price), file=fout)
            buy_routes_printed += print_best_ore_prices(p,
                                                        sectors,
                                                        indent,
                                                        society,
                                                        True,
                                                        False,
                                                        unexplored_sector_society)
    else:
        print("%sNowhere to buy %s" % (header_indent, ore), file=fout)
def parse_asteroid_line(line):
    '''
    Parse a line listing the asteroids of one ore.
    Returns a tuple of (ore, list of sectors).
    '''
    tokens = line.split(' ')
    # Skip any empty tokens produced by leading indentation
    idx = 0
    while idx < len(tokens) and not tokens[idx]:
        idx += 1
    # The "X of X" count occupies three tokens; the ore name follows
    idx += 3
    ore = tokens[idx]
    # Advance to the first number. Most numbers carry a trailing comma,
    # so the final character of each token is ignored when testing.
    while idx < len(tokens) and not tokens[idx][:-1].isdigit():
        idx += 1
    # Collect sector numbers until a '[...]' annotation (or end of line)
    sectors = []
    while idx < len(tokens):
        token = tokens[idx]
        idx += 1
        if not token:
            continue
        if token.startswith('['):
            break
        if token.endswith(','):
            token = token[:-1]
        sectors.append(int(token))
    return (ore, sectors)
def import_asteroids_file(filename, p):
    '''
    Add details of asteroids from a previous run this same cycle.
    This is useful when you're at degree 33 and you start forgetting
    details of sectors with enemy drones. Save the output with "-mta"
    from an earlier run and use it to supplement your asteroid knowledge.

    filename -- name of a file holding earlier output of this script.
    p -- parsed map to add the asteroids to (via enhance_map_with_asteroids).
    '''
    # Context manager ensures the file is closed even if parsing raises
    # (the original left the handle open on any exception).
    with open(filename) as ast_file:
        # Skip down to the list of asteroids
        for line in ast_file:
            if line == 'Asteroids\n':
                break
        # Now read the list of asteroids for each ore
        for line in ast_file:
            if line == '\n':
                # A blank line marks the end of the asteroid list
                break
            # Now parse the line itself
            ore, sectors = parse_asteroid_line(line)
            # And add the asteroids into the map
            p.enhance_map_with_asteroids([(ore, sector) for sector in sectors])
def total_distance(port_set, distances):
    '''
    Return the total travel distance to visit the list of ports.
    Port_set may well contain one port multiple times.
    Distances is a dict, keyed by port, of distance.
    '''
    # Each distinct port is visited once, however often it appears.
    return sum(distances[port] for port in set(port_set))
def add_ports_to_port_sets(port_sets, ports, ore):
    '''
    If port_sets is empty, return a list of dicts, keyed by ore, with one of ports in each entry.
    Otherwise, return a list of dicts, keyed by ore, with the content of port_sets
    and one of ports added.
    Thus if there are 3 port_sets and len(ports) is 2, a list of length 6 will result.
    '''
    # With no existing sets, each port simply seeds a new single-entry set.
    if not port_sets:
        return [{ore: port} for port in ports]
    # Otherwise take the cross product of existing sets and the new ports.
    retval = []
    for port in ports:
        for port_set in port_sets:
            new_port_set = port_set.copy()
            new_port_set[ore] = port
            retval.append(new_port_set)
    return retval
def usage(progname, map_file):
    '''
    Prints usage information

    progname -- script name to show in the usage line (typically sys.argv[0]).
    map_file -- default map filename, reported in the help text.
    '''
    print("Usage: %s [-s] [-m] [-t] [-h] [-j] [-p] [-c] [-b] [-l] [-a] [-w] [-y] [-n] [-e] [-x] [-d {a|e|i|o|t}] [-r ore] [-g ore_list] [-i asteroids_filename] [-o output_filename] [map_filename]" % progname)
    print()
    print(" Find trade or mining routes")
    print()
    print(" -s|--no-summary - don't print the summary")
    print(" -m|--no-trade - don't print trade routes")
    print(" -t|--no-mining - don't print mining routes")
    print(" -h|--help - print usage and exit")
    print(" -j|--dont-enhance - just use the map file, no other knowledge")
    print(" -p|--prices - print ore price list")
    print(" -c|--cheapest_ore - print where to buy each ore")
    print(" -b|--shield_ore - print places to buy shield ore (Bofhozonite) cheapest")
    print(" -l|--luvsats - print routes to luvsats")
    print(" -a|--asteroids - list asteroids")
    print(" --ports - list trading ports")
    print(" -w|--probe - list sectors to probe")
    print(" -y|--your-drones - list where your drones are")
    print(" -n|--control - list number of sectors controlled by each society")
    print(" -e|--empire - assume that unexplored sectors contain Amaranth drones")
    print(" -x|--links - print the missing links")
    print(" -d|--drones {a|e|i|o|t} - avoid drones not belonging to the specified society")
    print(" -r|--ore - list all the places to get the specified ore")
    print(" -i|--input - read extra asteroid info from the specified file (pointless unless they've moved since the map was saved)")
    print(" -o|--output - write output to the specified file")
    print(" -g|--groceries - report the best route to buy all the specified ores")
    print("    ore_list is comma-separated, with no whitespace")
    print(" map_filename defaults to %s" % map_file)
    print(" default is to just print trade and mining routes")
    print()
    # NOTE(review): 'version' is a module-level constant defined elsewhere
    # in this file - confirm it is set before usage() can be called.
    print(" Version %.2f. Brought to you by Squiffle" % version)
def parse_ore_list_arg(arg_str):
    '''
    Parse the string as a comma-separated list of ores
    Return the list
    Raises Invalid_Ore if any element matches no known ore.
    Note that this is very generous - an arg of 'l' will return ['Lolnium', 'Lmaozium'] because both start with 'l'.
    '''
    retval = []
    # 'prefix' rather than 'str' - the original shadowed the builtin str.
    for prefix in arg_str.split(','):
        # Case-insensitive prefix match against every known ore
        new_ores = [ore for ore in ssw_sector_map.all_ores
                    if ore.lower().startswith(prefix.lower())]
        if len(new_ores) == 0:
            # No matches at all
            raise Invalid_Ore(prefix)
        retval += new_ores
    return retval
def main(*arguments):
    '''
    Do whatever the user wants !
    Returns the parsed map.

    arguments -- command-line arguments (excluding the program name).
    '''
    # Defaults, changeable from the command line
    map_file = "ssw_sector_map.htm"
    asteroids_file = ""
    enhance = True
    print_summary = True
    print_trade_routes = True
    print_mining_routes = True
    print_buy_prices = False
    print_sell_prices = False
    print_ore_buying_routes = False
    print_luvsats = False
    print_shields = False
    print_asteroids = False
    print_trading_ports = False
    print_your_drones = False
    print_probe_sectors = False
    print_sectors_controlled = False
    print_missing_links = False
    max_trade_routes = 5
    max_mining_routes = 5
    min_buy_routes = 3
    routes_to_print = 15
    society = None
    unexplored_sector_society = None
    ore_of_interest = None
    output_filename = None
    ores_to_buy = []
    global fout
    # Parse command-line options
    try:
        opts, args = getopt.getopt(arguments,"smthjpcblawynexd:r:g:i:o:",["no-summary","no-trade","no-mining","help","dont-enhance","prices","cheapest-ore","shield-ore","luvsats","asteroids","ports","probe","your-drones","control","empire","links","drones=","ore=","groceries=","input=","output="])
    except getopt.GetoptError:
        usage(sys.argv[0], map_file)
        sys.exit(2)
    if len(args) == 1:
        map_file = args[0]
    elif len(args) > 1:
        usage(sys.argv[0], map_file)
        sys.exit(2)
    for opt,arg in opts:
        if (opt == '-s') or (opt == '--no-summary'):
            print_summary = False
        elif (opt == '-m') or (opt == '--no-trade'):
            print_trade_routes = False
        elif (opt == '-t') or (opt == '--no-mining'):
            print_mining_routes = False
        elif (opt == '-h') or (opt == '--help'):
            usage(sys.argv[0], map_file)
            sys.exit(0)
        elif (opt == '-j') or (opt == '--dont-enhance'):
            enhance = False
        elif (opt == '-d') or (opt == '--drones'):
            try:
                society = ssw_societies.adjective(arg)
            except ssw_societies.Invalid_Society:
                print('Unrecognised society "%s" - should be one of %s' % (arg, ssw_societies.initials))
                usage(sys.argv[0], map_file)
                sys.exit(2)
        elif (opt == '-p') or (opt == '--prices'):
            print_buy_prices = True
            print_sell_prices = True
        elif (opt == '-c') or (opt == '--cheapest-ore'):
            print_ore_buying_routes = True
        elif (opt == '-b') or (opt == '--shield-ore'):
            print_shields = True
        elif (opt == '-l') or (opt == '--luvsats'):
            print_luvsats = True
        elif (opt == '-a') or (opt == '--asteroids'):
            print_asteroids = True
        elif (opt == '--ports'):
            print_trading_ports = True
        elif (opt == '-w') or (opt == '--probe'):
            print_probe_sectors = True
        elif (opt == '-y') or (opt == '--your-drones'):
            print_your_drones = True
        elif (opt == '-n') or (opt == '--control'):
            print_sectors_controlled = True
        elif (opt == '-e') or (opt == '--empire'):
            unexplored_sector_society = ssw_societies.adjective('a')
        elif (opt == '-x') or (opt == '--links'):
            print_missing_links = True
        elif (opt == '-r') or (opt == '--ore'):
            try:
                ores = parse_ore_list_arg(arg)
                if len(ores) == 1:
                    ore_of_interest = ores[0]
                else:
                    print('Cannot interpret "%s" as one ore - it maps to %s' % (arg, str(ores)))
                    usage(sys.argv[0], map_file)
                    sys.exit(2)
            except Invalid_Ore:
                print('Unrecognised ore "%s"' % (arg))
                usage(sys.argv[0], map_file)
                sys.exit(2)
        elif (opt == '-g') or (opt == '--groceries'):
            ores_to_buy = parse_ore_list_arg(arg)
        elif (opt == '-o') or (opt == '--output'):
            output_filename = arg
            fout = open(output_filename, "w")
        elif (opt == '-i') or (opt == '--input'):
            asteroids_file = arg
    # Read and parse the sector map
    page = open(map_file)
    p = ssw_sector_map.SectorMapParser(page)
    map_valid,reason = p.valid()
    if not map_valid:
        print("Sector map file is invalid - %s" % reason, file=fout)
        sys.exit(2)
    # Print summary
    if print_summary:
        print(file=fout)
        print("Summary", file=fout)
        if p.known_sectors == len(ssw_sector_map.all_sectors):
            print(" %d (all) sectors explored" % p.known_sectors, file=fout)
        else:
            print(" %d of %d sectors explored (%.1f%%)" % (p.known_sectors, len(ssw_sector_map.all_sectors), (100.0*p.known_sectors)/len(ssw_sector_map.all_sectors)), file=fout)
            if len(p.forgotten_sectors) > 0:
                print(" %d sector(s) forgotten (%.1f%%)" % (len(p.forgotten_sectors), (100.0*len(p.forgotten_sectors))/len(ssw_sector_map.all_sectors)), file=fout)
        if len(p.drones) == 0:
            print(" No sectors with drones", file=fout)
        else:
            print(" %d sector(s) with drones (%.1f%%)" % (len(p.drones), (100.0*len(p.drones))/len(ssw_sector_map.all_sectors)), file=fout)
            print(" %d sector(s) with your drones (%d drone(s) in total)" % (len(p.your_drones), sum([d for d,s in p.your_drones])), file=fout)
        if (len(p.planets) == len(p.expected_planets())):
            print(" %d (all) planets" % len(p.planets), file=fout)
        else:
            print(" %d of %d planets" % (len(p.planets), len(p.expected_planets())), file=fout)
        if (len(p.asteroids) == p.expected_asteroids()):
            print(" %d (all) asteroids" % len(p.asteroids), file=fout)
        else:
            print(" %d of %d asteroids" % (len(p.asteroids), p.expected_asteroids()), file=fout)
        if (len(p.black_holes) == len(ssw_sector_map.expected_black_holes)):
            print(" %d (all) black holes" % len(p.black_holes), file=fout)
        else:
            print(" %d of %d black holes" % (len(p.black_holes), len(ssw_sector_map.expected_black_holes)), file=fout)
        if (len(p.npc_stores) == len(p.expected_npc_stores())):
            print(" %d (all) NPC stores" % len(p.npc_stores), file=fout)
        else:
            print(" %d of %d NPC stores" % (len(p.npc_stores), len(p.expected_npc_stores())), file=fout)
        if (len(p.jellyfish) == ssw_sector_map.expected_jellyfish):
            print(" %d (all) space jellyfish" % len(p.jellyfish), file=fout)
        else:
            print(" %d of %d space jellyfish" % (len(p.jellyfish), ssw_sector_map.expected_jellyfish), file=fout)
        if (len(p.trading_ports) == ssw_sector_map.expected_trading_ports):
            print(" %d (all) trading ports" % len(p.trading_ports), file=fout)
        else:
            print(" %d of %d trading ports" % (len(p.trading_ports), ssw_sector_map.expected_trading_ports), file=fout)
        if (len(p.ipts) == ssw_sector_map.expected_ipts):
            print(" %d (all) IPT Beacons" % len(p.ipts), file=fout)
        else:
            print(" %d of %d IPT Beacons" % (len(p.ipts), ssw_sector_map.expected_ipts), file=fout)
        if (len(p.luvsats) == ssw_sector_map.expected_luvsats):
            print(" %d (all) luvsats" % len(p.luvsats), file=fout)
        else:
            print(" %d of %d luvsats" % (len(p.luvsats), ssw_sector_map.expected_luvsats), file=fout)
    if print_sectors_controlled:
        print(file=fout)
        print("Sector control:", file=fout)
        neutral = len(ssw_sector_map.all_sectors)
        for soc, sectors in sorted(six.iteritems(ssw_map_utils.sectors_by_society(p))):
            neutral -= len(sectors)
            print(" %s - %d sector(s) (%.1f%%) - %s" % (soc, len(sectors), 100.0*len(sectors)/len(ssw_sector_map.all_sectors), str(sectors)), file=fout)
        print(" Neutral - %d sector(s) (%.1f%%)" % (neutral, 100.0*neutral/len(ssw_sector_map.all_sectors)), file=fout)
    if print_probe_sectors:
        print(file=fout)
        if p.known_sectors == len(ssw_sector_map.all_sectors):
            print("No sectors to probe", file=fout)
        else:
            print("%d sector(s) to probe: %s" % (len(ssw_map_utils.all_unknown_sectors(p)), str(ssw_map_utils.all_unknown_sectors(p))), file=fout)
    if print_missing_links:
        print(file=fout)
        ssw_map_utils.dump_missing_links(p, fout)
    if enhance:
        # Now add in any invariant information that we don't know
        p.enhance_map()
    # And add in any asteroids if an asteroid file was provided
    if len(asteroids_file) > 0:
        import_asteroids_file(asteroids_file, p)
    # Extract list of drones by sector if we need it
    if print_asteroids:
        drones_by_sector = ssw_map_utils.drones_by_sector(p)
    if print_buy_prices or print_sell_prices:
        print(file=fout)
        print("Best Trading Port prices:", file=fout)
    # Find best ore sell prices if necessary
    if print_trade_routes or print_sell_prices or print_shields or print_ore_buying_routes or ore_of_interest != None or len(ores_to_buy) > 0:
        ore_buy = ssw_map_utils.places_to_buy_ores(p, society)
    if print_sell_prices:
        for (ore,price_list) in sorted(six.iteritems(ore_buy)):
            if len(price_list) > 0:
                (price,sectors) = price_list[0]
                print(" %s for sale for %d in %s" % (ore, price, ports_str(sectors)), file=fout)
    # Find best ore buy prices if necessary
    if print_trade_routes or print_mining_routes or print_buy_prices or ore_of_interest != None:
        ore_sell = ssw_map_utils.places_to_sell_ores(p, society)
    if print_buy_prices:
        print(file=fout)
        for (ore,price_list) in sorted(six.iteritems(ore_sell)):
            if len(price_list) > 0:
                (price,sectors) = price_list[0]
                print(" %s bought for %d in %s" % (ore, price, ports_str(sectors)), file=fout)
    if print_trade_routes:
        profits = []
        for ore,price_list in six.iteritems(ore_sell):
            for sell_price, sell_sectors in price_list:
                for buy_price, buy_sectors in ore_buy[ore]:
                    if sell_price > buy_price:
                        profits.append((ore,sell_price-buy_price,sell_price,buy_price,buy_sectors,sell_sectors))
        print(file=fout)
        # Print trade routes from least to greatest profit
        print("%d Most Profitable Trade Routes" % max_trade_routes, file=fout)
        # Go through from highest to lowest profit
        trade_routes = 0
        for (ore,profit,sell_price,buy_price,buy_sectors,sell_sectors) in sorted(profits, key=operator.itemgetter(1), reverse=True):
            if (len(buy_sectors) > 0) and (len(sell_sectors) > 0) and (trade_routes < max_trade_routes):
                print(" %d profit buying %s for %d from %s and selling in %s" % (profit,ore,buy_price,ports_str(buy_sectors),ports_str(sell_sectors)), file=fout)
                # Don't count it if there are no routes
                buy_sects = [sector for sector, alignment in buy_sectors]
                sell_sects = [sector for sector, alignment in sell_sectors]
                if 0 < print_routes(p, buy_sects, sell_sects, society, True, unexplored_sector_society, routes_to_print):
                    trade_routes += 1
        if trade_routes < max_trade_routes:
            if trade_routes == 0:
                print(" No trade routes found", file=fout)
            else:
                print(" Only %d trade route(s) found" % trade_routes, file=fout)
    if print_mining_routes or print_asteroids or (ore_of_interest != None):
        asteroids = ssw_map_utils.asteroids_by_ore(p, society)
        all_asteroids = ssw_map_utils.asteroids_by_ore(p, None)
    if print_mining_routes:
        print(file=fout)
        # Print mining routes
        print("%d Most Profitable Mining Routes" % max_mining_routes, file=fout)
        ast_list = []
        for ore,price_list in six.iteritems(ore_sell):
            if len(price_list) > 0:
                (sell_price, sell_sectors) = price_list[0]
                ast_list.append((ore, sell_price, sell_sectors))
        # Go through from highest to lowest sell price
        mining_routes = 0
        for (ore,sell_price,sell_sectors) in sorted(ast_list, key=operator.itemgetter(1), reverse=True):
            if (ore in asteroids) and (len(asteroids[ore]) > 0) and (len(sell_sectors) > 0) and (mining_routes < max_mining_routes):
                print(" Mine %s in %s, sell for %d in %s" % (ore, str(asteroids[ore]),sell_price,ports_str(sell_sectors)), file=fout)
                sell_sects = [sector for sector, alignment in sell_sectors]
                if 0 < print_routes(p,
                                    asteroids[ore],
                                    sell_sects,
                                    society,
                                    False,
                                    unexplored_sector_society,
                                    routes_to_print):
                    mining_routes += 1
        if mining_routes < max_mining_routes:
            if mining_routes == 0:
                print(" No mining routes found", file=fout)
            else:
                print(" Only %d mining route(s) found" % mining_routes, file=fout)
    if print_ore_buying_routes:
        print(file=fout)
        print("Cheapest places to buy ores", file=fout)
        for ore,price_list in sorted(six.iteritems(ore_buy), key=operator.itemgetter(0)):
            print_ore_buy_routes(p,
                                 ore,
                                 price_list,
                                 "  ",
                                 min_buy_routes,
                                 society,
                                 unexplored_sector_society,
                                 ' ')
        print(file=fout)
    if print_shields and not print_ore_buying_routes:
        shields = ssw_map_utils.shield_ore
        print(file=fout)
        if shields in ore_buy:
            print_ore_buy_routes(p,
                                 shields,
                                 ore_buy[shields],
                                 ' ',
                                 min_buy_routes,
                                 society,
                                 unexplored_sector_society)
    if ore_of_interest != None:
        print(file=fout)
        # Find the mining routes up-front, both to decide whether the ore is
        # obtainable at all and to print them later.
        # (Bug fix: previously 'routes' was read in the header condition
        # before it was assigned, raising a NameError whenever the ore
        # could not be bought anywhere.)
        try:
            routes = ssw_map_utils.best_routes(p,
                                               asteroids[ore_of_interest],
                                               None,
                                               society,
                                               unexplored_sector_society)
        except KeyError:
            routes = ()
        if len(ore_buy[ore_of_interest]) > 0 or len(routes) > 0:
            print("Places to get %s ore" % ore_of_interest, file=fout)
        if len(ore_buy[ore_of_interest]) == 0:
            print(" Nowhere to buy it", file=fout)
        # Buy it from trading ports
        routes_printed = 0
        for price, sector_list in ore_buy[ore_of_interest]:
            if routes_printed >= routes_to_print:
                break
            routes_printed += print_best_ore_prices(p,
                                                    sector_list,
                                                    " Buy for " + str(price) + " - ",
                                                    society,
                                                    True,
                                                    False,
                                                    unexplored_sector_society)
        # Could mine it from asteroids
        if len(routes) > 0:
            print(" Or mine it :", file=fout)
        else:
            print(" Nowhere to mine it", file=fout)
        # Note that we don't need to limit these because there can only be one per asteroid
        for distance, route, drones, src, dest, poss in routes:
            # Don't print if no route
            if distance < ssw_sector_map.sectors_per_row:
                print("  %s" % (route), end=' ', file=fout)
                if len(p.drones):
                    print(ssw_utils.drones_str(drones, poss), file=fout)
                else:
                    print(file=fout)
        if len(ore_sell[ore_of_interest]) > 0:
            print("Places to get rid of %s ore" % ore_of_interest, file=fout)
        else:
            print("Nowhere to sell %s ore" % ore_of_interest, file=fout)
        routes_printed = 0
        for price, sector_list in ore_sell[ore_of_interest]:
            if routes_printed >= routes_to_print:
                break
            routes_printed += print_best_ore_prices(p,
                                                    sector_list,
                                                    " Sell for " + str(price) + " - ",
                                                    society,
                                                    False,
                                                    True,
                                                    unexplored_sector_society)
        # TODO: List some profitable trade routes, too ? (max_trade_routes ?)
    if len(ores_to_buy) > 0:
        print(file=fout)
        print("Best way to buy %s:" % ', '.join(ores_to_buy), file=fout)
        # These are keyed by port/sector number
        route_by_port = {}
        distances = {}
        # These are keyed by ore name
        ores_str = {}
        ports = {}
        # First, find the best places to buy each of the ores
        for ore in ores_to_buy:
            # Cheapest price is the first in the list
            (price, sector_list) = ore_buy[ore][0]
            ports[ore] = sector_list
            ores_str[ore] = '%s for %d' % (ore, price)
            for sector in sector_list:
                (distance, route, drones, src, dest, poss) = ssw_map_utils.best_route_to_sector(p,
                                                                                                sector,
                                                                                                None,
                                                                                                society,
                                                                                                unexplored_sector_society)
                distances[sector] = distance
                route_by_port[sector] = route
        # Now check whether we can save time by buying two ores at once
        # Find all the combos of ports we could use
        port_sets = []
        for ore in ores_to_buy:
            port_sets = add_ports_to_port_sets(port_sets, ports[ore], ore)
        # Find the distance for each port_set, and stuff it in there
        dist_port_sets = [(total_distance(list(port_set.values()), distances), port_set) for port_set in port_sets]
        # Now sort dist_port_sets by total distance
        dist_port_sets.sort(key=operator.itemgetter(0))
        # There may be multiple routes with the same length,
        # in which case we report all of them
        for best_dist_port_set in dist_port_sets:
            if best_dist_port_set[0] > dist_port_sets[0][0]:
                break
            for port in set(best_dist_port_set[1].values()):
                intro = ' Buy ' + ', '.join([ore for ore in ores_to_buy if best_dist_port_set[1][ore] == port])
                print('%s - %s' % (intro, route_by_port[port]), file=fout)
            print(' Total distance = %d moves' % best_dist_port_set[0], file=fout)
            print(file=fout)
    if print_asteroids:
        print(file=fout)
        print("Asteroids", file=fout)
        for ore in sorted(all_asteroids.keys()):
            print(" %d of %d %s asteroids in %s" % (len(all_asteroids[ore]),
                                                    p.expected_asteroids()/len(list(all_asteroids.keys())),
                                                    ore,
                                                    ssw_utils.sector_str(all_asteroids[ore])), end=' ', file=fout)
            if (len(p.drones) > 0):
                print(" %s" % ( drone_str_for_sectors(all_asteroids[ore],
                                                      drones_by_sector)), file=fout)
            else:
                print(file=fout)
        print(file=fout)
        print("Asteroid clusters", file=fout)
        for group in sorted(ssw_map_utils.asteroid_clusters(p), key=len, reverse=True):
            if len(group) > 1:
                ores = sorted([ore for ore, sector in group])
                sectors = sorted([sector for ore, sector in group])
                print(" %d asteroids %s in sectors %s" % (len(group), str(ores), str(sectors)), end=' ', file=fout)
                if (len(p.drones) > 0):
                    print(" %s" % (drone_str_for_sectors(sectors, drones_by_sector)), file=fout)
                else:
                    print(file=fout)
        print(file=fout)
        print("Asteroids next to planets", file=fout)
        for ore, sector in sorted(ssw_map_utils.asteroids_by_planets(p),key=operator.itemgetter(0)):
            if sector in drones_by_sector:
                drones = " ['"+ drones_by_sector[sector] + "']"
            else:
                drones = ""
            planets = [planet for (planet, loc) in p.planets if loc in ssw_sector_map.adjacent_sectors(sector,
                                                                                                       p.can_move_diagonally())]
            planets_str = planet_str(planets)
            print(" %s asteroid in sector %d next to %s%s" % (ore, sector, planets_str, drones), file=fout)
        # Only display asteroid ownership if we know of at least one droned sector
        if (len(p.drones) > 0):
            for soc in ssw_societies.adjectives:
                print(file=fout)
                print("%s Asteroid sectors" % soc, file=fout)
                all_ast_sectors = []
                for ore,sectors in all_asteroids.items():
                    all_ast_sectors += [sector for sector in sectors if sector in drones_by_sector and drones_by_sector[sector] == soc]
                print(" %s (%d)" % (str(all_ast_sectors), len(all_ast_sectors)), file=fout)
    if print_trading_ports:
        print(file=fout)
        print("Trading Ports", file=fout)
        for port in p.trading_ports:
            print(port, file=fout)
    if print_luvsats:
        print(file=fout)
        print("LuvSats", file=fout)
        for dis,route,drones,src,dest,poss in ssw_map_utils.best_routes(p,
                                                                        p.luvsats,
                                                                        None,
                                                                        society,
                                                                        unexplored_sector_society):
            print(" %s" % (route), file=fout)
    if print_your_drones:
        print(file=fout)
        print("Your Drones", file=fout)
        total_drones = 0
        for drones, sector in p.your_drones:
            total_drones += drones
            print(" %6d drones in sector %d" % (drones, sector), file=fout)
        print(" %6d drones in space in total" % (total_drones), file=fout)
    # Check that this is today's map
    if not ssw_map_utils.is_todays(p):
        print()
        print("**** Map is more than 24 hours old")
        print("From cycle %d," % p.cycle(), end=' ')
        if (p.war_ongoing()):
            print("before", end=' ')
        else:
            print("after", end=' ')
        print("the war ended")
    # Check for unknown sectors with jellyfish
    unknown_sectors_with_jellyfish = ssw_map_utils.unknown_sectors_with_jellyfish(p)
    if len(unknown_sectors_with_jellyfish) > 0:
        print()
        print("**** Don't forget to feed the empaths at New Ceylon")
        print("**** That will explore %d sector(s) : %s" % (len(unknown_sectors_with_jellyfish), str(sorted(list(unknown_sectors_with_jellyfish)))))
    if output_filename != None:
        fout.close()
    # Return the parsed map, in case we're a mere utility
    return p
# Script entry point: forward the command-line arguments (minus argv[0]).
if __name__ == '__main__':
    main(*sys.argv[1:])
| UEWBot/ssw-scripts | ssw_trade_routes.py | Python | gpl-3.0 | 37,002 | [
"VisIt"
] | 8d1bec7ad2134673d261461c4ef1d00d032a5d600df294ca9362dd6774c9c46f |
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
           verbose=False, cov_computation_method=empirical_covariance,
           random_state=None):
    """C_step procedure described in [Rouseeuw1999]_ aiming at computing MCD.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data set in which we look for the n_support observations whose
        scatter matrix has minimum determinant.

    n_support : int, > n_samples / 2
        Number of observations to compute the robust estimates of location
        and covariance from.

    remaining_iterations : int, optional
        Number of iterations to perform. According to [Rouseeuw1999]_, two
        iterations are sufficient to get close to the minimum, and we never
        need more than 30 to reach convergence.

    initial_estimates : 2-tuple, optional
        Initial estimates of location and shape from which to run the c_step
        procedure: (location estimate, covariance estimate).

    verbose : boolean, optional
        Verbose mode.

    random_state : integer or numpy.RandomState, optional
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location estimates.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimates.

    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    # Normalise the inputs, then delegate to the private implementation.
    return _c_step(np.asarray(X), n_support,
                   remaining_iterations=remaining_iterations,
                   initial_estimates=initial_estimates,
                   verbose=verbose,
                   cov_computation_method=cov_computation_method,
                   random_state=check_random_state(random_state))
def _c_step(X, n_support, random_state, remaining_iterations=30,
            initial_estimates=None, verbose=False,
            cov_computation_method=empirical_covariance):
    # Core of the C-step iteration (see ``c_step`` for the public API and
    # parameter documentation). Repeatedly refits location/covariance on
    # the n_support points with smallest Mahalanobis distance until the
    # covariance log-determinant stops decreasing, then returns
    # (location, covariance, log-det, support mask, distances).
    n_samples, n_features = X.shape
    # Initialisation
    support = np.zeros(n_samples, dtype=bool)
    if initial_estimates is None:
        # compute initial robust estimates from a random subset
        support[random_state.permutation(n_samples)[:n_support]] = True
    else:
        # get initial robust estimates from the function parameters
        location = initial_estimates[0]
        covariance = initial_estimates[1]
        # run a special iteration for that case (to get an initial support)
        precision = pinvh(covariance)
        X_centered = X - location
        # squared Mahalanobis distance of every sample to the initial fit
        dist = (np.dot(X_centered, precision) * X_centered).sum(1)
        # compute new estimates
        support[np.argsort(dist)[:n_support]] = True
    X_support = X[support]
    location = X_support.mean(0)
    covariance = cov_computation_method(X_support)
    # Iterative procedure for Minimum Covariance Determinant computation
    det = fast_logdet(covariance)
    previous_det = np.inf
    # Stop as soon as the determinant no longer decreases (or we run out
    # of iterations); the previous iterate is kept as a fallback below.
    while (det < previous_det) and (remaining_iterations > 0):
        # save old estimates values
        previous_location = location
        previous_covariance = covariance
        previous_det = det
        previous_support = support
        # compute a new support from the full data set mahalanobis distances
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
        # compute new estimates
        support = np.zeros(n_samples, dtype=bool)
        support[np.argsort(dist)[:n_support]] = True
        X_support = X[support]
        location = X_support.mean(axis=0)
        covariance = cov_computation_method(X_support)
        det = fast_logdet(covariance)
        # update remaining iterations for early stopping
        remaining_iterations -= 1
    previous_dist = dist
    # NOTE(review): 'precision' here is still the inverse of the covariance
    # from the last completed iteration; only 'location' has been updated.
    dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
    # Catch computation errors
    if np.isinf(det):
        raise ValueError(
            "Singular covariance matrix. "
            "Please check that the covariance matrix corresponding "
            "to the dataset is full rank and that MinCovDet is used with "
            "Gaussian-distributed data (or at least data drawn from a "
            "unimodal, symmetric distribution.")
    # Check convergence
    if np.allclose(det, previous_det):
        # c_step procedure converged
        if verbose:
            print("Optimal couple (location, covariance) found before"
                  " ending iterations (%d left)" % (remaining_iterations))
        results = location, covariance, det, support, dist
    elif det > previous_det:
        # determinant has increased (should not happen)
        warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
                      % (det, previous_det), RuntimeWarning)
        # fall back to the previous (better) iterate
        results = previous_location, previous_covariance, \
            previous_det, previous_support, previous_dist
    # Check early stopping
    if remaining_iterations == 0:
        if verbose:
            print('Maximum number of iterations reached')
        results = location, covariance, det, support, dist
    return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
                      verbose=False,
                      cov_computation_method=empirical_covariance,
                      random_state=None):
    """Finds the best pure subset of observations to compute MCD from it.

    The purpose of this function is to find the best sets of n_support
    observations with respect to a minimization of their covariance
    matrix determinant. Equivalently, it removes n_samples-n_support
    observations to construct what we call a pure data set (i.e. not
    containing outliers). The list of the observations of the pure
    data set is referred to as the `support`.

    Starting from a random support, the pure data set is found by the
    c_step procedure introduced by Rousseeuw and Van Driessen in
    [Rouseeuw1999]_.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data (sub)set in which we look for the n_support purest observations.

    n_support : int, [(n + p + 1)/2] < n_support < n
        The number of samples the pure data set must contain.

    select : int, int > 0
        Number of best candidates results to return.

    n_trials : int, nb_trials > 0 or 2-tuple
        Number of different initial sets of observations from which to
        run the algorithm. Instead of giving a number of trials, one can
        provide a 2-tuple of arrays with initial location estimates
        (shape (n_trials, n_features)) and initial covariance estimates
        (shape (n_trials, n_features, n_features)).

    n_iter : int, nb_iter > 0
        Maximum number of iterations for the c_step procedure.
        (2 is enough to be close to the final solution. "Never" exceeds 20).

    random_state : integer or numpy.RandomState, default None
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)

    verbose : boolean, default False
        Control the output verbosity.

    See Also
    ---------
    c_step

    Returns
    -------
    best_locations : array-like, shape (select, n_features)
        The `select` location estimates computed from the `select` best
        supports found in the data set (`X`).

    best_covariances : array-like, shape (select, n_features, n_features)
        The `select` covariance estimates computed from the `select`
        best supports found in the data set (`X`).

    best_supports : array-like, shape (select, n_samples)
        The `select` best supports found in the data set (`X`).

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    random_state = check_random_state(random_state)
    n_samples, n_features = X.shape

    # Decide whether trials start from random supports or from the
    # caller-supplied (locations, covariances) estimates.
    if isinstance(n_trials, numbers.Integral):
        run_from_estimates = False
    elif isinstance(n_trials, tuple):
        run_from_estimates = True
        estimates_list = n_trials
        n_trials = estimates_list[0].shape[0]
    else:
        raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
                        " integer, got %s (%s)" % (n_trials, type(n_trials)))

    # Run `n_trials` independent c_step procedures.
    if run_from_estimates:
        all_estimates = [
            _c_step(X, n_support, remaining_iterations=n_iter,
                    initial_estimates=(estimates_list[0][trial],
                                       estimates_list[1][trial]),
                    verbose=verbose,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state)
            for trial in range(n_trials)]
    else:
        all_estimates = [
            _c_step(X, n_support, remaining_iterations=n_iter,
                    verbose=verbose,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state)
            for trial in range(n_trials)]

    locations, covariances, dets, supports, dists = zip(*all_estimates)
    # Keep the `select` trials with the smallest determinant
    best = np.argsort(dets)[:select]
    return (np.asarray(locations)[best],
            np.asarray(covariances)[best],
            np.asarray(supports)[best],
            np.asarray(dists)[best])
def fast_mcd(X, support_fraction=None,
             cov_computation_method=empirical_covariance,
             random_state=None):
    """Estimates the Minimum Covariance Determinant matrix.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data matrix, with p features and n samples.

    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        `[n_sample + n_features + 1] / 2`.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)

    random_state : integer or numpy.RandomState, optional
        The generator used to randomly subsample. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location of the data.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance of the features.

    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.

    dist : array-like, shape (n_samples,)
        Mahalanobis distances of the observations, computed with the
        raw robust location and covariance estimates.

    Notes
    -----
    The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
    in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
    1999, American Statistical Association and the American Society
    for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates and random subsets before
    pooling them into a larger subsets, and finally into the full data set.
    Depending on the size of the initial sample, we have one, two or three
    such computation levels.

    Note that only raw estimates are returned. If one is interested in
    the correction and reweighting steps described in [Rouseeuw1999]_,
    see the MinCovDet object.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
        Determinant Estimator, 1999, American Statistical Association
        and the American Society for Quality, TECHNOMETRICS

    .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
    """
    random_state = check_random_state(random_state)
    X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
    n_samples, n_features = X.shape

    # minimum breakdown value: by default keep the smallest support size
    # that still guarantees the maximal breakdown point of the MCD.
    if support_fraction is None:
        n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
    else:
        n_support = int(support_fraction * n_samples)

    # 1-dimensional case quick computation
    # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
    #  Regression and Outlier Detection, John Wiley & Sons, chapter 4)
    if n_features == 1:
        if n_support < n_samples:
            # find the sample shortest halves
            X_sorted = np.sort(np.ravel(X))
            diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
            halves_start = np.where(diff == np.min(diff))[0]
            # take the middle points' mean to get the robust location estimate
            location = 0.5 * (X_sorted[n_support + halves_start]
                              + X_sorted[halves_start]).mean()
            support = np.zeros(n_samples, dtype=bool)
            X_centered = X - location
            support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
            covariance = np.asarray([[np.var(X[support])]])
            location = np.array([location])
            # get precision matrix in an optimized way
            precision = pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
        else:
            # support covers the whole sample: plain (non-robust) estimates
            support = np.ones(n_samples, dtype=bool)
            covariance = np.asarray([[np.var(X)]])
            location = np.asarray([np.mean(X)])
            X_centered = X - location
            # get precision matrix in an optimized way
            precision = pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)

    # Starting FastMCD algorithm for p-dimensional case
    if (n_samples > 500) and (n_features > 1):
        # 1. Find candidate supports on subsets
        # a. split the set in subsets of size ~ 300
        n_subsets = n_samples // 300
        n_samples_subsets = n_samples // n_subsets
        samples_shuffle = random_state.permutation(n_samples)
        h_subset = int(np.ceil(n_samples_subsets *
                               (n_support / float(n_samples))))
        # b. perform a total of 500 trials
        n_trials_tot = 500
        # c. select 10 best (location, covariance) for each subset
        n_best_sub = 10
        n_trials = max(10, n_trials_tot // n_subsets)
        n_best_tot = n_subsets * n_best_sub
        all_best_locations = np.zeros((n_best_tot, n_features))
        try:
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal).
            # BUGFIX: shrink the candidate counts *before* re-allocating;
            # re-allocating with the unchanged n_best_tot would request the
            # same (too large) array and raise MemoryError again.
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        for i in range(n_subsets):
            low_bound = i * n_samples_subsets
            high_bound = low_bound + n_samples_subsets
            current_subset = X[samples_shuffle[low_bound:high_bound]]
            best_locations_sub, best_covariances_sub, _, _ = select_candidates(
                current_subset, h_subset, n_trials,
                select=n_best_sub, n_iter=2,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
            subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
            all_best_locations[subset_slice] = best_locations_sub
            all_best_covariances[subset_slice] = best_covariances_sub
        # 2. Pool the candidate supports into a merged set
        # (possibly the full dataset)
        n_samples_merged = min(1500, n_samples)
        h_merged = int(np.ceil(n_samples_merged *
                               (n_support / float(n_samples))))
        if n_samples > 1500:
            n_best_merged = 10
        else:
            n_best_merged = 1
        # find the best couples (location, covariance) on the merged set
        selection = random_state.permutation(n_samples)[:n_samples_merged]
        locations_merged, covariances_merged, supports_merged, d = \
            select_candidates(
                X[selection], h_merged,
                n_trials=(all_best_locations, all_best_covariances),
                select=n_best_merged,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
        # 3. Finally get the overall best (locations, covariance) couple
        if n_samples < 1500:
            # directly get the best couple (location, covariance)
            location = locations_merged[0]
            covariance = covariances_merged[0]
            support = np.zeros(n_samples, dtype=bool)
            dist = np.zeros(n_samples)
            support[selection] = supports_merged[0]
            dist[selection] = d[0]
        else:
            # select the best couple on the full dataset
            locations_full, covariances_full, supports_full, d = \
                select_candidates(
                    X, n_support,
                    n_trials=(locations_merged, covariances_merged),
                    select=1,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state)
            location = locations_full[0]
            covariance = covariances_full[0]
            support = supports_full[0]
            dist = d[0]
    elif n_features > 1:
        # small sample (<= 500): work on the full data set directly
        # 1. Find the 10 best couples (location, covariance)
        # considering two iterations
        n_trials = 30
        n_best = 10
        locations_best, covariances_best, _, _ = select_candidates(
            X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
            cov_computation_method=cov_computation_method,
            random_state=random_state)
        # 2. Select the best couple on the full dataset amongst the 10
        locations_full, covariances_full, supports_full, d = select_candidates(
            X, n_support, n_trials=(locations_best, covariances_best),
            select=1, cov_computation_method=cov_computation_method,
            random_state=random_state)
        location = locations_full[0]
        covariance = covariances_full[0]
        support = supports_full[0]
        dist = d[0]

    return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
    """Minimum Covariance Determinant (MCD): robust estimator of covariance.

    The Minimum Covariance Determinant covariance estimator is to be applied
    on Gaussian-distributed data, but could still be relevant on data
    drawn from a unimodal, symmetric distribution. It is not meant to be used
    with multi-modal data (the algorithm used to fit a MinCovDet object is
    likely to fail in such a case).
    One should consider projection pursuit methods to deal with multi-modal
    datasets.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    store_precision : bool
        Specify if the estimated precision is stored.

    assume_centered : Boolean
        If True, the support of the robust location and the covariance
        estimates is computed, and a covariance estimate is recomputed from
        it, without centering the data.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, the robust location and covariance are directly computed
        with the FastMCD algorithm without additional treatment.

    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        [n_sample + n_features + 1] / 2

    random_state : integer or numpy.RandomState, optional
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    Attributes
    ----------
    raw_location_ : array-like, shape (n_features,)
        The raw robust estimated location before correction and re-weighting.

    raw_covariance_ : array-like, shape (n_features, n_features)
        The raw robust estimated covariance before correction and re-weighting.

    raw_support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute
        the raw robust estimates of location and shape, before correction
        and re-weighting.

    location_ : array-like, shape (n_features,)
        Estimated robust location

    covariance_ : array-like, shape (n_features, n_features)
        Estimated robust covariance matrix

    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust estimates of location and shape.

    dist_ : array-like, shape (n_samples,)
        Mahalanobis distances of the training set (on which `fit` is called)
        observations.

    References
    ----------
    .. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
        J. Am Stat Ass, 79:871, 1984.`
    .. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS`
    .. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`

    """
    # Covariance used on the (inlier) support set; kept as a hook so
    # subclasses can plug in a different non-robust estimator.
    _nonrobust_covariance = staticmethod(empirical_covariance)

    def __init__(self, store_precision=True, assume_centered=False,
                 support_fraction=None, random_state=None):
        self.store_precision = store_precision
        self.assume_centered = assume_centered
        self.support_fraction = support_fraction
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
          Training data, where n_samples is the number of samples
          and n_features is the number of features.

        y : not used, present for API consistence purpose.

        Returns
        -------
        self : object
            Returns self.

        """
        X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X, support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state)
        if self.assume_centered:
            # discard the estimated location and recompute the covariance
            # of the support observations around the (assumed) zero mean
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                        assume_centered=True)
            # get precision matrix in an optimized way
            precision = pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)
        # re-weight estimator
        self.reweight_covariance(X)
        return self

    def correct_covariance(self, data):
        """Apply a correction to raw Minimum Covariance Determinant estimates.

        Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1999]_.

        Parameters
        ----------
        data : array-like, shape (n_samples, n_features)
          The data matrix, with p features and n samples.
          The data set must be the one which was used to compute
          the raw estimates.

        Returns
        -------
        covariance_corrected : array-like, shape (n_features, n_features)
            Corrected robust covariance estimate.

        """
        # rescale so that the median squared distance matches the chi2 median
        correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
        covariance_corrected = self.raw_covariance_ * correction
        # distances must be rescaled inversely to stay consistent with the
        # corrected covariance
        self.dist_ /= correction
        return covariance_corrected

    def reweight_covariance(self, data):
        """Re-weight raw Minimum Covariance Determinant estimates.

        Re-weight observations using Rousseeuw's method (equivalent to
        deleting outlying observations from the data set before
        computing location and covariance estimates). [Rouseeuw1984]_

        Parameters
        ----------
        data : array-like, shape (n_samples, n_features)
          The data matrix, with p features and n samples.
          The data set must be the one which was used to compute
          the raw estimates.

        Returns
        -------
        location_reweighted : array-like, shape (n_features, )
            Re-weighted robust location estimate.

        covariance_reweighted : array-like, shape (n_features, n_features)
            Re-weighted robust covariance estimate.

        support_reweighted : array-like, type boolean, shape (n_samples,)
            A mask of the observations that have been used to compute
            the re-weighted robust location and covariance estimates.

        """
        n_samples, n_features = data.shape
        # keep only observations within the 97.5% chi2 quantile
        mask = self.dist_ < chi2(n_features).isf(0.025)
        if self.assume_centered:
            location_reweighted = np.zeros(n_features)
        else:
            location_reweighted = data[mask].mean(0)
        covariance_reweighted = self._nonrobust_covariance(
            data[mask], assume_centered=self.assume_centered)
        support_reweighted = np.zeros(n_samples, dtype=bool)
        support_reweighted[mask] = True
        self._set_covariance(covariance_reweighted)
        self.location_ = location_reweighted
        self.support_ = support_reweighted
        # recompute Mahalanobis distances with the re-weighted estimates
        X_centered = data - self.location_
        self.dist_ = np.sum(
            np.dot(X_centered, self.get_precision()) * X_centered, 1)
        return location_reweighted, covariance_reweighted, support_reweighted
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/covariance/robust_covariance.py | Python | gpl-2.0 | 29,653 | [
"Gaussian"
] | 2e95ba77da0b87311502f21f08359d991e02add37cfdc92f32d35c09e24fe079 |
#!/usr/bin/python

"""Test of sayAll output."""

from macaroon.playback import *

import utils

# Speech expected from a single KP_Add press triggering a SayAll.
_EXPECTED_SAY_ALL_SPEECH = [
    "SPEECH OUTPUT: 'Hello world.'",
    "SPEECH OUTPUT: 'I wonder what a bevezeto is.'",
    "SPEECH OUTPUT: 'I should Google that.'",
    "SPEECH OUTPUT: 'Aha! It is the Hungarian word for \"Introduction\".'",
    "SPEECH OUTPUT: 'Here is some'",
    "SPEECH OUTPUT: 'proof'",
    "SPEECH OUTPUT: 'link'",
    "SPEECH OUTPUT: '.'",
    "SPEECH OUTPUT: 'I really think we need to get Attila to teach the Orca team some Hungarian.'",
    "SPEECH OUTPUT: 'Maybe one (really easy) phrase per bug comment.'",
    "SPEECH OUTPUT: 'separator'",
    "SPEECH OUTPUT: 'Foo'",
    "SPEECH OUTPUT: 'link'",
]

sequence = MacroSequence()

# Record the presentation produced by SayAll and compare it against the
# expected utterances above.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Add"))
sequence.append(utils.AssertPresentationAction(
    "1. KP_Add to do a SayAll",
    _EXPECTED_SAY_ALL_SPEECH))

sequence.append(utils.AssertionSummaryAction())

sequence.start()
| pvagner/orca | test/keystrokes/firefox/say_all_bug_591351_1.py | Python | lgpl-2.1 | 969 | [
"ORCA"
] | b23acae7bb669e3deddff5bcd0e4653c7447a80037ea4e7e11cb3e8ac4ae545e |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.