text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
import re
from .torque import Torque
__all__ = ('SlurmTorque',)
class SlurmTorque(Torque):
    """A CLI job executor for Slurm's Torque compatibility mode.

    This differs from the real Torque CLI in that the ``-x`` (XML) command
    line option is not available, so job status must be parsed from the
    plain-text ``qstat`` table instead of XML.
    """

    # Column separator in qstat's table output: one or more whitespace
    # characters.  Raw string avoids the invalid-escape warning for "\s",
    # and compiling once here avoids recompiling on every parsed line.
    _WHITESPACE_PATTERN = re.compile(r"\s+")

    def get_status(self, job_ids=None):
        """Return the CLI command used to query job statuses."""
        return 'qstat'

    def parse_status(self, status, job_ids):
        """Parse plain-text ``qstat`` output into a job-state mapping.

        Args:
            status: stdout text produced by the ``qstat`` command.
            job_ids: collection of job ids we are interested in.

        Returns:
            Dict mapping each recognized job id to its Galaxy job state
            (as produced by the inherited ``_get_job_state``).
        """
        rval = {}
        for line in status.strip().splitlines():
            if line.startswith("Job ID"):
                continue  # skip the table header row
            line_parts = self._WHITESPACE_PATTERN.split(line)
            if len(line_parts) < 5:
                continue  # separator rows / lines too short to hold a state
            job_id = line_parts[0]  # renamed from `id` (shadowed the builtin)
            state = line_parts[4]
            if job_id in job_ids:
                # map PBS job states to Galaxy job states.
                rval[job_id] = self._get_job_state(state)
        return rval
|
ssorgatem/pulsar
|
pulsar/managers/util/cli/job/slurm_torque.py
|
Python
|
apache-2.0
| 905
|
[
"Galaxy"
] |
6d02c5a4b80c644c5b7cbdd882879e919a39d6484f3b501d4857124d881c38e4
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''Wave Function Stability Analysis
Ref.
JCP, 66, 3045
JCP, 104, 9047
See also tddft/rhf.py and scf/newton_ah.py
'''
import numpy
import scipy
from functools import reduce
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import hf, hf_symm, uhf_symm
from pyscf.scf import _response_functions
from pyscf.soscf import newton_ah
def rhf_stability(mf, internal=True, external=False, verbose=None):
    '''
    Stability analysis for RHF/RKS method.

    Args:
        mf : RHF or RKS object

    Kwargs:
        internal : bool
            Internal stability, within the RHF space.
        external : bool
            External stability. Including the RHF -> UHF and real -> complex
            stability analysis.

    Returns:
        A pair of orbital sets closer to the stable condition: the first is
        from the internal check, the second from the external check.  A
        check that was not requested yields None in its slot.
    '''
    mo_internal = rhf_internal(mf, verbose=verbose) if internal else None
    mo_external = rhf_external(mf, verbose=verbose) if external else None
    return mo_internal, mo_external
def uhf_stability(mf, internal=True, external=False, verbose=None):
    '''
    Stability analysis for UHF/UKS method.

    Args:
        mf : UHF or UKS object

    Kwargs:
        internal : bool
            Internal stability, within the UHF space.
        external : bool
            External stability. Including the UHF -> GHF and real -> complex
            stability analysis.

    Returns:
        New orbitals that are more close to the stable condition.  The return
        value includes two set of orbitals.  The first corresponds to the
        internal stability and the second corresponds to the external
        stability.  A check that was not requested yields None in its slot.
    '''
    mo_i = mo_e = None
    if internal:
        mo_i = uhf_internal(mf, verbose=verbose)
    if external:
        mo_e = uhf_external(mf, verbose=verbose)
    return mo_i, mo_e
def rohf_stability(mf, internal=True, external=False, verbose=None):
    '''
    Stability analysis for ROHF/ROKS method.

    Args:
        mf : ROHF or ROKS object

    Kwargs:
        internal : bool
            Internal stability, within the RHF space.
        external : bool
            External stability. It is not available in current version.

    Returns:
        A pair of orbital sets that are closer to the required stable
        condition; a check that was not requested yields None in its slot.
    '''
    mo_internal = rohf_internal(mf, verbose=verbose) if internal else None
    mo_external = rohf_external(mf, verbose=verbose) if external else None
    return mo_internal, mo_external
def ghf_stability(mf, verbose=None, with_symmetry=True):
    '''
    Internal stability analysis for GHF method.

    Args:
        mf : GHF object

    Kwargs:
        verbose : int
            Print level.
        with_symmetry : bool
            Restrict orbital rotations to those preserving the point group
            symmetry.  Previously hard-coded to True inside the function
            (making the symmetry-breaking branch unreachable); exposed as a
            trailing keyword for consistency with the other stability
            functions in this module while keeping the old call signature
            backward-compatible.

    Returns:
        New orbitals that are more close to the stable condition.
    '''
    log = logger.new_logger(mf, verbose)
    g, hop, hdiag = newton_ah.gen_g_hop_ghf(mf, mf.mo_coeff, mf.mo_occ,
                                            with_symmetry=with_symmetry)
    hdiag *= 2

    def precond(dx, e, x0):
        # Diagonal (Jacobi) preconditioner; clamp near-zero denominators.
        hdiagd = hdiag - e
        hdiagd[abs(hdiagd)<1e-8] = 1e-8
        return dx/hdiagd

    def hessian_x(x):  # See comments in function rhf_internal
        return hop(x).real * 2

    # Initial guess: inverse-diagonal weighting on nonzero-gradient modes.
    x0 = numpy.zeros_like(g)
    x0[g!=0] = 1. / hdiag[g!=0]
    if not with_symmetry:  # allow to break point group symmetry
        x0[numpy.argmin(hdiag)] = 1
    e, v = lib.davidson(hessian_x, x0, precond, tol=1e-4, verbose=log)
    if e < -1e-5:
        log.note('GHF wavefunction has an internal instability')
        mo = _rotate_mo(mf.mo_coeff, mf.mo_occ, v)
    else:
        log.note('GHF wavefunction is stable in the internal stability analysis')
        mo = mf.mo_coeff
    return mo
def rhf_internal(mf, with_symmetry=True, verbose=None):
    """Check internal stability of an RHF/RKS wavefunction.

    The lowest eigenvalue of the orbital-rotation Hessian is found with the
    Davidson solver; a value below -1e-5 signals an instability, in which
    case the orbitals are rotated along the unstable mode.

    Returns the (possibly rotated) MO coefficients.
    """
    log = logger.new_logger(mf, verbose)
    g, hop, hdiag = newton_ah.gen_g_hop_rhf(mf, mf.mo_coeff, mf.mo_occ,
                                            with_symmetry=with_symmetry)
    hdiag *= 2

    def precond(dx, e, x0):
        # Diagonal (Jacobi) preconditioner; clamp near-zero denominators.
        hdiagd = hdiag - e
        hdiagd[abs(hdiagd)<1e-8] = 1e-8
        return dx/hdiagd

    # The results of hop(x) corresponds to a displacement that reduces
    # gradients g.  It is the vir-occ block of the matrix-vector product
    # (Hessian*x).  The occ-vir block equals x2.T.conj().  The overall
    # Hessian for internal rotation is x2 + x2.T.conj().  This is
    # the reason we apply (.real * 2) below.
    def hessian_x(x):
        return hop(x).real * 2

    # Initial guess: inverse-diagonal weighting on nonzero-gradient modes.
    x0 = numpy.zeros_like(g)
    x0[g!=0] = 1. / hdiag[g!=0]
    if not with_symmetry:  # allow to break point group symmetry
        x0[numpy.argmin(hdiag)] = 1
    e, v = lib.davidson(hessian_x, x0, precond, tol=1e-4, verbose=log)
    if e < -1e-5:
        log.note('RHF/RKS wavefunction has an internal instability')
        mo = _rotate_mo(mf.mo_coeff, mf.mo_occ, v)
    else:
        log.note('RHF/RKS wavefunction is stable in the internal stability analysis')
        mo = mf.mo_coeff
    return mo
def _rotate_mo(mo_coeff, mo_occ, dx):
    """Rotate MO coefficients along the packed rotation parameters *dx*."""
    rotation = newton_ah.expmat(hf.unpack_uniq_var(dx, mo_occ))
    return numpy.dot(mo_coeff, rotation)
def _gen_hop_rhf_external(mf, with_symmetry=True, verbose=None):
    """Build Hessian-vector products for the RHF external stability checks.

    Returns ``(hop_real2complex, hdiag, hop_rhf2uhf, hdiag)``: the
    Hessian-vector product and diagonal for the real -> complex check,
    then for the RHF -> UHF (triplet) check.  Both checks share the same
    orbital-energy-difference diagonal.
    """
    mol = mf.mol
    mo_coeff = mf.mo_coeff
    mo_occ = mf.mo_occ
    # vir/occ partitioning of the MO space (RHF: occupation is 2 or 0).
    occidx = numpy.where(mo_occ==2)[0]
    viridx = numpy.where(mo_occ==0)[0]
    nocc = len(occidx)
    nvir = len(viridx)
    orbv = mo_coeff[:,viridx]
    orbo = mo_coeff[:,occidx]

    if with_symmetry and mol.symmetry:
        # Mask rotations that would mix orbitals of different irreps.
        orbsym = hf_symm.get_orbsym(mol, mo_coeff)
        sym_forbid = orbsym[viridx].reshape(-1,1) != orbsym[occidx]

    # Fock matrix in the MO basis and its occ-occ / vir-vir blocks.
    h1e = mf.get_hcore()
    dm0 = mf.make_rdm1(mo_coeff, mo_occ)
    fock_ao = h1e + mf.get_veff(mol, dm0)
    fock = reduce(numpy.dot, (mo_coeff.conj().T, fock_ao, mo_coeff))
    foo = fock[occidx[:,None],occidx]
    fvv = fock[viridx[:,None],viridx]

    # Hessian diagonal: orbital-energy differences e_a - e_i.
    hdiag = fvv.diagonal().reshape(-1,1) - foo.diagonal()
    if with_symmetry and mol.symmetry:
        hdiag[sym_forbid] = 0
    hdiag = hdiag.ravel()

    vrespz = mf.gen_response(singlet=None, hermi=2)
    def hop_real2complex(x1):
        # Hessian-vector product for rotations into the complex plane;
        # x1 is the packed (nvir x nocc) rotation amplitude block.
        x1 = x1.reshape(nvir,nocc)
        if with_symmetry and mol.symmetry:
            x1 = x1.copy()
            x1[sym_forbid] = 0
        x2 = numpy.einsum('ps,sq->pq', fvv, x1)
        x2-= numpy.einsum('ps,rp->rs', foo, x1)

        d1 = reduce(numpy.dot, (orbv, x1*2, orbo.conj().T))
        dm1 = d1 - d1.conj().T
        # No Coulomb and fxc contribution for anti-hermitian DM
        v1 = vrespz(dm1)
        x2 += reduce(numpy.dot, (orbv.conj().T, v1, orbo))
        if with_symmetry and mol.symmetry:
            x2[sym_forbid] = 0
        return x2.ravel()

    vresp1 = mf.gen_response(singlet=False, hermi=1)
    def hop_rhf2uhf(x1):
        # NOTE(review): numint is imported but not used in this closure.
        from pyscf.dft import numint
        # See also rhf.TDA triplet excitation
        x1 = x1.reshape(nvir,nocc)
        if with_symmetry and mol.symmetry:
            x1 = x1.copy()
            x1[sym_forbid] = 0
        x2 = numpy.einsum('ps,sq->pq', fvv, x1)
        x2-= numpy.einsum('ps,rp->rs', foo, x1)

        d1 = reduce(numpy.dot, (orbv, x1*2, orbo.conj().T))
        # Hermitian density response (triplet channel via singlet=False).
        dm1 = d1 + d1.conj().T
        v1ao = vresp1(dm1)
        x2 += reduce(numpy.dot, (orbv.conj().T, v1ao, orbo))
        if with_symmetry and mol.symmetry:
            x2[sym_forbid] = 0
        return x2.real.ravel()

    return hop_real2complex, hdiag, hop_rhf2uhf, hdiag
def rhf_external(mf, with_symmetry=True, verbose=None):
    """Check external stability of an RHF/RKS wavefunction.

    Two checks are run: real -> complex (result only logged) and
    RHF -> UHF (if unstable, orbitals are rotated along the unstable mode).

    Returns a pair of MO coefficient sets; the second entry is always the
    unmodified input orbitals.
    """
    log = logger.new_logger(mf, verbose)
    hop1, hdiag1, hop2, hdiag2 = _gen_hop_rhf_external(mf, with_symmetry)

    def precond(dx, e, x0):
        # Diagonal preconditioner for the real -> complex Hessian.
        hdiagd = hdiag1 - e
        hdiagd[abs(hdiagd)<1e-8] = 1e-8
        return dx/hdiagd
    x0 = numpy.zeros_like(hdiag1)
    x0[hdiag1>1e-5] = 1. / hdiag1[hdiag1>1e-5]
    if not with_symmetry:  # allow to break point group symmetry
        x0[numpy.argmin(hdiag1)] = 1
    e1, v1 = lib.davidson(hop1, x0, precond, tol=1e-4, verbose=log)
    if e1 < -1e-5:
        log.note('RHF/RKS wavefunction has a real -> complex instability')
    else:
        log.note('RHF/RKS wavefunction is stable in the real -> complex stability analysis')

    def precond(dx, e, x0):
        # Diagonal preconditioner for the RHF -> UHF Hessian.
        hdiagd = hdiag2 - e
        hdiagd[abs(hdiagd)<1e-8] = 1e-8
        return dx/hdiagd
    # NOTE(review): the converged real -> complex eigenvector is reused as
    # the initial guess for the RHF -> UHF check — both vectors use the
    # same vir-occ packing (see _gen_hop_rhf_external); confirm intended.
    x0 = v1
    e3, v3 = lib.davidson(hop2, x0, precond, tol=1e-4, verbose=log)
    if e3 < -1e-5:
        log.note('RHF/RKS wavefunction has a RHF/RKS -> UHF/UKS instability.')
        mo = (_rotate_mo(mf.mo_coeff, mf.mo_occ, v3), mf.mo_coeff)
    else:
        log.note('RHF/RKS wavefunction is stable in the RHF/RKS -> UHF/UKS stability analysis')
        mo = (mf.mo_coeff, mf.mo_coeff)
    return mo
def rohf_internal(mf, with_symmetry=True, verbose=None):
    """Check internal stability of a ROHF/ROKS wavefunction.

    Same Davidson scheme as rhf_internal; returns the (possibly rotated)
    MO coefficients.
    """
    log = logger.new_logger(mf, verbose)
    g, hop, hdiag = newton_ah.gen_g_hop_rohf(mf, mf.mo_coeff, mf.mo_occ,
                                             with_symmetry=with_symmetry)
    hdiag *= 2

    def precond(dx, e, x0):
        # Diagonal preconditioner; clamp near-zero denominators.
        hdiagd = hdiag - e
        hdiagd[abs(hdiagd)<1e-8] = 1e-8
        return dx/hdiagd

    def hessian_x(x):  # See comments in function rhf_internal
        return hop(x).real * 2

    # Initial guess: inverse-diagonal weighting on nonzero-gradient modes.
    x0 = numpy.zeros_like(g)
    x0[g!=0] = 1. / hdiag[g!=0]
    if not with_symmetry:  # allow to break point group symmetry
        x0[numpy.argmin(hdiag)] = 1
    e, v = lib.davidson(hessian_x, x0, precond, tol=1e-4, verbose=log)
    if e < -1e-5:
        log.note('ROHF wavefunction has an internal instability.')
        mo = _rotate_mo(mf.mo_coeff, mf.mo_occ, v)
    else:
        log.note('ROHF wavefunction is stable in the internal stability analysis')
        mo = mf.mo_coeff
    return mo
def rohf_external(mf, with_symmetry=True, verbose=None):
    """External stability analysis for ROHF/ROKS.

    Not available in the current version (see rohf_stability's docstring);
    always raises NotImplementedError.
    """
    raise NotImplementedError
def uhf_internal(mf, with_symmetry=True, verbose=None):
    """Check internal stability of a UHF/UKS wavefunction.

    Same Davidson scheme as rhf_internal.  If unstable, both the alpha and
    beta orbitals are rotated along the unstable mode.

    Returns the (possibly rotated) MO coefficients (alpha, beta).
    """
    log = logger.new_logger(mf, verbose)
    g, hop, hdiag = newton_ah.gen_g_hop_uhf(mf, mf.mo_coeff, mf.mo_occ,
                                            with_symmetry=with_symmetry)
    hdiag *= 2

    def precond(dx, e, x0):
        # Diagonal preconditioner; clamp near-zero denominators.
        hdiagd = hdiag - e
        hdiagd[abs(hdiagd)<1e-8] = 1e-8
        return dx/hdiagd

    def hessian_x(x):  # See comments in function rhf_internal
        return hop(x).real * 2

    # Initial guess: inverse-diagonal weighting on nonzero-gradient modes.
    x0 = numpy.zeros_like(g)
    x0[g!=0] = 1. / hdiag[g!=0]
    if not with_symmetry:  # allow to break point group symmetry
        x0[numpy.argmin(hdiag)] = 1
    e, v = lib.davidson(hessian_x, x0, precond, tol=1e-4, verbose=log)
    if e < -1e-5:
        log.note('UHF/UKS wavefunction has an internal instability.')
        # The solution vector packs the alpha (nvira x nocca) block first,
        # followed by the beta block.
        nocca = numpy.count_nonzero(mf.mo_occ[0]> 0)
        nvira = numpy.count_nonzero(mf.mo_occ[0]==0)
        mo = (_rotate_mo(mf.mo_coeff[0], mf.mo_occ[0], v[:nocca*nvira]),
              _rotate_mo(mf.mo_coeff[1], mf.mo_occ[1], v[nocca*nvira:]))
    else:
        log.note('UHF/UKS wavefunction is stable in the internal stability analysis')
        mo = mf.mo_coeff
    return mo
def _gen_hop_uhf_external(mf, with_symmetry=True, verbose=None):
    """Build Hessian-vector products for the UHF external stability checks.

    Returns ``(hop_real2complex, hdiag1, hop_uhf2ghf, hdiag2)``: the
    Hessian-vector product and diagonal for the real -> complex check,
    followed by the same pair for the UHF -> GHF (spin-flip) check.
    """
    mol = mf.mol
    mo_coeff = mf.mo_coeff
    # NOTE(review): mo_energy is never used below.
    mo_energy = mf.mo_energy
    mo_occ = mf.mo_occ
    # Per-spin vir/occ partitioning of the MO space.
    occidxa = numpy.where(mo_occ[0]>0)[0]
    occidxb = numpy.where(mo_occ[1]>0)[0]
    viridxa = numpy.where(mo_occ[0]==0)[0]
    viridxb = numpy.where(mo_occ[1]==0)[0]
    nocca = len(occidxa)
    noccb = len(occidxb)
    nvira = len(viridxa)
    nvirb = len(viridxb)
    orboa = mo_coeff[0][:,occidxa]
    orbob = mo_coeff[1][:,occidxb]
    orbva = mo_coeff[0][:,viridxa]
    orbvb = mo_coeff[1][:,viridxb]

    if with_symmetry and mol.symmetry:
        # Mask same-spin rotations that would mix different irreps.
        orbsyma, orbsymb = uhf_symm.get_orbsym(mol, mo_coeff)
        sym_forbida = orbsyma[viridxa].reshape(-1,1) != orbsyma[occidxa]
        sym_forbidb = orbsymb[viridxb].reshape(-1,1) != orbsymb[occidxb]
        sym_forbid1 = numpy.hstack((sym_forbida.ravel(), sym_forbidb.ravel()))

    # Fock matrix in the MO basis for each spin, and its diagonal blocks.
    h1e = mf.get_hcore()
    dm0 = mf.make_rdm1(mo_coeff, mo_occ)
    fock_ao = h1e + mf.get_veff(mol, dm0)
    focka = reduce(numpy.dot, (mo_coeff[0].conj().T, fock_ao[0], mo_coeff[0]))
    fockb = reduce(numpy.dot, (mo_coeff[1].conj().T, fock_ao[1], mo_coeff[1]))
    fooa = focka[occidxa[:,None],occidxa]
    fvva = focka[viridxa[:,None],viridxa]
    foob = fockb[occidxb[:,None],occidxb]
    fvvb = fockb[viridxb[:,None],viridxb]

    # Diagonal for the real -> complex Hessian: e_a - e_i per spin.
    h_diaga =(focka[viridxa,viridxa].reshape(-1,1) - focka[occidxa,occidxa])
    h_diagb =(fockb[viridxb,viridxb].reshape(-1,1) - fockb[occidxb,occidxb])
    hdiag1 = numpy.hstack((h_diaga.reshape(-1), h_diagb.reshape(-1)))
    if with_symmetry and mol.symmetry:
        hdiag1[sym_forbid1] = 0

    # NOTE(review): mem_now/max_memory are computed but never used here.
    mem_now = lib.current_memory()[0]
    max_memory = max(2000, mf.max_memory*.8-mem_now)

    vrespz = mf.gen_response(with_j=False, hermi=2)
    def hop_real2complex(x1):
        # x1 packs the alpha (nvira x nocca) block, then the beta block.
        if with_symmetry and mol.symmetry:
            x1 = x1.copy()
            x1[sym_forbid1] = 0
        x1a = x1[:nvira*nocca].reshape(nvira,nocca)
        x1b = x1[nvira*nocca:].reshape(nvirb,noccb)
        x2a = numpy.einsum('pr,rq->pq', fvva, x1a)
        x2a-= numpy.einsum('sq,ps->pq', fooa, x1a)
        x2b = numpy.einsum('pr,rq->pq', fvvb, x1b)
        x2b-= numpy.einsum('qs,ps->pq', foob, x1b)

        d1a = reduce(numpy.dot, (orbva, x1a, orboa.conj().T))
        d1b = reduce(numpy.dot, (orbvb, x1b, orbob.conj().T))
        # Anti-hermitian density response (hermi=2).
        dm1 = numpy.array((d1a-d1a.conj().T, d1b-d1b.conj().T))
        v1 = vrespz(dm1)
        x2a += reduce(numpy.dot, (orbva.conj().T, v1[0], orboa))
        x2b += reduce(numpy.dot, (orbvb.conj().T, v1[1], orbob))
        x2 = numpy.hstack((x2a.ravel(), x2b.ravel()))
        if with_symmetry and mol.symmetry:
            x2[sym_forbid1] = 0
        return x2

    if with_symmetry and mol.symmetry:
        # NOTE(review): orbsyma/orbsymb are recomputed here although they
        # were already obtained above when symmetry is enabled.
        orbsyma, orbsymb = uhf_symm.get_orbsym(mol, mo_coeff)
        # Mask spin-flip rotations that would mix different irreps.
        sym_forbidab = orbsyma[viridxa].reshape(-1,1) != orbsymb[occidxb]
        sym_forbidba = orbsymb[viridxb].reshape(-1,1) != orbsyma[occidxa]
        sym_forbid2 = numpy.hstack((sym_forbidab.ravel(), sym_forbidba.ravel()))
    # Diagonal for the UHF -> GHF Hessian: cross-spin orbital-energy gaps.
    hdiagab = fvva.diagonal().reshape(-1,1) - foob.diagonal()
    hdiagba = fvvb.diagonal().reshape(-1,1) - fooa.diagonal()
    hdiag2 = numpy.hstack((hdiagab.ravel(), hdiagba.ravel()))
    if with_symmetry and mol.symmetry:
        hdiag2[sym_forbid2] = 0

    vresp1 = mf.gen_response(with_j=False, hermi=0)
    # Spin flip GHF solution is not considered
    def hop_uhf2ghf(x1):
        # x1 packs the a-vir x b-occ block, then the b-vir x a-occ block.
        if with_symmetry and mol.symmetry:
            x1 = x1.copy()
            x1[sym_forbid2] = 0
        x1ab = x1[:nvira*noccb].reshape(nvira,noccb)
        x1ba = x1[nvira*noccb:].reshape(nvirb,nocca)
        x2ab = numpy.einsum('pr,rq->pq', fvva, x1ab)
        x2ab-= numpy.einsum('sq,ps->pq', foob, x1ab)
        x2ba = numpy.einsum('pr,rq->pq', fvvb, x1ba)
        x2ba-= numpy.einsum('qs,ps->pq', fooa, x1ba)

        d1ab = reduce(numpy.dot, (orbva, x1ab, orbob.conj().T))
        d1ba = reduce(numpy.dot, (orbvb, x1ba, orboa.conj().T))
        dm1 = numpy.array((d1ab+d1ba.conj().T, d1ba+d1ab.conj().T))
        v1 = vresp1(dm1)
        x2ab += reduce(numpy.dot, (orbva.conj().T, v1[0], orbob))
        x2ba += reduce(numpy.dot, (orbvb.conj().T, v1[1], orboa))
        x2 = numpy.hstack((x2ab.real.ravel(), x2ba.real.ravel()))
        if with_symmetry and mol.symmetry:
            x2[sym_forbid2] = 0
        return x2

    return hop_real2complex, hdiag1, hop_uhf2ghf, hdiag2
def uhf_external(mf, with_symmetry=True, verbose=None):
    """Check external stability of a UHF/UKS wavefunction.

    Two checks are run: real -> complex (result only logged) and
    UHF -> GHF.  Returns a single set of spinor (GHF-format) MO
    coefficients: the block-diagonal combination of the alpha/beta
    orbitals, rotated along the unstable mode if a UHF -> GHF instability
    was found.
    """
    log = logger.new_logger(mf, verbose)
    hop1, hdiag1, hop2, hdiag2 = _gen_hop_uhf_external(mf, with_symmetry)

    def precond(dx, e, x0):
        # Diagonal preconditioner for the real -> complex Hessian.
        hdiagd = hdiag1 - e
        hdiagd[abs(hdiagd)<1e-8] = 1e-8
        return dx/hdiagd
    x0 = numpy.zeros_like(hdiag1)
    x0[hdiag1>1e-5] = 1. / hdiag1[hdiag1>1e-5]
    if not with_symmetry:  # allow to break point group symmetry
        x0[numpy.argmin(hdiag1)] = 1
    e1, v = lib.davidson(hop1, x0, precond, tol=1e-4, verbose=log)
    if e1 < -1e-5:
        log.note('UHF/UKS wavefunction has a real -> complex instability')
    else:
        log.note('UHF/UKS wavefunction is stable in the real -> complex stability analysis')

    def precond(dx, e, x0):
        # Diagonal preconditioner for the UHF -> GHF Hessian.
        hdiagd = hdiag2 - e
        hdiagd[abs(hdiagd)<1e-8] = 1e-8
        return dx/hdiagd
    x0 = numpy.zeros_like(hdiag2)
    x0[hdiag2>1e-5] = 1. / hdiag2[hdiag2>1e-5]
    if not with_symmetry:  # allow to break point group symmetry
        x0[numpy.argmin(hdiag2)] = 1
    e3, v = lib.davidson(hop2, x0, precond, tol=1e-4, verbose=log)
    log.debug('uhf_external: lowest eigs of H = %s', e3)

    # Spinor MO matrix: alpha orbitals top-left, beta bottom-right.
    mo = scipy.linalg.block_diag(*mf.mo_coeff)
    if e3 < -1e-5:
        log.note('UHF/UKS wavefunction has an UHF/UKS -> GHF/GKS instability.')
        occidxa = numpy.where(mf.mo_occ[0]> 0)[0]
        viridxa = numpy.where(mf.mo_occ[0]==0)[0]
        occidxb = numpy.where(mf.mo_occ[1]> 0)[0]
        viridxb = numpy.where(mf.mo_occ[1]==0)[0]
        nocca = len(occidxa)
        nvira = len(viridxa)
        noccb = len(occidxb)
        nvirb = len(viridxb)
        nmo = nocca + nvira
        # Unpack the solution into the spin-flip (a-vir x b-occ and
        # b-vir x a-occ) blocks of an anti-hermitian rotation generator.
        dx = numpy.zeros((nmo*2,nmo*2))
        dx[viridxa[:,None],nmo+occidxb] = v[:nvira*noccb].reshape(nvira,noccb)
        dx[nmo+viridxb[:,None],occidxa] = v[nvira*noccb:].reshape(nvirb,nocca)
        u = newton_ah.expmat(dx - dx.conj().T)
        mo = numpy.dot(mo, u)
        # Reorder columns so all occupied spinors come before the virtuals.
        mo = numpy.hstack([mo[:,:nocca], mo[:,nmo:nmo+noccb],
                           mo[:,nocca:nmo], mo[:,nmo+noccb:]])
    else:
        log.note('UHF/UKS wavefunction is stable in the UHF/UKS -> GHF/GKS stability analysis')
    return mo
if __name__ == '__main__':
    from pyscf import gto, scf, dft

    # O2 dimer: internal + external stability on RHF and (level-shifted)
    # RKS solutions.
    mol = gto.M(atom='O 0 0 0; O 0 0 1.2222', basis='631g*')
    mf = scf.RHF(mol).run()
    rhf_stability(mf, True, True, verbose=4)
    mf = dft.RKS(mol).run(level_shift=.2)
    rhf_stability(mf, True, True, verbose=4)

    mf = scf.UHF(mol).run()
    mo1 = uhf_stability(mf, True, True, verbose=4)[0]
    # Restart SCF from the internally-stabilized orbitals and re-check.
    mf = scf.newton(mf).run(mo1, mf.mo_occ)
    uhf_stability(mf, True, False, verbose=4)

    mf = scf.newton(scf.UHF(mol)).run()
    uhf_stability(mf, True, False, verbose=4)

    # Triplet O2.
    mol.spin = 2
    mf = scf.UHF(mol).run()
    uhf_stability(mf, True, True, verbose=4)
    mf = dft.UKS(mol).run()
    uhf_stability(mf, True, True, verbose=4)

    # Ozone, given as a Z-matrix.
    mol = gto.M(atom='''
O1
O2 1 1.2227
O3 1 1.2227 2 114.0451
''', basis = '631g*')
    mf = scf.RHF(mol).run()
    rhf_stability(mf, True, True, verbose=4)

    mf = scf.UHF(mol).run()
    mo1 = uhf_stability(mf, True, True, verbose=4)[0]
    mf = scf.newton(scf.UHF(mol)).run()
    uhf_stability(mf, True, True, verbose=4)
|
gkc1000/pyscf
|
pyscf/scf/stability.py
|
Python
|
apache-2.0
| 19,152
|
[
"PySCF"
] |
8abd85aa78721287001e5bbf1dbe844f359f87948f17c991fd0e9f09d450cf96
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
# Module docstring; a table of the available components is appended to it
# at import time by the documentation-generation loop further down.
__doc__ = """
Components that can be used to define a 1D model for e.g. curve fitting.
There are some components that are only useful for one particular kind of signal
and therefore their name are preceded by the signal name: eg. eels_cl_edge.
Writing a new template is really easy, just edit _template.py and maybe take a
look to the other components.
For more details see each component docstring.
====================================================================
"""
from hyperspy._components.arctan import Arctan
from hyperspy._components.bleasdale import Bleasdale
from hyperspy._components.heaviside import HeavisideStep
from hyperspy._components.eels_double_power_law import DoublePowerLaw
from hyperspy._components.eels_cl_edge import EELSCLEdge
from hyperspy._components.error_function import Erf
from hyperspy._components.exponential import Exponential
from hyperspy._components.gaussian import Gaussian
from hyperspy._components.gaussianhf import GaussianHF
from hyperspy._components.logistic import Logistic
from hyperspy._components.lorentzian import Lorentzian
from hyperspy._components.offset import Offset
from hyperspy._components.power_law import PowerLaw
from hyperspy._components.pes_see import SEE
from hyperspy._components.rc import RC
from hyperspy._components.eels_vignetting import Vignetting
from hyperspy._components.voigt import Voigt
from hyperspy._components.scalable_fixed_pattern import ScalableFixedPattern
from hyperspy._components.skew_normal import SkewNormal
from hyperspy._components.polynomial import Polynomial
from hyperspy._components.pes_core_line_shape import PESCoreLineShape
from hyperspy._components.volume_plasmon_drude import VolumePlasmonDrude
from hyperspy._components.expression import Expression
# Generating the documentation
#
# Append a one-line summary of every public component to the module
# docstring.  Grab all the currently defined global names and make a copy
# (we cannot iterate the live globals() view directly, as its size changes
# while names are added below).
_keys = sorted(globals())
# For every key in alphabetically sorted order
for key in _keys:
    # if it does not start with a "_"
    if not key.startswith('_'):
        # Get the component class (or function).  A plain globals() lookup
        # replaces the original eval(key): same result, without evaluating
        # arbitrary expressions.
        component = globals()[key]
        # If the component has documentation, grab the first 43 characters of
        # the first line of the documentation. Else just use two dots ("..")
        second_part = '..' if component.__doc__ is None else \
            component.__doc__.split('\n')[0][:43] + '..'
        # append the component name (up to 25 characters + one space) and the
        # start of the documentation as one line to the current doc
        __doc__ += key[:25] + ' ' * (26 - len(key)) + second_part + '\n'
# delete all the temporary things from the namespace once done
# so that they don't show up in the auto-complete
del key, _keys, component, second_part
|
francisco-dlp/hyperspy
|
hyperspy/components1d.py
|
Python
|
gpl-3.0
| 3,552
|
[
"Gaussian"
] |
58ad9cfbc21e36cc72cad2cfe2bae764e67e687a5865a5d8c69c855ec40a726e
|
# asciixmas
# December 1989 Larry Bartz Indianapolis, IN
#
# $Id$
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# Just like the ones I used to know!
# Via a full duplex communications channel,
# At 9600 bits per second,
# Even though it's kinda slow.
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# With ev'ry C program I write!
# May your screen be merry and bright!
# And may all your Christmases be amber or green,
# (for reduced eyestrain and improved visibility)!
#
#
# Notes on the Python version:
# I used a couple of `try...except curses.error' to get around some functions
# returning ERR. The errors come from using wrapping functions to fill
# windows to the last character cell. The C version doesn't have this problem,
# it simply ignores any return values.
#
import curses
import sys
FROMWHO = "Thomas Gellekum <tg@FreeBSD.org>"
def set_color(win, color):
    """Switch *win* to a color pair built from *color* on the shared
    background, doing nothing on monochrome terminals."""
    if not curses.has_colors():
        return
    pair = color + 1
    curses.init_pair(pair, color, my_bg)
    win.attroff(curses.A_COLOR)
    win.attron(curses.color_pair(pair))
def unset_color(win):
    """Restore *win* to the default color pair (no-op on monochrome)."""
    if not curses.has_colors():
        return
    win.attrset(curses.color_pair(0))
def look_out(msecs):
    """Sleep *msecs* milliseconds; if any key was pressed, beep and quit."""
    curses.napms(msecs)
    key = stdscr.getch()
    if key != -1:
        curses.beep()
        sys.exit(0)
def boxit():
    """Draw the frame: a left wall and two horizontal rules."""
    for row in range(20):
        stdscr.addch(row, 7, ord('|'))
    for col in range(8, 80):
        stdscr.addch(19, col, ord('_'))
    for col in range(80):
        stdscr.addch(22, col, ord('_'))
    return
def seas():
    """Spell "SEASON'S" down the left margin, one letter every other row."""
    for row, letter in zip(range(4, 19, 2), "SEASON'S"):
        stdscr.addch(row, 1, ord(letter))
    return
def greet():
    """Spell "GREETINGS" down column 5, one letter every other row."""
    for row, letter in zip(range(3, 20, 2), "GREETINGS"):
        stdscr.addch(row, 5, ord(letter))
    return
def fromwho():
    """Sign the greeting with the porter's address on the bottom line."""
    stdscr.addstr(21, 13, FROMWHO)
    return
def tree():
    """Draw the bare tree outline (branches, base, trunk) in green."""
    set_color(treescrn, curses.COLOR_GREEN)
    # Left edge of the canopy, top to bottom.
    for y, x in ((1, 11), (2, 11), (3, 10), (4, 9), (5, 9), (6, 8),
                 (7, 7), (8, 6), (9, 6), (10, 5), (11, 3), (12, 2)):
        treescrn.addch(y, x, ord('/'))
    # Right edge of the canopy, top to bottom.
    for y, x in ((1, 13), (2, 13), (3, 14), (4, 15), (5, 15), (6, 16),
                 (7, 17), (8, 18), (9, 18), (10, 19), (11, 21), (12, 22)):
        treescrn.addch(y, x, ord('\\'))
    # Tier separators inside the canopy.
    for y, x in ((4, 10), (4, 14), (8, 7), (8, 17)):
        treescrn.addch(y, x, ord('_'))
    # Skirt and trunk.
    treescrn.addstr(13, 0, "//////////// \\\\\\\\\\\\\\\\\\\\\\\\")
    treescrn.addstr(14, 11, "| |")
    treescrn.addstr(15, 11, "|_|")
    unset_color(treescrn)
    treescrn.refresh()
    w_del_msg.refresh()
    return
def balls():
    """Hang the ornaments ('@') onto a copy of the tree."""
    treescrn.overlay(treescrn2)
    set_color(treescrn2, curses.COLOR_BLUE)
    for y, x in ((3, 9), (3, 15), (4, 8), (4, 16), (5, 7), (5, 17),
                 (7, 6), (7, 18), (8, 5), (8, 19), (10, 4), (10, 20),
                 (11, 2), (11, 22), (12, 1), (12, 23)):
        treescrn2.addch(y, x, ord('@'))
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def star():
    """Put the blinking star on the treetop."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_YELLOW)
    treescrn2.addch(0, 12, ord('*'))
    treescrn2.standend()
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def strng1():
    """Drape the first (topmost) strand of tinsel."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for y, x, ch in ((3, 13, "'"), (3, 12, ':'), (3, 11, '.')):
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def strng2():
    """Drape the second strand of tinsel."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for y, x, ch in ((5, 14, "'"), (5, 13, ':'), (5, 12, '.'), (5, 11, ','),
                     (6, 10, "'"), (6, 9, ':')):
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def strng3():
    """Drape the third strand of tinsel."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for y, x, ch in ((7, 16, "'"), (7, 15, ':'), (7, 14, '.'), (7, 13, ','),
                     (8, 12, "'"), (8, 11, ':'), (8, 10, '.'), (8, 9, ',')):
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def strng4():
    """Drape the fourth strand of tinsel."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for y, x, ch in ((9, 17, "'"), (9, 16, ':'), (9, 15, '.'), (9, 14, ','),
                     (10, 13, "'"), (10, 12, ':'), (10, 11, '.'), (10, 10, ','),
                     (11, 9, "'"), (11, 8, ':'), (11, 7, '.'), (11, 6, ','),
                     (12, 5, "'")):
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def strng5():
    """Drape the fifth (lowest) strand and save the fully decorated tree."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for y, x, ch in ((11, 19, "'"), (11, 18, ':'), (11, 17, '.'), (11, 16, ','),
                     (12, 15, "'"), (12, 14, ':'), (12, 13, '.'), (12, 12, ',')):
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    # save a fully lit tree
    treescrn2.overlay(treescrn)
    treescrn2.refresh()
    w_del_msg.refresh()
    return
def blinkit():
    """Blink the tree lights: show each partially-lit frame, then all on.

    Bug fix: the original C code used ``switch (cycle)`` whose ``break``
    only left the switch; the Python port translated those into loop
    ``break`` statements, so the for loop exited after the first frame and
    frames 1-4 were never displayed.  Iterating the frame windows directly
    restores the intended five-frame blink cycle.
    """
    treescrn8.touchwin()
    # treescrn3..treescrn7 each hold one partially-lit frame.
    for frame in (treescrn3, treescrn4, treescrn5, treescrn6, treescrn7):
        frame.overlay(treescrn8)
        treescrn8.refresh()
        w_del_msg.refresh()
    treescrn8.touchwin()
    # ALL ON
    treescrn.overlay(treescrn8)
    treescrn8.refresh()
    w_del_msg.refresh()
    return
def deer_step(win, y, x):
    """Move one reindeer window to (y, x), repaint, and pause briefly
    (look_out also exits on any keypress)."""
    win.mvwin(y, x)
    win.refresh()
    w_del_msg.refresh()
    look_out(5)
def reindeer():
    """Animate the reindeer's approach: a distant dot, a twinkling star,
    then small, medium and finally big deer sprites that descend toward
    the tree."""
    y_pos = 0

    # Distant deer: a blinking '.' drifting left along the top.
    for x_pos in range(70, 62, -1):
        if x_pos < 66: y_pos = 1
        for looper in range(0, 4):
            dotdeer0.addch(y_pos, x_pos, ord('.'))
            dotdeer0.refresh()
            w_del_msg.refresh()
            dotdeer0.erase()
            dotdeer0.refresh()
            w_del_msg.refresh()
            look_out(50)

    y_pos = 2

    # Closer now: a blinking '*'.  Note x_pos deliberately continues from
    # the previous loop's final value.
    for x_pos in range(x_pos - 1, 50, -1):
        for looper in range(0, 4):
            if x_pos < 56:
                y_pos = 3

                try:
                    stardeer0.addch(y_pos, x_pos, ord('*'))
                except curses.error:
                    pass
                stardeer0.refresh()
                w_del_msg.refresh()
                stardeer0.erase()
                stardeer0.refresh()
                w_del_msg.refresh()
            else:
                dotdeer0.addch(y_pos, x_pos, ord('*'))
                dotdeer0.refresh()
                w_del_msg.refresh()
                dotdeer0.erase()
                dotdeer0.refresh()
                w_del_msg.refresh()

    x_pos = 58

    # Small deer sprite, galloping (cycle lildeer1..3) while moving down-left.
    for y_pos in range(2, 5):
        lildeer0.touchwin()
        lildeer0.refresh()
        w_del_msg.refresh()
        for looper in range(0, 4):
            deer_step(lildeer3, y_pos, x_pos)
            deer_step(lildeer2, y_pos, x_pos)
            deer_step(lildeer1, y_pos, x_pos)
            deer_step(lildeer2, y_pos, x_pos)
            deer_step(lildeer3, y_pos, x_pos)
        lildeer0.touchwin()
        lildeer0.refresh()
        w_del_msg.refresh()
        x_pos -= 2

    x_pos = 35

    # Medium deer sprite.
    for y_pos in range(5, 10):
        middeer0.touchwin()
        middeer0.refresh()
        w_del_msg.refresh()
        for looper in range(2):
            deer_step(middeer3, y_pos, x_pos)
            deer_step(middeer2, y_pos, x_pos)
            deer_step(middeer1, y_pos, x_pos)
            deer_step(middeer2, y_pos, x_pos)
            deer_step(middeer3, y_pos, x_pos)
        middeer0.touchwin()
        middeer0.refresh()
        w_del_msg.refresh()
        x_pos -= 3

    look_out(300)

    y_pos = 1

    # Big deer sprite trotting rightwards across the top.
    for x_pos in range(8, 16):
        deer_step(bigdeer4, y_pos, x_pos)
        deer_step(bigdeer3, y_pos, x_pos)
        deer_step(bigdeer2, y_pos, x_pos)
        deer_step(bigdeer1, y_pos, x_pos)
        deer_step(bigdeer2, y_pos, x_pos)
        deer_step(bigdeer3, y_pos, x_pos)
        deer_step(bigdeer4, y_pos, x_pos)
        deer_step(bigdeer0, y_pos, x_pos)

    x_pos -= 1

    # The deer pauses and looks around.
    for looper in range(0, 6):
        deer_step(lookdeer4, y_pos, x_pos)
        deer_step(lookdeer3, y_pos, x_pos)
        deer_step(lookdeer2, y_pos, x_pos)
        deer_step(lookdeer1, y_pos, x_pos)
        deer_step(lookdeer2, y_pos, x_pos)
        deer_step(lookdeer3, y_pos, x_pos)
        deer_step(lookdeer4, y_pos, x_pos)
        deer_step(lookdeer0, y_pos, x_pos)

    # Descend toward the bottom.  The `y_pos -= 1` inside the loop does not
    # affect the for-iteration (Python rebinds y_pos each pass); it only
    # offsets the value left over for the final deer_step below.
    for y_pos in range(y_pos, 10):
        for looper in range(0, 2):
            deer_step(bigdeer4, y_pos, x_pos)
            deer_step(bigdeer3, y_pos, x_pos)
            deer_step(bigdeer2, y_pos, x_pos)
            deer_step(bigdeer1, y_pos, x_pos)
            deer_step(bigdeer2, y_pos, x_pos)
            deer_step(bigdeer3, y_pos, x_pos)
            deer_step(bigdeer4, y_pos, x_pos)
            deer_step(bigdeer0, y_pos, x_pos)
        y_pos -= 1

    deer_step(lookdeer3, y_pos, x_pos)
    return
def main(win):
    """Run the animated curses Christmas-card demo.

    Creates every window used by the animation (the tree, its blink
    overlays, the reindeer sprites and the banner windows), paints the
    sprite artwork once, then loops the whole card animation.  The
    "Hit any key to quit" banner is honoured by look_out() (defined
    earlier in the file), which is called between animation steps.

    :param win: the top-level curses window supplied by curses.wrapper().
    """
    global stdscr
    stdscr = win
    global my_bg, y_pos, x_pos
    global treescrn, treescrn2, treescrn3, treescrn4
    global treescrn5, treescrn6, treescrn7, treescrn8
    global dotdeer0, stardeer0
    global lildeer0, lildeer1, lildeer2, lildeer3
    global middeer0, middeer1, middeer2, middeer3
    global bigdeer0, bigdeer1, bigdeer2, bigdeer3, bigdeer4
    global lookdeer0, lookdeer1, lookdeer2, lookdeer3, lookdeer4
    global w_holiday, w_del_msg
    my_bg = curses.COLOR_BLACK
    # curses.curs_set(0)
    # All treescrn* windows occupy the same screen region: treescrn is the
    # fully decorated tree; treescrn2..treescrn8 are the variants that the
    # blinking code (set up at the bottom of the animation loop) flips among.
    treescrn = curses.newwin(16, 27, 3, 53)
    treescrn2 = curses.newwin(16, 27, 3, 53)
    treescrn3 = curses.newwin(16, 27, 3, 53)
    treescrn4 = curses.newwin(16, 27, 3, 53)
    treescrn5 = curses.newwin(16, 27, 3, 53)
    treescrn6 = curses.newwin(16, 27, 3, 53)
    treescrn7 = curses.newwin(16, 27, 3, 53)
    treescrn8 = curses.newwin(16, 27, 3, 53)
    # Windows for the three sizes of reindeer animation plus scenery.
    dotdeer0 = curses.newwin(3, 71, 0, 8)
    stardeer0 = curses.newwin(4, 56, 0, 8)
    lildeer0 = curses.newwin(7, 53, 0, 8)
    lildeer1 = curses.newwin(2, 4, 0, 0)
    lildeer2 = curses.newwin(2, 4, 0, 0)
    lildeer3 = curses.newwin(2, 4, 0, 0)
    middeer0 = curses.newwin(15, 42, 0, 8)
    middeer1 = curses.newwin(3, 7, 0, 0)
    middeer2 = curses.newwin(3, 7, 0, 0)
    middeer3 = curses.newwin(3, 7, 0, 0)
    bigdeer0 = curses.newwin(10, 23, 0, 0)
    bigdeer1 = curses.newwin(10, 23, 0, 0)
    bigdeer2 = curses.newwin(10, 23, 0, 0)
    bigdeer3 = curses.newwin(10, 23, 0, 0)
    bigdeer4 = curses.newwin(10, 23, 0, 0)
    lookdeer0 = curses.newwin(10, 25, 0, 0)
    lookdeer1 = curses.newwin(10, 25, 0, 0)
    lookdeer2 = curses.newwin(10, 25, 0, 0)
    lookdeer3 = curses.newwin(10, 25, 0, 0)
    lookdeer4 = curses.newwin(10, 25, 0, 0)
    w_holiday = curses.newwin(1, 27, 3, 27)
    w_del_msg = curses.newwin(1, 20, 23, 60)
    # Writing into the last cell of a window raises curses.error even though
    # the text is drawn, hence the try/except guards around these addstr and
    # bottom-right addch calls.
    try:
        w_del_msg.addstr(0, 0, "Hit any key to quit")
    except curses.error:
        pass
    try:
        w_holiday.addstr(0, 0, "H A P P Y H O L I D A Y S")
    except curses.error:
        pass
    # set up the windows for our various reindeer
    lildeer1.addch(0, 0, ord('V'))
    lildeer1.addch(1, 0, ord('@'))
    lildeer1.addch(1, 1, ord('<'))
    lildeer1.addch(1, 2, ord('>'))
    try:
        lildeer1.addch(1, 3, ord('~'))
    except curses.error:
        pass
    lildeer2.addch(0, 0, ord('V'))
    lildeer2.addch(1, 0, ord('@'))
    lildeer2.addch(1, 1, ord('|'))
    lildeer2.addch(1, 2, ord('|'))
    try:
        lildeer2.addch(1, 3, ord('~'))
    except curses.error:
        pass
    lildeer3.addch(0, 0, ord('V'))
    lildeer3.addch(1, 0, ord('@'))
    lildeer3.addch(1, 1, ord('>'))
    lildeer3.addch(1, 2, ord('<'))
    try:
        # Fixed: the original drew this tail on lildeer2 (marked "# XXX"),
        # leaving lildeer3 without its tail; it belongs on lildeer3 to match
        # the lildeer1/lildeer2 setups above.
        lildeer3.addch(1, 3, ord('~'))
    except curses.error:
        pass
    middeer1.addch(0, 2, ord('y'))
    middeer1.addch(0, 3, ord('y'))
    middeer1.addch(1, 2, ord('0'))
    middeer1.addch(1, 3, ord('('))
    middeer1.addch(1, 4, ord('='))
    middeer1.addch(1, 5, ord(')'))
    middeer1.addch(1, 6, ord('~'))
    middeer1.addch(2, 3, ord('\\'))
    middeer1.addch(2, 5, ord('/'))
    middeer2.addch(0, 2, ord('y'))
    middeer2.addch(0, 3, ord('y'))
    middeer2.addch(1, 2, ord('0'))
    middeer2.addch(1, 3, ord('('))
    middeer2.addch(1, 4, ord('='))
    middeer2.addch(1, 5, ord(')'))
    middeer2.addch(1, 6, ord('~'))
    middeer2.addch(2, 3, ord('|'))
    middeer2.addch(2, 5, ord('|'))
    middeer3.addch(0, 2, ord('y'))
    middeer3.addch(0, 3, ord('y'))
    middeer3.addch(1, 2, ord('0'))
    middeer3.addch(1, 3, ord('('))
    middeer3.addch(1, 4, ord('='))
    middeer3.addch(1, 5, ord(')'))
    middeer3.addch(1, 6, ord('~'))
    middeer3.addch(2, 3, ord('/'))
    middeer3.addch(2, 5, ord('\\'))
    # The bigdeer1..4 windows are the four leg positions of the large
    # reindeer's running cycle; only the legs (bottom rows) differ.
    bigdeer1.addch(0, 17, ord('\\'))
    bigdeer1.addch(0, 18, ord('/'))
    bigdeer1.addch(0, 19, ord('\\'))
    bigdeer1.addch(0, 20, ord('/'))
    bigdeer1.addch(1, 18, ord('\\'))
    bigdeer1.addch(1, 20, ord('/'))
    bigdeer1.addch(2, 19, ord('|'))
    bigdeer1.addch(2, 20, ord('_'))
    bigdeer1.addch(3, 18, ord('/'))
    bigdeer1.addch(3, 19, ord('^'))
    bigdeer1.addch(3, 20, ord('0'))
    bigdeer1.addch(3, 21, ord('\\'))
    bigdeer1.addch(4, 17, ord('/'))
    bigdeer1.addch(4, 18, ord('/'))
    bigdeer1.addch(4, 19, ord('\\'))
    bigdeer1.addch(4, 22, ord('\\'))
    bigdeer1.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer1.addstr(6, 7, "( \\_____( /")  # ))
    bigdeer1.addstr(7, 8, "( ) /")
    bigdeer1.addstr(8, 9, "\\\\ /")
    bigdeer1.addstr(9, 11, "\\>/>")
    bigdeer2.addch(0, 17, ord('\\'))
    bigdeer2.addch(0, 18, ord('/'))
    bigdeer2.addch(0, 19, ord('\\'))
    bigdeer2.addch(0, 20, ord('/'))
    bigdeer2.addch(1, 18, ord('\\'))
    bigdeer2.addch(1, 20, ord('/'))
    bigdeer2.addch(2, 19, ord('|'))
    bigdeer2.addch(2, 20, ord('_'))
    bigdeer2.addch(3, 18, ord('/'))
    bigdeer2.addch(3, 19, ord('^'))
    bigdeer2.addch(3, 20, ord('0'))
    bigdeer2.addch(3, 21, ord('\\'))
    bigdeer2.addch(4, 17, ord('/'))
    bigdeer2.addch(4, 18, ord('/'))
    bigdeer2.addch(4, 19, ord('\\'))
    bigdeer2.addch(4, 22, ord('\\'))
    bigdeer2.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer2.addstr(6, 7, "(( )____( /")  # ))
    bigdeer2.addstr(7, 7, "( / |")
    bigdeer2.addstr(8, 8, "\\/ |")
    bigdeer2.addstr(9, 9, "|> |>")
    bigdeer3.addch(0, 17, ord('\\'))
    bigdeer3.addch(0, 18, ord('/'))
    bigdeer3.addch(0, 19, ord('\\'))
    bigdeer3.addch(0, 20, ord('/'))
    bigdeer3.addch(1, 18, ord('\\'))
    bigdeer3.addch(1, 20, ord('/'))
    bigdeer3.addch(2, 19, ord('|'))
    bigdeer3.addch(2, 20, ord('_'))
    bigdeer3.addch(3, 18, ord('/'))
    bigdeer3.addch(3, 19, ord('^'))
    bigdeer3.addch(3, 20, ord('0'))
    bigdeer3.addch(3, 21, ord('\\'))
    bigdeer3.addch(4, 17, ord('/'))
    bigdeer3.addch(4, 18, ord('/'))
    bigdeer3.addch(4, 19, ord('\\'))
    bigdeer3.addch(4, 22, ord('\\'))
    bigdeer3.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer3.addstr(6, 6, "( ()_____( /")  # ))
    bigdeer3.addstr(7, 6, "/ / /")
    bigdeer3.addstr(8, 5, "|/ \\")
    bigdeer3.addstr(9, 5, "/> \\>")
    bigdeer4.addch(0, 17, ord('\\'))
    bigdeer4.addch(0, 18, ord('/'))
    bigdeer4.addch(0, 19, ord('\\'))
    bigdeer4.addch(0, 20, ord('/'))
    bigdeer4.addch(1, 18, ord('\\'))
    bigdeer4.addch(1, 20, ord('/'))
    bigdeer4.addch(2, 19, ord('|'))
    bigdeer4.addch(2, 20, ord('_'))
    bigdeer4.addch(3, 18, ord('/'))
    bigdeer4.addch(3, 19, ord('^'))
    bigdeer4.addch(3, 20, ord('0'))
    bigdeer4.addch(3, 21, ord('\\'))
    bigdeer4.addch(4, 17, ord('/'))
    bigdeer4.addch(4, 18, ord('/'))
    bigdeer4.addch(4, 19, ord('\\'))
    bigdeer4.addch(4, 22, ord('\\'))
    bigdeer4.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer4.addstr(6, 6, "( )______( /")  # )
    bigdeer4.addstr(7, 5, "(/ \\")  # )
    bigdeer4.addstr(8, 0, "v___= ----^")
    # The lookdeer1..4 windows are the same four leg positions but with the
    # head turned to face the viewer.
    lookdeer1.addstr(0, 16, "\\/ \\/")
    lookdeer1.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer1.addstr(2, 19, "\\=/")
    lookdeer1.addstr(3, 17, "^\\o o/^")
    lookdeer1.addstr(4, 17, "//( )")
    lookdeer1.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer1.addstr(6, 7, "( \\_____( /")  # ))
    lookdeer1.addstr(7, 8, "( ) /")
    lookdeer1.addstr(8, 9, "\\\\ /")
    lookdeer1.addstr(9, 11, "\\>/>")
    lookdeer2.addstr(0, 16, "\\/ \\/")
    lookdeer2.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer2.addstr(2, 19, "\\=/")
    lookdeer2.addstr(3, 17, "^\\o o/^")
    lookdeer2.addstr(4, 17, "//( )")
    lookdeer2.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer2.addstr(6, 7, "(( )____( /")  # ))
    lookdeer2.addstr(7, 7, "( / |")
    lookdeer2.addstr(8, 8, "\\/ |")
    lookdeer2.addstr(9, 9, "|> |>")
    lookdeer3.addstr(0, 16, "\\/ \\/")
    lookdeer3.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer3.addstr(2, 19, "\\=/")
    lookdeer3.addstr(3, 17, "^\\o o/^")
    lookdeer3.addstr(4, 17, "//( )")
    lookdeer3.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer3.addstr(6, 6, "( ()_____( /")  # ))
    lookdeer3.addstr(7, 6, "/ / /")
    lookdeer3.addstr(8, 5, "|/ \\")
    lookdeer3.addstr(9, 5, "/> \\>")
    lookdeer4.addstr(0, 16, "\\/ \\/")
    lookdeer4.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer4.addstr(2, 19, "\\=/")
    lookdeer4.addstr(3, 17, "^\\o o/^")
    lookdeer4.addstr(4, 17, "//( )")
    lookdeer4.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer4.addstr(6, 6, "( )______( /")  # )
    lookdeer4.addstr(7, 5, "(/ \\")  # )
    lookdeer4.addstr(8, 0, "v___= ----^")
    ###############################################
    curses.cbreak()
    stdscr.nodelay(1)
    # Main animation loop: draw the card, then blink the tree.  Each
    # look_out() call paces the animation and handles any pending keypress.
    while 1:
        stdscr.clear()
        treescrn.erase()
        w_del_msg.touchwin()
        treescrn.touchwin()
        treescrn2.erase()
        treescrn2.touchwin()
        treescrn8.erase()
        treescrn8.touchwin()
        stdscr.refresh()
        look_out(150)
        boxit()
        stdscr.refresh()
        look_out(150)
        seas()
        stdscr.refresh()
        greet()
        stdscr.refresh()
        look_out(150)
        fromwho()
        stdscr.refresh()
        look_out(150)
        tree()
        look_out(150)
        balls()
        look_out(150)
        star()
        look_out(150)
        strng1()
        strng2()
        strng3()
        strng4()
        strng5()
        # set up the windows for our blinking trees
        #
        # Each treescrn<N> below starts as a copy of the full tree and then
        # has a few ornament/light cells blanked (or re-lit) so that cycling
        # through them makes the decorations appear to twinkle.
        #
        # treescrn3
        treescrn.overlay(treescrn3)
        # balls
        treescrn3.addch(4, 18, ord(' '))
        treescrn3.addch(7, 6, ord(' '))
        treescrn3.addch(8, 19, ord(' '))
        treescrn3.addch(11, 22, ord(' '))
        # star
        treescrn3.addch(0, 12, ord('*'))
        # strng1
        treescrn3.addch(3, 11, ord(' '))
        # strng2
        treescrn3.addch(5, 13, ord(' '))
        treescrn3.addch(6, 10, ord(' '))
        # strng3
        treescrn3.addch(7, 16, ord(' '))
        treescrn3.addch(7, 14, ord(' '))
        # strng4
        treescrn3.addch(10, 13, ord(' '))
        treescrn3.addch(10, 10, ord(' '))
        treescrn3.addch(11, 8, ord(' '))
        # strng5
        treescrn3.addch(11, 18, ord(' '))
        treescrn3.addch(12, 13, ord(' '))
        # treescrn4
        treescrn.overlay(treescrn4)
        # balls
        treescrn4.addch(3, 9, ord(' '))
        treescrn4.addch(4, 16, ord(' '))
        treescrn4.addch(7, 6, ord(' '))
        treescrn4.addch(8, 19, ord(' '))
        treescrn4.addch(11, 2, ord(' '))
        treescrn4.addch(12, 23, ord(' '))
        # star
        treescrn4.standout()
        treescrn4.addch(0, 12, ord('*'))
        treescrn4.standend()
        # strng1
        treescrn4.addch(3, 13, ord(' '))
        # strng2
        # strng3
        treescrn4.addch(7, 15, ord(' '))
        treescrn4.addch(8, 11, ord(' '))
        # strng4
        treescrn4.addch(9, 16, ord(' '))
        treescrn4.addch(10, 12, ord(' '))
        treescrn4.addch(11, 8, ord(' '))
        # strng5
        treescrn4.addch(11, 18, ord(' '))
        treescrn4.addch(12, 14, ord(' '))
        # treescrn5
        treescrn.overlay(treescrn5)
        # balls
        treescrn5.addch(3, 15, ord(' '))
        treescrn5.addch(10, 20, ord(' '))
        treescrn5.addch(12, 1, ord(' '))
        # star
        treescrn5.addch(0, 12, ord(' '))
        # strng1
        treescrn5.addch(3, 11, ord(' '))
        # strng2
        treescrn5.addch(5, 12, ord(' '))
        # strng3
        treescrn5.addch(7, 14, ord(' '))
        treescrn5.addch(8, 10, ord(' '))
        # strng4
        treescrn5.addch(9, 15, ord(' '))
        treescrn5.addch(10, 11, ord(' '))
        treescrn5.addch(11, 7, ord(' '))
        # strng5
        treescrn5.addch(11, 17, ord(' '))
        treescrn5.addch(12, 13, ord(' '))
        # treescrn6
        treescrn.overlay(treescrn6)
        # balls
        treescrn6.addch(6, 7, ord(' '))
        treescrn6.addch(7, 18, ord(' '))
        treescrn6.addch(10, 4, ord(' '))
        treescrn6.addch(11, 23, ord(' '))
        # star
        treescrn6.standout()
        treescrn6.addch(0, 12, ord('*'))
        treescrn6.standend()
        # strng1
        # strng2
        treescrn6.addch(5, 11, ord(' '))
        # strng3
        treescrn6.addch(7, 13, ord(' '))
        treescrn6.addch(8, 9, ord(' '))
        # strng4
        treescrn6.addch(9, 14, ord(' '))
        treescrn6.addch(10, 10, ord(' '))
        treescrn6.addch(11, 6, ord(' '))
        # strng5
        treescrn6.addch(11, 16, ord(' '))
        treescrn6.addch(12, 12, ord(' '))
        # treescrn7
        treescrn.overlay(treescrn7)
        # balls
        treescrn7.addch(3, 15, ord(' '))
        treescrn7.addch(6, 7, ord(' '))
        treescrn7.addch(7, 18, ord(' '))
        treescrn7.addch(10, 4, ord(' '))
        treescrn7.addch(11, 22, ord(' '))
        # star
        treescrn7.addch(0, 12, ord('*'))
        # strng1
        treescrn7.addch(3, 12, ord(' '))
        # strng2
        treescrn7.addch(5, 13, ord(' '))
        treescrn7.addch(6, 9, ord(' '))
        # strng3
        treescrn7.addch(7, 15, ord(' '))
        treescrn7.addch(8, 11, ord(' '))
        # strng4
        treescrn7.addch(9, 16, ord(' '))
        treescrn7.addch(10, 12, ord(' '))
        treescrn7.addch(11, 8, ord(' '))
        # strng5
        treescrn7.addch(11, 18, ord(' '))
        treescrn7.addch(12, 14, ord(' '))
        look_out(150)
        reindeer()
        w_holiday.touchwin()
        w_holiday.refresh()
        w_del_msg.refresh()
        look_out(500)
        for i in range(0, 20):
            blinkit()
# Hand the terminal over to curses and run the animation; wrapper() restores
# the terminal to a sane state even if main() raises or the user quits.
curses.wrapper(main)
|
teeple/pns_server
|
work/install/Python-2.7.4/Demo/curses/xmas.py
|
Python
|
gpl-2.0
| 25,446
|
[
"Amber"
] |
b12576cf11d4b3c462884198a09cd45828431f7fc8061906c69acda6748b16a5
|
from six import assertRaisesRegex
from unittest import TestCase
from tempfile import mkstemp
from os import close, unlink, write
from contextlib import contextmanager
from pysam import CHARD_CLIP, CMATCH
from dark.reads import Read, ReadFilter
from dark.sam import (
PaddedSAM, SAMFilter, UnequalReferenceLengthError, UnknownReference,
InvalidSAM, samReferencesToStr, _hardClip)
# These tests actually use the filesystem to read files. That's due to the API
# to pysam and the fact that it calls a C function to open files, so we can't
# mock Python's 'open' method. Hence the following context manager.
@contextmanager
def dataFile(data):
    """
    Create a context manager to store data in a temporary file and
    later remove it.

    The file is always removed, even if the body of the 'with' statement
    raises (the original version leaked the temporary file in that case),
    and the descriptor is closed even if writing fails.

    @param data: A C{str} whose UTF-8 encoding is written to the file.
    @return: A context manager yielding the C{str} name of the file.
    """
    fd, filename = mkstemp()
    try:
        try:
            write(fd, data.encode('utf-8'))
        finally:
            close(fd)
        yield filename
    finally:
        unlink(filename)
class TestSAMFilter(TestCase):
    """
    Test the SAMFilter class.
    """
    # Every test writes a minimal SAM file via dataFile().  SAM fields must
    # be TAB separated, so each line is written with single spaces and then
    # converted with .replace(' ', '\t').  Header lines start with '@SQ'
    # (reference name SN: and length LN:); the remaining lines are
    # alignments whose second field is the SAM FLAG bitmask.
    def testUnknownReferences(self):
        """
        Passing an unknown reference id to the referenceLengths method must
        result in an UnknownReference exception.
        """
        data = '\n'.join([
            '@SQ SN:id1 LN:90',
            '@SQ SN:id2 LN:90',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sam = SAMFilter(filename, referenceIds={'unknown'})
            error = ("^Reference 'unknown' is not present in the "
                     "SAM/BAM file\\.$")
            assertRaisesRegex(self, UnknownReference, error,
                              sam.referenceLengths)
    def testStoreQueryIds(self):
        """
        If we request that query ids are saved, they must be.
        """
        # Note that query2 appears twice; queryIds is a set, so it is
        # expected only once below.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query2 0 ref1 2 60 2= * 0 0 TC XY',
            'query2 0 ref1 2 60 2= * 0 0 TC XY',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename, storeQueryIds=True)
            list(sf.alignments())
            self.assertEqual({'query1', 'query2'}, sf.queryIds)
    def testAlignmentCount(self):
        """
        When all queries have been yielded, the alignment count must be
        as expected.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query2 0 ref1 2 60 2= * 0 0 TC XY',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename)
            list(sf.alignments())
            self.assertEqual(2, sf.alignmentCount)
    def testMinLength(self):
        """
        A request for reads that are only longer than a certain value should
        result in the expected result.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',  # Length 2: filtered out.
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            filterRead = ReadFilter(minLength=6).filter
            sf = SAMFilter(filename, filterRead=filterRead)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)
    def testDropSecondary(self):
        """
        Dropping matches flagged as secondary must give the expected result.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 256 ref1 2 60 2= * 0 0 TC ZZ',  # Flag 256: secondary.
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename, dropSecondary=True)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)
    def testDropSupplementary(self):
        """
        Dropping matches flagged as supplementary must give the expected
        result.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 2048 ref1 2 60 2= * 0 0 TC ZZ',  # Flag 2048: supplementary.
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename, dropSupplementary=True)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)
    def testDropDuplicates(self):
        """
        Dropping matches flagged as optical or PCR duplicates must give the
        expected result.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 1024 ref1 2 60 2= * 0 0 TC ZZ',  # Flag 1024: duplicate.
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename, dropDuplicates=True)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)
    def testKeepQualityControlFailures(self):
        """
        Keeping matches flagged as quality control failures must give the
        expected result.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 512 ref1 4 60 2= * 0 0 TC ZZ',  # Flag 512: QC failure.
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename, keepQCFailures=True)
            (alignment1, alignment2) = list(sf.alignments())
            self.assertEqual('query1', alignment1.query_name)
            self.assertEqual('query2', alignment2.query_name)
    def testMinScoreNoScores(self):
        """
        A request for reads with alignment scores no lower than a given value
        must produce an empty result when no alignments have scores.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename, minScore=6)
            self.assertEqual([], list(sf.alignments()))
    def testMinScore(self):
        """
        A request for reads with alignment scores no lower than a given value
        must produce the expected result when some alignments have scores.
        """
        # The AS:i:<n> optional field carries the alignment score.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:10',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',
            'query3 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:3',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename, minScore=6)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)
    def testMaxScoreNoScores(self):
        """
        A request for reads with alignment scores no higher than a given value
        must produce an empty result when no alignments have scores.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',
            'query3 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename, maxScore=6)
            self.assertEqual([], list(sf.alignments()))
    def testMaxScore(self):
        """
        A request for reads with alignment scores no higher than a given value
        must produce the expected result when some alignments have scores.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:10',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',
            'query3 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:3',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename, maxScore=6)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query3', alignment.query_name)
    def testMinAndMaxScore(self):
        """
        A request for reads with alignment scores no lower or higher than
        given values must produce the expected result.
        """
        # Scores 10 and 3 fall inside [3, 10]; 12 and 2 fall outside; the
        # unscored query3 is always excluded when score limits are given.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:10',
            'query2 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:12',
            'query3 0 ref1 2 60 2= * 0 0 TC ZZ',
            'query4 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:3',
            'query5 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ AS:i:2',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename, minScore=3, maxScore=10)
            (alignment1, alignment2) = list(sf.alignments())
            self.assertEqual('query1', alignment1.query_name)
            self.assertEqual('query4', alignment2.query_name)
    def testCloseButNoCIGAR(self):
        """
        An unmapped query with no CIGAR string must be passed through
        unchanged if dropUnmapped is not specified.
        """
        data = '\n'.join([
            '@SQ SN:ref LN:10',
            'query1 4 * 0 0 * * 0 0 TCTAGG ZZZZZZ',  # Flag 4: unmapped.
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)
            self.assertEqual('TCTAGG', alignment.query_sequence)
            # query_qualities are numeric; adding 33 maps each back to its
            # ASCII (Phred+33) quality character.
            self.assertEqual('ZZZZZZ', ''.join(
                map(lambda x: chr(x + 33), alignment.query_qualities)))
    def testNoQuality(self):
        """
        If an alignment has * for the quality string, the filter must
        return an alignment with a C{None} quality value.
        """
        data = '\n'.join([
            '@SQ SN:ref LN:10',
            'query1 4 * 0 0 6M * 0 0 TCTAGG *',  # '*' = no quality string.
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            sf = SAMFilter(filename)
            (alignment,) = list(sf.alignments())
            self.assertEqual('query1', alignment.query_name)
            self.assertEqual('TCTAGG', alignment.query_sequence)
            self.assertIsNone(alignment.query_qualities)
class TestPaddedSAM(TestCase):
"""
Test the PaddedSAM class.
"""
# In reading the tests below, it is important to remember that the start
# position (in the reference) of the match in SAM format is 1-based. This
# is the 4th field in the non-header SAM lines (i.e., those that don't
# start with @). If you look at the code in ../dark/sam.py, pysam provides
# a 'reference_start' attribute that is 0-based.
def testUnequalReferenceLengths(self):
"""
Passing no reference ids when the references have different lengths
must result in an UnequalReferenceLengthError exception.
"""
data = '\n'.join([
'@SQ SN:id1 LN:90',
'@SQ SN:id2 LN:91',
]).replace(' ', '\t')
with dataFile(data) as filename:
error = ('^Your 2 SAM/BAM file reference sequence lengths '
'\\(id1=90, id2=91\\) are not all identical\\.$')
assertRaisesRegex(self, UnequalReferenceLengthError, error,
PaddedSAM, SAMFilter(filename))
def testQueryTooLong(self):
"""
If the query sequence is longer than the total of the lengths in the
CIGAR operations, a ValueError must be raised.
"""
# This test just returns. It used to be possible to reach the
# "Query ... not fully consumed when parsing CIGAR string."
# ValueError in sam.py, prior to the fix of
# https://github.com/acorg/dark-matter/issues/630 but it is not
# possible to get a CIGAR string that has a different total length
# from the sequence length through to our code in sam.py because
# pysam catches the error. I'm leaving this test here because it
# documents that the error checked for in sam.py cannot currently
# be reached and the test may become useful. For now it just returns.
return
data = '\n'.join([
'@SQ SN:ref1 LN:90',
'query1 0 ref1 1 60 4M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
error = ('^Query TCTAGG not fully consumed when parsing CIGAR '
'string\\.')
assertRaisesRegex(self, ValueError, error, list, ps.queries())
def testAllMMatch(self):
"""
A simple all-'M' match must result in the expected padded sequence.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 6M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
def testMixedMatch(self):
"""
A match that is a mix of M, =, and X must result in the expected
padded sequence.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
def testHardClipLeft(self):
"""
A simple all-'M' match with a hard clip left must result in the
expected padded sequence.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 10H6M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
def testHardClipRight(self):
"""
A simple all-'M' match with a hard clip right must result in the
expected padded sequence.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 6M10H * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
def testRcNeeded(self):
"""
A reverse-complemented match (flag = 16) when rcNeeded=True is passed
must result in the expected (reverse complemented) padded sequence
and reversed quality string.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 16 ref1 2 60 6M * 0 0 TCTAGG 123456',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries(rcNeeded=True))
self.assertEqual(Read('query1', '-CCTAGA---', '!654321!!!'), read)
def testRcSuffix(self):
"""
A reverse-complemented sequence should have the rcSuffix string added
to its id when an rcSuffix value is passed.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 16 ref1 2 60 6M * 0 0 TCTAGG 123456',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries(rcSuffix='-rc', rcNeeded=True))
self.assertEqual(Read('query1-rc', '-CCTAGA---', '!654321!!!'),
read)
def testQuerySoftClipLeft(self):
"""
A match with a soft-clipped region that does not extend to the left of
the reference must result in the expected padded sequence.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 4 60 2S4M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
def testQuerySoftClipReachesLeftEdge(self):
"""
A match with a soft-clipped region that reaches to the left edge of the
reference must result in the expected padded sequence.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 5 60 4S2M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', 'TCTAGG----', 'ZZZZZZ!!!!'), read)
def testQuerySoftClipProtrudesLeft(self):
"""
A match with a soft-clipped region that extends to the left of the
reference must result in the expected padded sequence.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 4S2M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', 'AGG-------', 'ZZZ!!!!!!!'), read)
def testKF414679SoftClipLeft(self):
"""
Test for a case that wasn't working.
"""
seq = ('GCCATGCAGTGGAACTCCACAGCATTCCACCAAGCTCTGC'
'AGAATCCCAAAGTCAGGGGTTTGTATCTTCTTGCTGGTGGC')
quality = ('ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ')
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 5 60 18S63M * 0 0 %s %s' % (seq, quality),
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', seq[14:], quality[14:]), read)
def testQuerySoftClipRight(self):
"""
A match with a soft-clipped region that does not extend to the right of
the reference must result in the expected padded sequence.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 4 60 4M2S * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', '---TCTAGG-', '!!!ZZZZZZ!'), read)
def testQuerySoftClipReachesRightEdge(self):
"""
A match with a soft-clipped region that reaches to the right edge of
the reference must result in the expected padded sequence.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 5 60 2M4S * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', '----TCTAGG', '!!!!ZZZZZZ'), read)
def testQuerySoftClipProtrudesRight(self):
"""
A match with a soft-clipped region that extends to the right of
the reference must result in the expected padded sequence.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 6 60 2M4S * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', '-----TCTAG', '!!!!!ZZZZZ'), read)
def testQuerySoftClipProtrudesBothSides(self):
"""
A match with a soft-clipped region that extends to both the left and
right of the reference must result in the expected padded sequence.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 4 60 5S5M5S * 0 0 TCTAGGCTGACTAAG ZZZZZZZZZZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', 'TAGGCTGACT', 'ZZZZZZZZZZ'), read)
def testQueryHardClipAndSoftClipProtrudesBothSides(self):
"""
A match with a soft-clipped region that extends to both the left and
right of the reference must result in the expected padded sequence
when hard clipping is also indicated by the CIGAR string.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 4 0 3H5S5M4S5H * 0 0 TCTAGGCTGACTAA ZZZZZZZZZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', 'TAGGCTGACT', 'ZZZZZZZZZZ'), read)
def testReferenceInsertion(self):
"""
An insertion into the reference must result in the expected padded
sequence and the expected value in the referenceInsertions dictionary.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 2M2I2M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', '-TCGG-----', '!ZZZZ!!!!!'), read)
self.assertEqual(
{
'query1': [(3, 'TA')],
},
ps.referenceInsertions)
def testPrimaryAndSecondaryReferenceInsertion(self):
"""
A primary and secondary insertion into the reference (of the same
query) must result in the expected padded sequences and the expected
value in the referenceInsertions dictionary.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 2M2I2M * 0 0 TCTAGG ZZZZZZ',
'query1 256 ref1 4 60 2M3I1M * 0 0 * *',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read1, read2) = list(ps.queries())
self.assertEqual(Read('query1', '-TCGG-----', '!ZZZZ!!!!!'), read1)
self.assertEqual(Read('query1/1', '---TCG----', '!!!ZZZ!!!!'),
read2)
self.assertEqual(
{
'query1': [(3, 'TA')],
'query1/1': [(5, 'TAG')],
},
ps.referenceInsertions)
def testReferenceDeletion(self):
"""
An deletion of reference bases must result in the expected padded
sequence (with Ns inserted for the deleted reference bases).
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 2M2D4M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries())
self.assertEqual(Read('query1', '-TCNNTAGG-', '!ZZ!!ZZZZ!'), read)
def testReferenceDeletionAlternateChars(self):
"""
An deletion of reference bases must result in the expected padded
sequence (with the passed query insertion character and unknown
quality character) when queryInsertionChar and unknownQualityChar
arguments are passed.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 2M2D4M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries(queryInsertionChar='?',
unknownQualityChar='+'))
self.assertEqual(Read('query1', '-TC??TAGG-', '+ZZ++ZZZZ+'), read)
def testReferenceSkip(self):
"""
An skip of reference bases must result in the expected padded
sequence with the passed unknown quality character when the
unknownQualityChar argument is passed.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 2M2N4M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries(unknownQualityChar='.'))
self.assertEqual(Read('query1', '-TCNNTAGG-', '.ZZ..ZZZZ.'), read)
def testReferenceSkipAlternateChars(self):
"""
An skip of reference bases must result in the expected padded
sequence (with the passed query insertion character and unknown
quality character) when queryInsertionChar and unknownQualityChar
arguments are passed.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:10',
'query1 0 ref1 2 60 2M2N4M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename))
(read,) = list(ps.queries(queryInsertionChar='X',
unknownQualityChar='+'))
self.assertEqual(Read('query1', '-TCXXTAGG-', '+ZZ++ZZZZ+'), read)
def testMixedMatchSpecificReferenceButNoMatches(self):
"""
A request for reads aligned against a reference that exists but that
has no matches must result in an empty list.
"""
data = '\n'.join([
'@SQ SN:ref1 LN:15',
'@SQ SN:ref2 LN:15',
'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
]).replace(' ', '\t')
with dataFile(data) as filename:
ps = PaddedSAM(SAMFilter(filename, referenceIds={'ref2'}))
self.assertEqual([], list(ps.queries()))
    def testMixedMatchSpecificReference(self):
        """
        A match that is a mix of M, =, and X must result in the expected
        padded sequence when a reference sequence is specified.
        """
        # CIGAR 2=2X2M: sequence match (=), mismatch (X), and plain match (M)
        # all consume both query and reference, so the padded result is the
        # same as a plain 6M match.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename, referenceIds={'ref1'}))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
    def testMinLength(self):
        """
        A request for reads that are only longer than a certain value should
        result in the expected result.
        """
        # query2 (length 2) is shorter than minLength=6 and must be dropped;
        # only query1 (length 6) survives the filter.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 0 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            filterRead = ReadFilter(minLength=6).filter
            ps = PaddedSAM(SAMFilter(filename, filterRead=filterRead))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
    def testDropSecondary(self):
        """
        Dropping matches flagged as secondary must give the expected result.
        """
        # SAM flag 256 marks query2's alignment as secondary, so it must be
        # excluded when dropSecondary is True.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 256 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename, dropSecondary=True))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
    def testDropSupplementary(self):
        """
        Dropping matches flagged as supplementary must give the expected
        result.
        """
        # SAM flag 2048 marks query2's alignment as supplementary, so it
        # must be excluded when dropSupplementary is True.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 2048 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename, dropSupplementary=True))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
    def testDropDuplicates(self):
        """
        Dropping matches flagged as optical or PCR duplicates must give the
        expected result.
        """
        # SAM flag 1024 marks query2's alignment as a PCR/optical duplicate,
        # so it must be excluded when dropDuplicates is True.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 1024 ref1 2 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename, dropDuplicates=True))
            (read,) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read)
    def testAllowDuplicateIds(self):
        """
        It must be possible to allow duplicate ids (in this case due to a
        secondary match).
        """
        # Both alignments are for 'query1'. With allowDuplicateIds=True the
        # second read keeps the same id instead of getting a '/1' suffix.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query1 0 ref1 3 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2) = list(ps.queries(allowDuplicateIds=True))
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read1)
            self.assertEqual(Read('query1', '--TC------', '!!ZZ!!!!!!'),
                             read2)
    def testDuplicateIdDisambiguation(self):
        """
        Duplicate ids must be disambiguated if allowDuplicateIds is not given.
        """
        # Three alignments share the id 'query1'. The second and third must
        # be renamed with '/1' and '/2' suffixes respectively.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query1 0 ref1 3 60 2= * 0 0 TC ZZ',
            'query1 0 ref1 5 60 2S2= * 0 0 TCGA ZZZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2, read3) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read1)
            self.assertEqual(Read('query1/1', '--TC------', '!!ZZ!!!!!!'),
                             read2)
            self.assertEqual(Read('query1/2', '--TCGA----', '!!ZZZZ!!!!'),
                             read3)
    def testKeepQualityControlFailures(self):
        """
        Keeping matches flagged as quality control failures must give the
        expected result.
        """
        # SAM flag 512 marks query2 as a QC failure; with keepQCFailures it
        # must still be returned.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query2 512 ref1 4 60 2= * 0 0 TC ZZ',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename, keepQCFailures=True))
            (read1, read2) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read1)
            self.assertEqual(Read('query2', '---TC-----', '!!!ZZ!!!!!'), read2)
    def testSecondaryWithNoPreviousSequence(self):
        """
        A secondary match with a '*' seq that is not preceeded by a query with
        a sequence must result in a ValueError being raised.
        """
        # Flag 256 (secondary) with a '*' sequence and no earlier alignment
        # to borrow the sequence from: this must raise InvalidSAM.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query 256 ref1 3 60 4M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            error = ('^pysam produced an alignment \\(number 1\\) with no '
                     'query sequence without previously giving an alignment '
                     'with a sequence\\.$')
            queries = ps.queries()
            assertRaisesRegex(self, InvalidSAM, error, list, queries)
    def testSecondaryWithNoSequence(self):
        """
        A secondary match with a '*' seq must result in the sequence from the
        previous query being used.
        """
        # query2's secondary alignment (flag 256) has '*' for seq/quality, so
        # the sequence from query2's immediately preceding primary alignment
        # ('TCTA') must be re-used, placed at the new reference offset.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 3M * 0 0 TCT ZZZ',
            'query2 0 ref1 2 60 4M * 0 0 TCTA ZZZZ',
            'query2 256 ref1 6 60 4M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2, read3) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCT------', '!ZZZ!!!!!!'), read1)
            self.assertEqual(Read('query2', '-TCTA-----', '!ZZZZ!!!!!'), read2)
            self.assertEqual(Read('query2/1', '-----TCTA-', '!!!!!ZZZZ!'),
                             read3)
    def testSupplementaryWithNoPreviousSequence(self):
        """
        A supplementary match with a '*' seq that is not preceeded by a query
        with a sequence must result in a ValueError being raised.
        """
        # Flag 2048 (supplementary) with a '*' sequence and no earlier
        # alignment to borrow the sequence from: this must raise InvalidSAM.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query 2048 ref1 3 60 4M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            error = ('^pysam produced an alignment \\(number 1\\) with no '
                     'query sequence without previously giving an alignment '
                     'with a sequence\\.$')
            queries = ps.queries()
            assertRaisesRegex(self, InvalidSAM, error, list, queries)
    def testSupplementaryWithNoSequence(self):
        """
        A supplementary match with a '*' seq must result in the sequence from
        the previous query being used.
        """
        # query2's supplementary alignment (flag 2048) has '*' for its
        # seq/quality, so the sequence from query2's preceding primary
        # alignment ('TCTA') must be re-used at the new reference offset.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 3M * 0 0 TCT ZZZ',
            'query2 0 ref1 2 60 4M * 0 0 TCTA ZZZZ',
            'query2 2048 ref1 6 60 4M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2, read3) = list(ps.queries())
            self.assertEqual(Read('query1', '-TCT------', '!ZZZ!!!!!!'), read1)
            self.assertEqual(Read('query2', '-TCTA-----', '!ZZZZ!!!!!'), read2)
            self.assertEqual(Read('query2/1', '-----TCTA-', '!!!!!ZZZZ!'),
                             read3)
    def testNotSecondaryAndNotSupplementaryWithNoSequence(self):
        """
        An alignment with a '*' seq that is not secondary or supplementary
        must result in a ValueError being raised.
        """
        # Flag 0 (a primary alignment) with a '*' sequence is invalid input:
        # only secondary/supplementary alignments may omit their sequence.
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query 0 ref1 3 60 4M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            error = ('^pysam produced an alignment \\(number 1\\) with no '
                     'query sequence without previously giving an alignment '
                     'with a sequence\\.$')
            queries = ps.queries()
            assertRaisesRegex(self, InvalidSAM, error, list, queries)
    def testAlsoYieldAlignments(self):
        """
        A request for queries with their pysam alignments should have the
        expected result.
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG 123456',
            'query2 0 ref1 2 60 2= * 0 0 TC 78',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2) = list(ps.queries(addAlignment=True))

            self.assertEqual(Read('query1', '-TCTAGG---', '!123456!!!'), read1)
            self.assertEqual('TCTAGG', read1.alignment.query_sequence)
            # pysam exposes qualities as integers; adding 33 converts each
            # back to its Phred+33 ASCII character from the SAM file.
            self.assertEqual('123456', ''.join(
                map(lambda x: chr(x + 33), read1.alignment.query_qualities)))

            self.assertEqual(Read('query2', '-TC-------', '!78!!!!!!!'), read2)
            self.assertEqual('TC', read2.alignment.query_sequence)
            self.assertEqual('78', ''.join(
                map(lambda x: chr(x + 33), read2.alignment.query_qualities)))
    def testHardClippingInCIGARButQueryNotHardClipped(self):
        """
        As documented in https://github.com/acorg/dark-matter/issues/630 we
        must deal correctly with a case in which the CIGAR string says a
        query is hard-clipped but the query sequence in the SAM file
        actually isn't. This can be due to a prior alignment with a soft clip,
        in which case the full query sequence has to be given before the
        secondary alignment with the hard clip.
        """
        # The primary alignment (3S5M) carries the full 8-base query; the
        # secondary alignments (3H5M and 5H3M) have '*' sequences whose
        # hard clipping must be applied to that stored full query.
        data = '\n'.join([
            '@SQ SN:Chimp-D00220 LN:8',
            '@SQ SN:D-AM494716 LN:8',
            '@SQ SN:D-XXX LN:8',
            '@SQ SN:Chimp-YYY LN:8',
            'query1 0 Chimp-D00220 1 0 3S5M * 0 0 TTTTGGTT 12345678',
            'query1 256 D-AM494716 1 0 3H5M * 0 0 * *',
            'query1 256 D-XXX 1 0 5H3M * 0 0 * *',
            'query1 0 Chimp-YYY 1 0 8M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2, read3, read4) = list(ps.queries(addAlignment=True))

            self.assertEqual(Read('query1', 'TGGTT---', '45678!!!'), read1)
            self.assertEqual('TTTTGGTT', read1.alignment.query_sequence)

            self.assertEqual(Read('query1/1', 'TGGTT---', '45678!!!'), read2)
            self.assertEqual('TGGTT', read2.alignment.query_sequence)

            self.assertEqual(Read('query1/2', 'GTT-----', '678!!!!!'), read3)
            self.assertEqual('GTT', read3.alignment.query_sequence)

            self.assertEqual(Read('query1/3', 'TTTTGGTT', '12345678'), read4)
            self.assertEqual('TTTTGGTT', read4.alignment.query_sequence)
    def testSecondaryAlignmentHasQuery(self):
        """
        If the first alignment of a query is against a reference that is not
        wanted, a subsequent secondary alignment (SAM flag = 256) must have
        the original query and quality strings (even though these are only
        present in the SAM as * characters and the query is None when it comes
        back from pysam).
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query1 256 ref2 2 60 2=2X2M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2) = list(ps.queries(addAlignment=True))

            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read1)
            self.assertEqual('ref1', read1.alignment.reference_name)

            # The secondary alignment's '*' seq/quality must be filled in
            # from the earlier primary alignment of the same query.
            self.assertEqual(Read('query1/1', '-TCTAGG---', '!ZZZZZZ!!!'),
                             read2)
            self.assertEqual('ref2', read2.alignment.reference_name)
    def testSupplementaryAlignmentHasQuery(self):
        """
        If the first alignment of a query is against a reference that is not
        wanted, a subsequent supplementary alignment (SAM flag = 2048) must
        have the original query and quality strings (even though these are only
        present in the SAM as * characters and the query is None when it comes
        back from pysam).
        """
        data = '\n'.join([
            '@SQ SN:ref1 LN:10',
            '@SQ SN:ref2 LN:10',
            'query1 0 ref1 2 60 2=2X2M * 0 0 TCTAGG ZZZZZZ',
            'query1 2048 ref2 2 60 2=2X2M * 0 0 * *',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            ps = PaddedSAM(SAMFilter(filename))
            (read1, read2) = list(ps.queries(addAlignment=True))

            self.assertEqual(Read('query1', '-TCTAGG---', '!ZZZZZZ!!!'), read1)
            self.assertEqual('ref1', read1.alignment.reference_name)

            # The supplementary alignment's '*' seq/quality must be filled
            # in from the earlier primary alignment of the same query.
            self.assertEqual(Read('query1/1', '-TCTAGG---', '!ZZZZZZ!!!'),
                             read2)
            self.assertEqual('ref2', read2.alignment.reference_name)
class TestSamReferencesToStr(TestCase):
    """
    Test the samReferencesToStr function.
    """
    def testSimple(self):
        """
        The referencesToStr method must return the expected string.
        """
        # Two @SQ header lines: one output line per reference, giving the
        # reference id and its length.
        data = '\n'.join([
            '@SQ SN:id1 LN:90',
            '@SQ SN:id2 LN:91',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            self.assertEqual('id1 (length 90)\nid2 (length 91)',
                             samReferencesToStr(filename))

    def testIndent(self):
        """
        The referencesToStr method must return the expected string when
        passed an indent.
        """
        # indent=2 must prefix each output line with two spaces.
        data = '\n'.join([
            '@SQ SN:id1 LN:90',
            '@SQ SN:id2 LN:91',
        ]).replace(' ', '\t')
        with dataFile(data) as filename:
            self.assertEqual('  id1 (length 90)\n  id2 (length 91)',
                             samReferencesToStr(filename, indent=2))
class TestHardClip(TestCase):
    """
    Test the _hardClip function.

    _hardClip takes a sequence, a quality string, and CIGAR tuples, and
    returns a (sequence, quality, clipWasPerformed) 3-tuple.
    """
    def testHardClipInMiddle(self):
        """
        If hard clipping is given as an operation not at the beginning or end
        of the sequence, a ValueError must be raised.
        """
        error = (
            '^Invalid CIGAR tuples .* contains hard-clipping operation '
            'that is neither at the start nor the end of the sequence\\.$')
        assertRaisesRegex(
            self, ValueError, error, _hardClip, 'CGT', '123',
            ((CMATCH, 1), (CHARD_CLIP, 1), (CMATCH, 1),))

    def testThreeHardClips(self):
        """
        If hard clipping is specified more than twice, a ValueError must be
        raised.
        """
        error = ('^Invalid CIGAR tuples .* specifies hard-clipping 3 times '
                 '\\(2 is the maximum\\).$')
        assertRaisesRegex(
            self, ValueError, error, _hardClip, 'CGT', '123',
            ((CHARD_CLIP, 1), (CHARD_CLIP, 1), (CHARD_CLIP, 1),))

    def testNoClip(self):
        """
        If no hard clipping is indicated, the function must return the
        original sequence.
        """
        self.assertEqual(('CGT', '123', False),
                         _hardClip('CGT', '123', ((CMATCH, 3),)))

    def testClipLeft(self):
        """
        If hard clipping on the left is indicated, and has not been done,
        the function must return the expected sequence.
        """
        # 3H3M on a 6-base sequence: the first 3 bases (and qualities) go.
        self.assertEqual(
            ('CGT', '456', True),
            _hardClip('CAACGT', '123456', ((CHARD_CLIP, 3), (CMATCH, 3),)))

    def testClipRight(self):
        """
        If hard clipping on the right is indicated, and has not been done,
        the function must return the expected sequence.
        """
        # 2M4H on a 6-base sequence: the last 4 bases (and qualities) go.
        self.assertEqual(
            ('CA', '12', True),
            _hardClip('CAACGT', '123456', ((CMATCH, 2), (CHARD_CLIP, 4),)))

    def testClipBoth(self):
        """
        If hard clipping on the left and right is indicated, and has not been
        done, the function must return the expected sequence.
        """
        self.assertEqual(
            ('AA', '23', True),
            _hardClip('CAACGT', '123456',
                      ((CHARD_CLIP, 1), (CMATCH, 2), (CHARD_CLIP, 3),)))

    def testClipLeftAlreadyDone(self):
        """
        If hard clipping on the left is indicated, and has already been done,
        the function must return the expected sequence.
        """
        # Sequence length already matches the non-clipped CIGAR length, so
        # no further clipping is performed (third tuple element is False).
        self.assertEqual(
            ('CGT', '123', False),
            _hardClip('CGT', '123', ((CHARD_CLIP, 3), (CMATCH, 3),)))

    def testClipRightAlreadyDone(self):
        """
        If hard clipping on the right is indicated, and has already been done,
        the function must return the expected sequence.
        """
        self.assertEqual(
            ('CA', '12', False),
            _hardClip('CA', '12', ((CMATCH, 2), (CHARD_CLIP, 4),)))

    def testClipBothAlreadyDone(self):
        """
        If hard clipping on the left and right is indicated, and has already
        been done, the function must return the expected sequence.
        """
        self.assertEqual(
            ('AA', '12', False),
            _hardClip('AA', '12',
                      ((CHARD_CLIP, 1), (CMATCH, 2), (CHARD_CLIP, 3),)))
|
terrycojones/dark-matter
|
test/test_sam.py
|
Python
|
mit
| 45,018
|
[
"pysam"
] |
5b68ab0d24d25142d4d8997eec32e48bd42ad1ed194eb176a67bb39ba4bfc6e3
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAN modules."""
import collections
import math
import sonnet as snt
import tensorflow.compat.v1 as tf
from cs_gan import utils
class GAN(object):
  """Standard generative adversarial network setup.

  The aim of the generator is to generate samples which fool a discriminator.
  Does not make any assumptions about the discriminator and generator loss
  functions.

  Trained module components:

    * discriminator
    * generator

  For the standard GAN algorithm, generator_inputs is a vector of noise (either
  Gaussian or uniform).
  """

  def __init__(self, discriminator, generator,
               num_z_iters=None, z_step_size=None,
               z_project_method=None, optimisation_cost_weight=None):
    """Constructs the module.

    Args:
      discriminator: The discriminator network. A sonnet module. See `nets.py`.
      generator: The generator network. A sonnet module. For examples, see
        `nets.py`.
      num_z_iters: an integer, the number of latent optimisation steps.
      z_step_size: an integer, latent optimisation step size.
      z_project_method: the method for projecting latent after optimisation,
        a string from {'norm', 'clip'}.
      optimisation_cost_weight: a float, how much to penalise the distance of z
        moved by latent optimisation.
    """
    self._discriminator = discriminator
    self.generator = generator
    self.num_z_iters = num_z_iters
    self.z_project_method = z_project_method
    if z_step_size:
      # The step size is stored in log space (and exponentiated when read)
      # so the learned value remains positive during training.
      self._log_step_size_module = snt.TrainableVariable(
          [],
          initializers={'w': tf.constant_initializer(math.log(z_step_size))})
      self.z_step_size = tf.exp(self._log_step_size_module())
    self._optimisation_cost_weight = optimisation_cost_weight

  def connect(self, data, generator_inputs):
    """Connects the components and returns the losses, outputs and debug ops.

    Args:
      data: a `tf.Tensor`: `[batch_size, ...]`. There are no constraints on the
        rank
        of this tensor, but it has to be compatible with the shapes expected
        by the discriminator.
      generator_inputs: a `tf.Tensor`: `[g_in_batch_size, ...]`. It does not
        have to have the same batch size as the `data` tensor. There are not
        constraints on the rank of this tensor, but it has to be compatible
        with the shapes the generator network supports as inputs.

    Returns:
      An `ModelOutputs` instance.
    """
    # Optimise the latents first, then generate samples from the result.
    samples, optimised_z = utils.optimise_and_sample(
        generator_inputs, self, data, is_training=True)
    optimisation_cost = utils.get_optimisation_cost(generator_inputs,
                                                    optimised_z)

    # Pass in the labels to the discriminator in case we are using a
    # discriminator which makes use of labels. The labels can be None.
    disc_data_logits = self._discriminator(data)
    disc_sample_logits = self._discriminator(samples)

    # Cross-entropy discriminator loss: real data labelled 1, samples 0.
    disc_data_loss = utils.cross_entropy_loss(
        disc_data_logits,
        tf.ones(tf.shape(disc_data_logits[:, 0]), dtype=tf.int32))
    disc_sample_loss = utils.cross_entropy_loss(
        disc_sample_logits,
        tf.zeros(tf.shape(disc_sample_logits[:, 0]), dtype=tf.int32))
    disc_loss = disc_data_loss + disc_sample_loss

    # Non-saturating generator loss: the generator is rewarded for samples
    # the discriminator labels 1 (i.e. classifies as real).
    generator_loss = utils.cross_entropy_loss(
        disc_sample_logits,
        tf.ones(tf.shape(disc_sample_logits[:, 0]), dtype=tf.int32))

    optimization_components = self._build_optimization_components(
        discriminator_loss=disc_loss, generator_loss=generator_loss,
        optimisation_cost=optimisation_cost)

    debug_ops = {}
    debug_ops['disc_data_loss'] = disc_data_loss
    debug_ops['disc_sample_loss'] = disc_sample_loss
    debug_ops['disc_loss'] = disc_loss
    debug_ops['gen_loss'] = generator_loss
    debug_ops['opt_cost'] = optimisation_cost
    if hasattr(self, 'z_step_size'):
      debug_ops['z_step_size'] = self.z_step_size

    return utils.ModelOutputs(
        optimization_components, debug_ops)

  def gen_loss_fn(self, data, samples):
    """Generator loss as latent optimisation's error function."""
    # `data` is unused here: the generator loss only depends on how the
    # discriminator scores the samples.
    del data
    disc_sample_logits = self._discriminator(samples)
    generator_loss = utils.cross_entropy_loss(
        disc_sample_logits,
        tf.ones(tf.shape(disc_sample_logits[:, 0]), dtype=tf.int32))
    return generator_loss

  def _build_optimization_components(
      self, generator_loss=None, discriminator_loss=None,
      optimisation_cost=None):
    """Create the optimization components for this module."""
    discriminator_vars = _get_and_check_variables(self._discriminator)
    generator_vars = _get_and_check_variables(self.generator)

    if hasattr(self, '_log_step_size_module'):
      # The learnable latent-optimisation step size is trained together
      # with the generator.
      step_vars = _get_and_check_variables(self._log_step_size_module)
      generator_vars += step_vars

    optimization_components = collections.OrderedDict()
    optimization_components['disc'] = utils.OptimizationComponent(
        discriminator_loss, discriminator_vars)
    if self._optimisation_cost_weight:
      # Penalise the distance moved by z during latent optimisation.
      generator_loss += self._optimisation_cost_weight * optimisation_cost
    optimization_components['gen'] = utils.OptimizationComponent(
        generator_loss, generator_vars)
    return optimization_components

  def get_variables(self):
    """Returns a (discriminator_vars, generator_vars) tuple of lists."""
    disc_vars = _get_and_check_variables(self._discriminator)
    gen_vars = _get_and_check_variables(self.generator)
    return disc_vars, gen_vars
def _get_and_check_variables(module):
module_variables = module.get_all_variables()
if not module_variables:
raise ValueError(
'Module {} has no variables! Variables needed for training.'.format(
module.module_name))
# TensorFlow optimizers require lists to be passed in.
return list(module_variables)
|
deepmind/deepmind-research
|
cs_gan/gan.py
|
Python
|
apache-2.0
| 6,384
|
[
"Gaussian"
] |
25a3c621cabfc064457b585c105e7be51def91c5422b60cd3f4c74784088d355
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
from ansible.compat.six import string_types
from ansible.compat.six.moves import builtins
from ansible import constants as C
from ansible.plugins import filter_loader, test_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def safe_eval(expr, locals=None, include_exceptions=False):
    '''
    Evaluate an expression while only permitting a whitelisted subset of
    Python AST node types and callables.

    This is intended for allowing things like:
    with_items: a_list_variable

    Where Jinja2 would return a string but we do not want to allow it to
    call functions (outside of Jinja2, where the env is constrained). If
    the input data to this function came from an untrusted (remote) source,
    it should first be run through _clean_data_struct() to ensure the data
    is further sanitized prior to evaluation.

    :arg expr: the expression to evaluate. Non-string values are returned
        unchanged, as they may already be templated data structures.
    :kwarg locals: an optional mapping of local names made available to the
        evaluated expression. Defaults to an empty mapping.
    :kwarg include_exceptions: if True, return a (result, exception) tuple
        instead of the bare result.

    Based on:
    http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
    '''
    # Use None (not a mutable {}) as the default to avoid the shared
    # mutable default argument pitfall; build a fresh dict per call.
    if locals is None:
        locals = {}

    # define certain JSON types
    # eg. JSON booleans are unknown to python eval()
    JSON_TYPES = {
        'false': False,
        'null': None,
        'true': True,
    }

    # this is the whitelist of AST nodes we are going to
    # allow in the evaluation. Any node type other than
    # those listed here will raise an exception in our custom
    # visitor class defined below.
    SAFE_NODES = set(
        (
            ast.Add,
            ast.BinOp,
            ast.Call,
            ast.Compare,
            ast.Dict,
            ast.Div,
            ast.Expression,
            ast.List,
            ast.Load,
            ast.Mult,
            ast.Num,
            ast.Name,
            ast.Str,
            ast.Sub,
            ast.Tuple,
            ast.UnaryOp,
        )
    )

    # AST node types were expanded after 2.6
    if sys.version_info[:2] >= (2, 7):
        SAFE_NODES.update(
            set(
                (ast.Set,)
            )
        )

    # And in Python 3.4 too
    if sys.version_info[:2] >= (3, 4):
        SAFE_NODES.update(
            set(
                (ast.NameConstant,)
            )
        )

    # Build the whitelist of callables: the configured defaults plus the
    # names of all known Jinja2 filter and test plugins.
    filter_list = []
    for filter_plugin in filter_loader.all():
        # named 'filter_plugin' so the 'filter' builtin is not shadowed
        filter_list.extend(filter_plugin.filters().keys())

    test_list = []
    for test_plugin in test_loader.all():
        test_list.extend(test_plugin.tests().keys())

    CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list

    class CleansingNodeVisitor(ast.NodeVisitor):
        def generic_visit(self, node, inside_call=False):
            if type(node) not in SAFE_NODES:
                raise Exception("invalid expression (%s)" % expr)
            elif isinstance(node, ast.Call):
                inside_call = True
            elif isinstance(node, ast.Name) and inside_call:
                # Reject any call to a python builtin that is not whitelisted.
                if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
                    raise Exception("invalid function: %s" % node.id)
            # iterate over all child nodes
            for child_node in ast.iter_child_nodes(node):
                self.generic_visit(child_node, inside_call)

    if not isinstance(expr, string_types):
        # already templated to a datastructure, perhaps?
        if include_exceptions:
            return (expr, None)
        return expr

    cnv = CleansingNodeVisitor()
    try:
        parsed_tree = ast.parse(expr, mode='eval')
        cnv.visit(parsed_tree)
        compiled = compile(parsed_tree, expr, 'eval')
        # dict(locals) copies the mapping so the caller's dict (and the
        # expression's globals) cannot be mutated by the evaluation.
        result = eval(compiled, JSON_TYPES, dict(locals))

        if include_exceptions:
            return (result, None)
        else:
            return result
    except SyntaxError:
        # special handling for syntax errors, we just return
        # the expression string back as-is to support late evaluation
        if include_exceptions:
            return (expr, None)
        return expr
    except Exception as e:
        display.warning('Exception in safe_eval() on expr: %s (%s)' % (expr, e))
        if include_exceptions:
            return (expr, e)
        return expr
|
wkeeling/ansible
|
lib/ansible/template/safe_eval.py
|
Python
|
gpl-3.0
| 4,814
|
[
"VisIt"
] |
6397c3eef0a90da12135bf9fb54e3d804283d4dfda12438a2002cbca6788a874
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with commands building :py:class:`~basislist.BasisFamily` objects
for Pople and other non-Dunning orbital basis sets. Some
plausible fitting basis sets are supplied as defaults.
"""
from .basislist import *
def load_basfam_other():
    """Populate ``basisfamily_list`` with non-Dunning orbital basis families.

    Registers Pople (STO-nG, 3-21G, 6-31G..., 6-311G...), Ahlrichs (def2-*),
    Jensen (pcseg/pcSseg), Petersson (nZaPa-NR), F12, minix and miscellaneous
    families, attaching plausible (mostly non-validated) J-fit, JK-fit, RI-fit
    and SCF-guess companion sets where available.

    BUGFIX vs. earlier revision: ``cc-pvdz-f12`` was configured but never
    appended to ``basisfamily_list``, while ``cc-pvqz-f12`` was appended
    twice; all three F12 families are now registered exactly once.
    """

    def _fam(name, alias=None, zeta=None, jfit=None, jkfit=None, rifit=None, guess=None):
        # Create one BasisFamily, attach any supplied auxiliary/guess sets,
        # register it, and return it. ``alias`` is the second positional
        # argument some families take (e.g. '6-31G*' -> '6-31g_d_').
        args = (name,) if alias is None else (name, alias)
        kwargs = {} if zeta is None else {'zeta': zeta}
        fam = BasisFamily(*args, **kwargs)
        if jfit is not None:
            fam.add_jfit(jfit)
        if jkfit is not None:
            fam.add_jkfit(jkfit)
        if rifit is not None:
            fam.add_rifit(rifit)
        if guess is not None:
            fam.add_guess(guess)
        basisfamily_list.append(fam)
        return fam

    # --- Pople minimal / small split-valence ---------------------------------
    # Here lie practical (non-validated) fitting bases for Pople orbital sets.
    # STO-3G / 3-21G are too small for add_guess.
    _fam('STO-3G', zeta=1, jkfit='def2-universal-jkfit', rifit='def2-svp-ri')
    _fam('STO-6G', zeta=1, jkfit='def2-universal-jkfit', rifit='def2-svp-ri')
    _fam('3-21G', zeta=1, jkfit='def2-universal-jkfit', rifit='def2-svp-ri')

    # --- Pople 6-31G series (zeta=2) -----------------------------------------
    # Five polarization variants per augmentation level; star names are
    # aliases for the parenthesized forms ('6-31G*' == '6-31G(d)', etc.).
    for aug, jk, ri in [('', 'cc-pvdz-jkfit', 'cc-pvdz-ri'),
                        ('+', 'heavy-aug-cc-pvdz-jkfit', 'heavy-aug-cc-pvdz-ri'),
                        ('++', 'aug-cc-pvdz-jkfit', 'aug-cc-pvdz-ri')]:
        p = 'p' * len(aug)  # '+' -> 'p', '++' -> 'pp' in alias spellings
        _fam('6-31%sG' % aug, zeta=2, jkfit=jk, rifit=ri, guess='3-21g')
        _fam('6-31%sG(d)' % aug, zeta=2, jkfit=jk, rifit=ri, guess='3-21g')
        _fam('6-31%sG(d,p)' % aug, zeta=2, jkfit=jk, rifit=ri, guess='3-21g')
        _fam('6-31%sG*' % aug, '6-31%sg_d_' % p, zeta=2, jkfit=jk, rifit=ri, guess='3-21g')
        _fam('6-31%sG**' % aug, '6-31%sg_d_p_' % p, zeta=2, jkfit=jk, rifit=ri, guess='3-21g')

    # --- Pople 6-311G series (zeta=3) ----------------------------------------
    pols_6311 = ['', '(d)', '(d,p)', '*', '**', '(2d)', '(2d,p)', '(2d,2p)',
                 '(2df)', '(2df,p)', '(2df,2p)', '(2df,2pd)',
                 '(3df)', '(3df,p)', '(3df,2p)', '(3df,2pd)', '(3df,3pd)']
    for aug, jk, ri in [('', 'cc-pvtz-jkfit', 'cc-pvtz-ri'),
                        ('+', 'heavy-aug-cc-pvtz-jkfit', 'heavy-aug-cc-pvtz-ri'),
                        ('++', 'aug-cc-pvtz-jkfit', 'aug-cc-pvtz-ri')]:
        p = 'p' * len(aug)
        for pol in pols_6311:
            if pol == '*':
                alias = '6-311%sg_d_' % p
            elif pol == '**':
                alias = '6-311%sg_d_p_' % p
            else:
                alias = None
            _fam('6-311%sG%s' % (aug, pol), alias, zeta=3,
                 jkfit=jk, rifit=ri, guess='3-21g')

    # --- Ahlrichs def2 series ------------------------------------------------
    # All share the universal J/JK fitting sets; an RI set for def2-QZVPD is
    # not available. def2-SV(P) and def2-mSVP are too small for add_guess.
    for name, zeta, ri, guess in [
            ('def2-SV(P)', 2, 'def2-SV(P)-RI', None),
            ('def2-mSVP', 2, 'def2-SVP-RI', None),
            ('def2-SVP', 2, 'def2-SVP-RI', 'def2-SV(P)'),
            ('def2-SVPD', 2, 'def2-SVPD-RI', 'def2-SV(P)'),
            ('def2-TZVP', 3, 'def2-TZVP-RI', 'def2-SV(P)'),
            ('def2-TZVPD', 3, 'def2-TZVPD-RI', 'def2-SV(P)'),
            ('def2-TZVPP', 3, 'def2-TZVPP-RI', 'def2-SV(P)'),
            ('def2-TZVPPD', 3, 'def2-TZVPPD-RI', 'def2-SV(P)'),
            ('def2-QZVP', 4, 'def2-QZVP-RI', 'def2-SV(P)'),
            ('def2-QZVPD', 4, None, 'def2-SV(P)'),
            ('def2-QZVPP', 4, 'def2-QZVPP-RI', 'def2-SV(P)'),
            ('def2-QZVPPD', 4, 'def2-QZVPPD-RI', 'def2-SV(P)')]:
        _fam(name, zeta=zeta, jfit='def2-universal-JFIT',
             jkfit='def2-universal-JKFIT', rifit=ri, guess=guess)

    # --- Jensen pcseg / pcSseg series (zeta = level + 1) ---------------------
    # Here lie practical (non-validated) fitting bases for Jensen sets:
    # def2-universal J/JK fits through level 3, Dunning 5Z fits at level 4.
    jensen_ri = {
        'aug-pcseg': ('def2-SV(P)-RI', 'def2-SVPD-RI', 'def2-TZVPPD-RI',
                      'def2-QZVPPD-RI', 'aug-cc-pV5Z-RI'),
        'aug-pcSseg': ('def2-SV(P)-RI', 'def2-SVPD-RI', 'def2-TZVPPD-RI',
                       'def2-QZVPPD-RI', 'aug-cc-pwCV5Z-RI'),
        'pcseg': ('def2-SV(P)-RI', 'def2-SVP-RI', 'def2-TZVPP-RI',
                  'def2-QZVPP-RI', 'cc-pV5Z-RI'),
        'pcSseg': ('def2-SV(P)-RI', 'def2-SVP-RI', 'def2-TZVPP-RI',
                   'def2-QZVPP-RI', 'cc-pwCV5Z-RI'),
    }
    for stem in ('aug-pcseg', 'aug-pcSseg', 'pcseg', 'pcSseg'):
        augmented = stem.startswith('aug-')
        for lvl in range(5):
            name = '%s-%d' % (stem, lvl)
            if lvl < 4:
                jfit = 'def2-universal-JFIT'
                jkfit = 'def2-universal-JKFIT'
            else:
                jfit = None  # no universal J-fit assigned at level 4
                jkfit = 'aug-cc-pV5Z-JKFIT' if augmented else 'cc-pV5Z-JKFIT'
            # pcseg-0 itself is too small to serve as its own add_guess
            guess = None if name == 'pcseg-0' else 'pcseg-0'
            _fam(name, zeta=lvl + 1, jfit=jfit, jkfit=jkfit,
                 rifit=jensen_ri[stem][lvl], guess=guess)

    # --- Minix (too small for add_guess) -------------------------------------
    _fam('minix', zeta=2, jfit='def2-universal-JFIT',
         jkfit='def2-universal-JKFIT', rifit='def2-SVP-RI')

    # --- Others --------------------------------------------------------------
    _fam('DZ')
    _fam('DZP')
    # Point fix for DZVP: the default def2-qzvpp-jkfit was not giving correct
    # results for iodine-containing molecules.
    _fam('DZVP', jfit='dgauss-dzvp-autoabs', jkfit='dgauss-dzvp-mix',
         rifit='dgauss-dzvp-autoaux')
    _fam('psi3-DZP')
    _fam('psi3-TZ2P')
    _fam('psi3-TZ2PF')
    _fam('sadlej-lpol-dl')
    _fam('sadlej-lpol-ds')
    _fam('sadlej-lpol-fl')
    _fam('sadlej-lpol-fs')

    # --- Petersson nZaPa-NR series -------------------------------------------
    # Dunning zeta+1 fitting to be safe (tested on water-dimer SCF energies,
    # |E_conv - E_DFJK| <= 9e-6 Eh across zeta 2-7), capped at the largest
    # available aug-cc sets; the full aug-JKFIT is possibly too much.
    zapa_jk = {2: 'aug-cc-pvtz-jkfit', 3: 'aug-cc-pvqz-jkfit',
               4: 'aug-cc-pv5z-jkfit', 5: 'aug-cc-pv5z-jkfit',
               6: 'aug-cc-pv6z-ri', 7: 'aug-cc-pv6z-ri'}
    zapa_ri = {2: 'aug-cc-pvtz-ri', 3: 'aug-cc-pvqz-ri', 4: 'aug-cc-pv5z-ri',
               5: 'aug-cc-pv6z-ri', 6: 'aug-cc-pv6z-ri', 7: 'aug-cc-pv6z-ri'}
    for z in range(2, 8):
        _fam('%dzapa-nr' % z, zeta=z, jkfit=zapa_jk[z], rifit=zapa_ri[z],
             guess='pcseg-0')

    # --- F12 basis sets ------------------------------------------------------
    # ORCA manual suggests Dunning's zeta+1 fitting sets for F12 bases.
    # (Fixes former bug: cc-pvdz-f12 was never registered and cc-pvqz-f12
    # was registered twice.)
    for name, zeta, jk, ri in [
            ('cc-pvdz-f12', 2, 'cc-pvtz-jkfit', 'cc-pvtz-ri'),
            ('cc-pvtz-f12', 3, 'cc-pvqz-jkfit', 'cc-pvqz-ri'),
            ('cc-pvqz-f12', 4, 'cc-pv5z-jkfit', 'cc-pv5z-ri')]:
        _fam(name, zeta=zeta, jkfit=jk, rifit=ri, guess='pcseg-0')
|
psi4/psi4
|
psi4/driver/qcdb/basislistother.py
|
Python
|
lgpl-3.0
| 32,066
|
[
"ORCA",
"Psi4"
] |
77929127012ab222a7499557bbdb52c85cb14c95060cf6facc0744ac198bc5d8
|
from __future__ import print_function, division
from os.path import join
import tempfile
import shutil
from io import BytesIO
try:
from subprocess import STDOUT, CalledProcessError
from sympy.core.compatibility import check_output
except ImportError:
pass
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.misc import find_executable
from .latex import latex
from sympy.utilities.decorator import doctest_depends_on
@doctest_depends_on(exe=('latex', 'dvipng'), modules=('pyglet',),
                    disable_viewers=('evince', 'gimp', 'superior-dvi-viewer'))
def preview(expr, output='png', viewer=None, euler=True, packages=(),
            filename=None, outputbuffer=None, preamble=None, dvioptions=None,
            outputTexFile=None, **latex_settings):
    r"""
    View expression or LaTeX markup in PNG, DVI, PostScript or PDF form.
    If the expr argument is an expression, it will be exported to LaTeX and
    then compiled using the available TeX distribution. The first argument,
    'expr', may also be a LaTeX string. The function will then run the
    appropriate viewer for the given output format or use the user defined
    one. By default png output is generated.
    By default pretty Euler fonts are used for typesetting (they were used to
    typeset the well known "Concrete Mathematics" book). For that to work, you
    need the 'eulervm.sty' LaTeX style (in Debian/Ubuntu, install the
    texlive-fonts-extra package). If you prefer default AMS fonts or your
    system lacks 'eulervm' LaTeX package then unset the 'euler' keyword
    argument.
    To use viewer auto-detection, lets say for 'png' output, issue
    >>> from sympy import symbols, preview, Symbol
    >>> x, y = symbols("x,y")
    >>> preview(x + y, output='png')
    This will choose 'pyglet' by default. To select a different one, do
    >>> preview(x + y, output='png', viewer='gimp')
    The 'png' format is considered special. For all other formats the rules
    are slightly different. As an example we will take 'dvi' output format. If
    you would run
    >>> preview(x + y, output='dvi')
    then 'view' will look for available 'dvi' viewers on your system
    (predefined in the function, so it will try evince, first, then kdvi and
    xdvi). If nothing is found you will need to set the viewer explicitly.
    >>> preview(x + y, output='dvi', viewer='superior-dvi-viewer')
    This will skip auto-detection and will run user specified
    'superior-dvi-viewer'. If 'view' fails to find it on your system it will
    gracefully raise an exception.
    You may also enter 'file' for the viewer argument. Doing so will cause
    this function to return a file object in read-only mode, if 'filename'
    is unset. However, if it was set, then 'preview' writes the generated
    file to this filename instead.
    There is also support for writing to a BytesIO like object, which needs
    to be passed to the 'outputbuffer' argument.
    >>> from io import BytesIO
    >>> obj = BytesIO()
    >>> preview(x + y, output='png', viewer='BytesIO',
    ...     outputbuffer=obj)
    The LaTeX preamble can be customized by setting the 'preamble' keyword
    argument. This can be used, e.g., to set a different font size, use a
    custom documentclass or import certain set of LaTeX packages.
    >>> preamble = "\\documentclass[10pt]{article}\n" \
    ...            "\\usepackage{amsmath,amsfonts}\\begin{document}"
    >>> preview(x + y, output='png', preamble=preamble)
    If the value of 'output' is different from 'dvi' then command line
    options can be set ('dvioptions' argument) for the execution of the
    'dvi'+output conversion tool. These options have to be in the form of a
    list of strings (see subprocess.Popen).
    Additional keyword args will be passed to the latex call, e.g., the
    symbol_names flag.
    >>> phidd = Symbol('phidd')
    >>> preview(phidd, symbol_names={phidd:r'\ddot{\varphi}'})
    For post-processing the generated TeX File can be written to a file by
    passing the desired filename to the 'outputTexFile' keyword
    argument. To write the TeX code to a file named
    "sample.tex" and run the default png viewer to display the resulting
    bitmap, do
    >>> preview(x + y, outputTexFile="sample.tex")
    """
    # Viewers that are handled in-process rather than spawned as commands.
    special = [ 'pyglet' ]
    # --- Step 1: resolve which viewer to use -----------------------------
    if viewer is None:
        if output == "png":
            viewer = "pyglet"
        else:
            # sorted in order from most pretty to most ugly
            # very discussable, but indeed 'gv' looks awful :)
            # TODO add candidates for windows to list
            candidates = {
                "dvi": [ "evince", "okular", "kdvi", "xdvi" ],
                "ps": [ "evince", "okular", "gsview", "gv" ],
                "pdf": [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
            }
            # Pick the first candidate viewer found on PATH; the for/else
            # raises only when the loop completed without a `break`.
            try:
                for candidate in candidates[output]:
                    path = find_executable(candidate)
                    if path is not None:
                        viewer = path
                        break
                else:
                    raise SystemError(
                        "No viewers found for '%s' output format." % output)
            except KeyError:
                raise SystemError("Invalid output format: %s" % output)
    else:
        # Validate the explicitly requested viewer (or its pseudo-values
        # "file", "StringIO" and "BytesIO").
        if viewer == "file":
            if filename is None:
                SymPyDeprecationWarning(feature="Using viewer=\"file\" without a "
                    "specified filename", deprecated_since_version="0.7.3",
                    useinstead="viewer=\"file\" and filename=\"desiredname\"",
                    issue=3919).warn()
        elif viewer == "StringIO":
            # "StringIO" is a deprecated alias for "BytesIO".
            SymPyDeprecationWarning(feature="The preview() viewer StringIO",
                useinstead="BytesIO", deprecated_since_version="0.7.4",
                issue=3984).warn()
            viewer = "BytesIO"
            if outputbuffer is None:
                raise ValueError("outputbuffer has to be a BytesIO "
                                 "compatible object if viewer=\"StringIO\"")
        elif viewer == "BytesIO":
            if outputbuffer is None:
                raise ValueError("outputbuffer has to be a BytesIO "
                                 "compatible object if viewer=\"BytesIO\"")
        elif viewer not in special and not find_executable(viewer):
            raise SystemError("Unrecognized viewer: %s" % viewer)
    # --- Step 2: assemble the LaTeX document ------------------------------
    if preamble is None:
        actual_packages = packages + ("amsmath", "amsfonts")
        if euler:
            actual_packages += ("euler",)
        package_includes = "\n" + "\n".join(["\\usepackage{%s}" % p
                                             for p in actual_packages])
        preamble = r"""\documentclass[12pt]{article}
\pagestyle{empty}
%s
\begin{document}
""" % (package_includes)
    else:
        # A custom preamble must already contain its own \usepackage lines.
        if len(packages) > 0:
            raise ValueError("The \"packages\" keyword must not be set if a "
                             "custom LaTeX preamble was specified")
    latex_main = preamble + '\n%s\n\n' + r"\end{document}"
    # A plain string is used verbatim; anything else goes through the
    # LaTeX printer in inline math mode.
    if isinstance(expr, str):
        latex_string = expr
    else:
        latex_string = latex(expr, mode='inline', **latex_settings)
    # --- Step 3: compile in a temp dir, convert, then view/return ---------
    try:
        workdir = tempfile.mkdtemp()
        with open(join(workdir, 'texput.tex'), 'w') as fh:
            fh.write(latex_main % latex_string)
        if outputTexFile is not None:
            shutil.copyfile(join(workdir, 'texput.tex'), outputTexFile)
        if not find_executable('latex'):
            raise RuntimeError("latex program is not installed")
        try:
            check_output(['latex', '-halt-on-error', '-interaction=nonstopmode',
                          'texput.tex'], cwd=workdir, stderr=STDOUT)
        except CalledProcessError as e:
            raise RuntimeError(
                "'latex' exited abnormally with the following output:\n%s" %
                e.output)
        if output != "dvi":
            # Convert texput.dvi with the dvi<output> tool (dvips/dvipdf/dvipng).
            defaultoptions = {
                "ps": [],
                "pdf": [],
                "png": ["-T", "tight", "-z", "9", "--truecolor"]
            }
            commandend = {
                "ps": ["-o", "texput.ps", "texput.dvi"],
                "pdf": ["texput.dvi", "texput.pdf"],
                "png": ["-o", "texput.png", "texput.dvi"]
            }
            cmd = ["dvi" + output]
            if not find_executable(cmd[0]):
                raise RuntimeError("%s is not installed" % cmd[0])
            try:
                if dvioptions is not None:
                    cmd.extend(dvioptions)
                else:
                    cmd.extend(defaultoptions[output])
                cmd.extend(commandend[output])
            except KeyError:
                raise SystemError("Invalid output format: %s" % output)
            try:
                check_output(cmd, cwd=workdir, stderr=STDOUT)
            except CalledProcessError as e:
                raise RuntimeError(
                    "'%s' exited abnormally with the following output:\n%s" %
                    (' '.join(cmd), e.output))
        src = "texput.%s" % (output)
        if viewer == "file":
            # Return a BytesIO of the result, or move it to `filename`.
            if filename is None:
                buffer = BytesIO()
                with open(join(workdir, src), 'rb') as fh:
                    buffer.write(fh.read())
                return buffer
            else:
                shutil.move(join(workdir,src), filename)
        elif viewer == "BytesIO":
            with open(join(workdir, src), 'rb') as fh:
                outputbuffer.write(fh.read())
        elif viewer == "pyglet":
            # In-process preview window; only PNG output is supported here.
            try:
                from pyglet import window, image, gl
                from pyglet.window import key
            except ImportError:
                raise ImportError("pyglet is required for preview.\n visit http://www.pyglet.org/")
            if output == "png":
                from pyglet.image.codecs.png import PNGImageDecoder
                img = image.load(join(workdir, src), decoder=PNGImageDecoder())
            else:
                raise SystemError("pyglet preview works only for 'png' files.")
            offset = 25
            win = window.Window(
                width=img.width + 2*offset,
                height=img.height + 2*offset,
                caption="sympy",
                resizable=False
            )
            win.set_vsync(False)
            try:
                # Event handlers: close on window close or Q/Escape,
                # repaint the image centered on expose.
                def on_close():
                    win.has_exit = True
                win.on_close = on_close
                def on_key_press(symbol, modifiers):
                    if symbol in [key.Q, key.ESCAPE]:
                        on_close()
                win.on_key_press = on_key_press
                def on_expose():
                    gl.glClearColor(1.0, 1.0, 1.0, 1.0)
                    gl.glClear(gl.GL_COLOR_BUFFER_BIT)
                    img.blit(
                        (win.width - img.width) / 2,
                        (win.height - img.height) / 2
                    )
                win.on_expose = on_expose
                while not win.has_exit:
                    win.dispatch_events()
                    win.flip()
            except KeyboardInterrupt:
                pass
            win.close()
        else:
            # External viewer command; blocks until the viewer exits.
            try:
                check_output([viewer, src], cwd=workdir, stderr=STDOUT)
            except CalledProcessError as e:
                raise RuntimeError(
                    "'%s %s' exited abnormally with the following output:\n%s" %
                    (viewer, src, e.output))
    finally:
        try:
            shutil.rmtree(workdir) # delete directory
        except OSError as e:
            if e.errno != 2: # code 2 - no such file or directory
                raise
|
hrashk/sympy
|
sympy/printing/preview.py
|
Python
|
bsd-3-clause
| 11,827
|
[
"VisIt"
] |
56c63caefdb6b8c0cf7f2c7107b9ff8345717068d170c03f37339d94754f8d3e
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
GAMESS Topology Parser
======================
.. versionadded:: 0.9.1
Reads a GAMESS_ output file (also Firefly_ and `GAMESS-UK`_) and pulls
element information from it. Symmetrical assembly is read (not
symmetry element!). Atom names are read from the GAMESS section. Any
information about residues or segments will not be populated.
.. _GAMESS: http://www.msg.ameslab.gov/gamess/
.. _Firefly: http://classic.chem.msu.su/gran/gamess/index.html
.. _`GAMESS-UK`: http://www.cfs.dl.ac.uk/
Classes
-------
.. autoclass:: GMSParser
:members:
:inherited-members:
"""
from __future__ import absolute_import
import re
import numpy as np
from . import guessers
from ..lib.util import openany
from .base import TopologyReaderBase
from ..core.topology import Topology
from ..core.topologyattrs import (
Atomids,
Atomnames,
Atomtypes,
Masses,
Resids,
Resnums,
Segids,
AtomAttr,
)
class AtomicCharges(AtomAttr):
    """Per-atom integer atomic charge read from the GAMESS output.

    Thin :class:`AtomAttr` subclass that only declares the attribute
    naming used by the topology system.
    """
    attrname = 'atomiccharges'   # plural (group-level) accessor name
    singular = 'atomiccharge'    # singular (single-atom) accessor name
    per_object = 'atom'          # one value per atom
class GMSParser(TopologyReaderBase):
    """GAMESS_ topology parser.
    Creates the following Attributes:
     - names
     - atomic charges
    Guesses:
     - types
     - masses
    .. versionadded:: 0.9.1
    """
    format = 'GMS'
    def parse(self):
        """Read list of atoms from a GAMESS file.

        Scans the output until the "ATOM ATOMIC COORDINATES (BOHR)" header,
        then reads one atom per line until the table ends.  Raises
        ``EOFError`` if the header is never found.  Returns a
        :class:`Topology` with a single residue/segment.
        """
        names = []
        at_charges = []
        with openany(self.filename, 'rt') as inf:
            # Skip ahead to the coordinates table header.
            while True:
                line = inf.readline()
                if not line:
                    raise EOFError
                if re.match(r'^\s+ATOM\s+ATOMIC\s+COORDINATES\s*\(BOHR\).*',\
                        line):
                    break
            line = inf.readline() # skip
            # Read rows of "NAME CHARGE X Y Z"; a non-matching line ends the table.
            while True:
                line = inf.readline()
                _m = re.match(\
r'^\s*([A-Za-z_][A-Za-z_0-9]*)\s+([0-9]+\.[0-9]+)\s+(\-?[0-9]+\.[0-9]+)\s+(\-?[0-9]+\.[0-9]+)\s+(\-?[0-9]+\.[0-9]+).*',
                        line)
                if _m is None:
                    break
                name = _m.group(1)
                # Charge is printed as a float (e.g. "6.0") but is integral.
                at_charge = int(float(_m.group(2)))
                names.append(name)
                at_charges.append(at_charge)
                #TODO: may be use coordinates info from _m.group(3-5) ??
        # Element types and masses are guessed from the atom names.
        atomtypes = guessers.guess_types(names)
        masses = guessers.guess_masses(atomtypes)
        n_atoms = len(names)
        attrs = [
            Atomids(np.arange(n_atoms) + 1),
            Atomnames(np.array(names, dtype=object)),
            AtomicCharges(np.array(at_charges, dtype=np.int32)),
            Atomtypes(atomtypes, guessed=True),
            Masses(masses, guessed=True),
            Resids(np.array([1])),
            Resnums(np.array([1])),
            Segids(np.array(['SYSTEM'], dtype=object)),
        ]
        # Single residue, single segment — GAMESS has no residue information.
        top = Topology(n_atoms, 1, 1,
                       attrs=attrs)
        return top
|
alejob/mdanalysis
|
package/MDAnalysis/topology/GMSParser.py
|
Python
|
gpl-2.0
| 3,946
|
[
"GAMESS",
"MDAnalysis"
] |
2610f9166f9dde3a5eb6432f706caff3d886c7894b49c9850df672273da15423
|
# -------------------------------------------------------------------------
# Name: Waterdemand module
# Purpose:
#
# Author: PB
#
# Created: 15/07/2016
# Copyright: (c) PB 2016
# -------------------------------------------------------------------------
import numpy as np
from cwatm.management_modules import globals
from cwatm.management_modules.replace_pcr import npareatotal, npareamaximum
from cwatm.management_modules.data_handling import returnBool, binding, cbinding, loadmap, divideValues, checkOption, npareaaverage, readnetcdf2
from cwatm.hydrological_modules.water_demand.domestic import waterdemand_domestic
from cwatm.hydrological_modules.water_demand.industry import waterdemand_industry
from cwatm.hydrological_modules.water_demand.livestock import waterdemand_livestock
from cwatm.hydrological_modules.water_demand.irrigation import waterdemand_irrigation
from cwatm.hydrological_modules.water_demand.environmental_need import waterdemand_environmental_need
#PB1507
from cwatm.management_modules.data_handling import *
class water_demand:
"""
WATERDEMAND
calculating water demand -
    Industrial, domestic based on precalculated maps
Agricultural water demand based on water need by plants
**Global variables**
==================== ================================================================================ =========
Variable [self.var] Description Unit
==================== ================================================================================ =========
    readAvlStorGroundwat same as storGroundwater but equal to 0 when below a threshold            m
nonFossilGroundwater groundwater abstraction which is sustainable and not using fossil resources m
waterbalance_module
waterBodyID lakes/reservoirs map with a single ID for each lake/reservoir --
compress_LR boolean map as mask map for compressing lake/reservoir --
decompress_LR boolean map as mask map for decompressing lake/reservoir --
MtoM3C conversion factor from m to m3 (compressed map) --
MtoM3 Coefficient to change units --
lakeVolumeM3C compressed map of lake volume m3
lakeStorageC m3
reservoirStorageM3C
lakeResStorageC
lakeResStorage
waterBodyTypCTemp
InvDtSec
cellArea Cell area [m²] of each simulated mesh
smalllakeVolumeM3
smalllakeStorage
act_SurfaceWaterAbst
fracVegCover Fraction of area covered by the corresponding landcover type
addtoevapotrans
M3toM Coefficient to change units --
act_irrConsumption actual irrgation water consumption m
channelStorage
act_bigLakeResAbst
act_smallLakeResAbst
returnFlow
modflowPumpingM
modflowTopography
modflowDepth2
leakageC
domesticDemand
pot_domesticConsumpt
dom_efficiency
demand_unit
envFlow
industryDemand
pot_industryConsumpt
ind_efficiency
unmetDemandPaddy
unmetDemandNonpaddy
unmetDemand
efficiencyPaddy
efficiencyNonpaddy
returnfractionIrr
irrDemand
totalIrrDemand
livestockDemand
pot_livestockConsump
liv_efficiency
allocSegments
swAbstractionFractio
modflowPumping
leakage
pumping
nonIrrReturnFlowFrac
nonIrruse
act_indDemand
act_domDemand
act_livDemand
nonIrrDemand
totalWaterDemand
act_irrWithdrawal
act_nonIrrWithdrawal
act_totalWaterWithdr
act_indConsumption
act_domConsumption
act_livConsumption
act_nonIrrConsumptio
act_totalIrrConsumpt
act_totalWaterConsum
returnflowIrr
pot_nonIrrConsumptio
readAvlChannelStorag
reservoir_command_ar
leakageC_daily
leakageC_daily_segme
pot_GroundwaterAbstr
renewableAvlWater
act_irrNonpaddyWithd
act_irrPaddyWithdraw
act_irrPaddyDemand
act_irrNonpaddyDeman
act_indWithdrawal
act_domWithdrawal
act_livWithdrawal
waterDemandLost
==================== ================================================================================ =========
**Functions**
"""
    def __init__(self, model):
        """Wire the sectoral sub-modules onto the shared model state.

        Parameters
        ----------
        model : the CWatM model object; ``model.var`` is the shared
            variable container used by every hydrological module.
        """
        self.var = model.var
        self.model = model
        # One sub-module per demand sector; each exposes initial()/dynamic().
        self.domestic = waterdemand_domestic(model)
        self.industry = waterdemand_industry(model)
        self.livestock = waterdemand_livestock(model)
        self.irrigation = waterdemand_irrigation(model)
        self.environmental_need = waterdemand_environmental_need(model)
    def initial(self):
        """
        Initial part of the water demand module

        Set the water allocation: initializes the sectoral sub-modules,
        derives the surface-water abstraction fraction, and builds the
        allocation-zone map.  When water demand is switched off, all
        demand/withdrawal variables are zero-initialized instead.
        """
        if checkOption('includeWaterDemand'):
            self.domestic.initial()
            self.industry.initial()
            self.livestock.initial()
            self.irrigation.initial()
            self.environmental_need.initial()
            # if waterdemand is fixed: demand of a single fixed year is
            # reused for every simulated year (see dynamic()).
            self.var.waterdemandFixed = False
            if "waterdemandFixed" in binding:
                if returnBool('waterdemandFixed'):
                    self.var.waterdemandFixed = True
                    self.var.waterdemandFixedYear = loadmap('waterdemandFixedYear')
            #if 'usingAllocSegments' in binding:
            #    if checkOption('usingAllocSegments'):
            #        self.var.allocSegments = loadmap('allocSegments').astype(np.int)
            #        self.var.segmentArea = np.where(self.var.allocSegments > 0, npareatotal(self.var.cellArea, self.var.allocSegments), self.var.cellArea)
            # -------------------------------------------
            # partitioningGroundSurfaceAbstraction
            # partitioning abstraction sources: groundwater and surface water
            # partitioning based on local average baseflow (m3/s) and upstream average discharge (m3/s)
            # estimates of fractions of groundwater and surface water abstractions
            # A negative swAbstractionFrac in the settings means "derive it
            # from baseflow/discharge" instead of using a fixed value.
            swAbstractionFraction = loadmap('swAbstractionFrac')
            if swAbstractionFraction < 0:
                averageBaseflowInput = loadmap('averageBaseflow')
                averageDischargeInput = loadmap('averageDischarge')
                # convert baseflow from m to m3/s
                if returnBool('baseflowInM'):
                    averageBaseflowInput = averageBaseflowInput * self.var.cellArea * self.var.InvDtSec
                if checkOption('usingAllocSegments'):
                    # average baseflow over each allocation segment
                    averageBaseflowInput = np.where(self.var.allocSegments > 0, npareaaverage(averageBaseflowInput, self.var.allocSegments), averageBaseflowInput)
                    # averageUpstreamInput = np.where(self.var.allocSegments > 0, npareamaximum(averageDischargeInput, self.var.allocSegments), averageDischargeInput)
                # surface-water share = discharge / (discharge + baseflow), clipped to [0, 1]
                swAbstractionFraction = np.maximum(0.0, np.minimum(1.0, averageDischargeInput / np.maximum(1e-20, averageDischargeInput + averageBaseflowInput)))
                swAbstractionFraction = np.minimum(1.0, np.maximum(0.0, swAbstractionFraction))
            # Area-weight the fraction over land-cover classes; classes 4-5
            # (presumably sealed/water — TODO confirm) always abstract from
            # surface water (fraction 1).
            self.var.swAbstractionFraction = globals.inZero.copy()
            for No in range(4):
                self.var.swAbstractionFraction += self.var.fracVegCover[No] * swAbstractionFraction
            for No in range(4, 6):
                self.var.swAbstractionFraction += self.var.fracVegCover[No]
            # demand_unit flag: input demand maps already in [m] if True
            # (presumably; verify against the demand sub-modules).
            self.var.demand_unit = True
            if "demand_unit" in binding:
                self.var.demand_unit = returnBool('demand_unit')
            # allocation zone
            # regular grid inside the 2d array
            # inner grid size
            inner = 1
            if "allocation_area" in binding:
                inner = int(loadmap('allocation_area'))
            latldd, lonldd, cell, invcellldd, rows, cols = readCoord(cbinding('Ldd'))
            # NOTE(review): bare except below; also if cbinding('Ldd') itself
            # raised, `filename` would be undefined in the tiff fallback.
            try:
                filename = os.path.splitext(cbinding('Ldd'))[0] + '.nc'
                cut0, cut1, cut2, cut3 = mapattrNetCDF(filename, check=False)
            except:
                cut0, cut1, cut2, cut3 = mapattrTiff(gdal.Open(filename, GA_ReadOnly))
            # Tile the map into inner x inner blocks, each block = one zone id,
            # then cut to the model window and compress to the 1D mask array.
            arr = np.kron(np.arange(rows // inner * cols // inner).reshape((rows // inner, cols // inner)), np.ones((inner, inner)))
            arr = arr[cut2:cut3, cut0:cut1].astype(int)
            self.var.allocation_zone = compressArray(arr)
            self.var.modflowPumping = globals.inZero.copy()
            self.var.modflowPumpingM = globals.inZero.copy()
            self.var.modflowDepth2 = 0
            self.var.modflowTopography = 0
            self.var.leakage = globals.inZero.copy()
            self.var.pumping = globals.inZero.copy()
        else: # no water demand — zero-initialize everything downstream code reads
            self.var.nonIrrReturnFlowFraction = globals.inZero.copy()
            self.var.nonFossilGroundwaterAbs = globals.inZero.copy()
            self.var.nonIrruse = globals.inZero.copy()
            self.var.act_indDemand = globals.inZero.copy()
            self.var.act_domDemand = globals.inZero.copy()
            self.var.act_livDemand = globals.inZero.copy()
            self.var.nonIrrDemand = globals.inZero.copy()
            self.var.totalIrrDemand = globals.inZero.copy()
            self.var.totalWaterDemand = globals.inZero.copy()
            self.var.act_irrWithdrawal = globals.inZero.copy()
            self.var.act_nonIrrWithdrawal = globals.inZero.copy()
            self.var.act_totalWaterWithdrawal = globals.inZero.copy()
            self.var.act_indConsumption = globals.inZero.copy()
            self.var.act_domConsumption = globals.inZero.copy()
            self.var.act_livConsumption = globals.inZero.copy()
            self.var.act_nonIrrConsumption = globals.inZero.copy()
            self.var.act_totalIrrConsumption = globals.inZero.copy()
            self.var.act_totalWaterConsumption = globals.inZero.copy()
            self.var.unmetDemand = globals.inZero.copy()
            self.var.addtoevapotrans = globals.inZero.copy()
            self.var.returnflowIrr = globals.inZero.copy()
            self.var.returnFlow = globals.inZero.copy()
            self.var.unmetDemandPaddy = globals.inZero.copy()
            self.var.unmetDemandNonpaddy = globals.inZero.copy()
            # efficiencies default to 1 (no losses) when demand is disabled
            self.var.ind_efficiency = 1.
            self.var.dom_efficiency = 1.
            self.var.liv_efficiency = 1
            self.var.modflowPumping = 0
            self.var.modflowDepth2 = 0
            self.var.modflowTopography = 0
            self.var.act_bigLakeResAbst = globals.inZero.copy()
            self.var.leakage = globals.inZero.copy()
            self.var.pumping = globals.inZero.copy()
            self.var.unmet_lost = 0
def dynamic(self):
"""
Dynamic part of the water demand module
* calculate the fraction of water from surface water vs. groundwater
* get non-Irrigation water demand and its return flow fraction
"""
if checkOption('includeWaterDemand'):
# for debugging of a specific date
#if (globals.dateVar['curr'] >= 137):
# ii =1
# ----------------------------------------------------
# WATER DEMAND
# Fix year of water demand on predefined year
wd_date = globals.dateVar['currDate']
if self.var.waterdemandFixed:
wd_date = wd_date.replace(day = 1)
wd_date = wd_date.replace(year = self.var.waterdemandFixedYear)
self.domestic.dynamic(wd_date)
self.industry.dynamic(wd_date)
self.livestock.dynamic(wd_date)
self.irrigation.dynamic()
self.environmental_need.dynamic()
if globals.dateVar['newStart'] or globals.dateVar['newMonth']:
# total (potential) non irrigation water demand
self.var.nonIrrDemand = self.var.domesticDemand + self.var.industryDemand + self.var.livestockDemand
self.var.pot_nonIrrConsumption = np.minimum(self.var.nonIrrDemand, self.var.pot_domesticConsumption +
self.var.pot_industryConsumption + self.var.pot_livestockConsumption)
# fraction of return flow from domestic and industrial water demand
self.var.nonIrrReturnFlowFraction = divideValues((self.var.nonIrrDemand - self.var.pot_nonIrrConsumption), self.var.nonIrrDemand)
# non-irrg fracs in nonIrrDemand
frac_industry = divideValues(self.var.industryDemand, self.var.nonIrrDemand)
frac_domestic = divideValues(self.var.domesticDemand, self.var.nonIrrDemand)
frac_livestock = divideValues(self.var.livestockDemand, self.var.nonIrrDemand)
# Sum up water demand
# totalDemand [m]: total maximum (potential) water demand: irrigation and non irrigation
totalDemand = self.var.nonIrrDemand + self.var.totalIrrDemand # in [m]
# ----------------------------------------------------
# WATER AVAILABILITY
# to avoid small values and to avoid surface water abstractions from dry channels (>= 0.01mm)
#self.var.readAvlChannelStorageM = np.where(self.var.channelStorage < (0.0005 * self.var.cellArea), 0., self.var.channelStorage) # in [m3]
# conversion m3 -> m # minus environmental flow
self.var.readAvlChannelStorageM = np.maximum(0.,self.var.channelStorage * self.var.M3toM - self.var.envFlow) # in [m]
#-------------------------------------
# WATER DEMAND vs. WATER AVAILABILITY
#-------------------------------------
# surface water abstraction that can be extracted to fulfill totalDemand
# - based on ChannelStorage and swAbstractionFraction * totalDemand
# sum up potential surface water abstraction (no groundwater abstraction under water and sealed area)
pot_SurfaceAbstract = totalDemand * self.var.swAbstractionFraction
# only local surface water abstraction is allowed (network is only within a cell)
self.var.act_SurfaceWaterAbstract = np.minimum(self.var.readAvlChannelStorageM, pot_SurfaceAbstract)
self.var.act_channelAbst = self.var.act_SurfaceWaterAbstract.copy()
# if surface water is not sufficient it is taken from groundwater
if checkOption('includeWaterBodies'):
# water that is still needed from surface water
remainNeed = np.maximum(pot_SurfaceAbstract - self.var.act_SurfaceWaterAbstract, 0)
# first from big Lakes and reservoirs, big lakes cover several gridcells
# collect all water demand from lake pixels of the same id
remainNeedBig = npareatotal(remainNeed, self.var.waterBodyID)
# not only the lakes and reservoirs but the command areas around water bodies e.g. here a buffer
remainNeedBig = npareatotal(remainNeed, self.var.waterBodyBuffer)
remainNeedBigC = np.compress(self.var.compress_LR, remainNeedBig)
# Storage of a big lake
lakeResStorageC = np.where(self.var.waterBodyTypCTemp == 0, 0.,
np.where(self.var.waterBodyTypCTemp == 1, self.var.lakeStorageC, self.var.reservoirStorageM3C)) / self.var.MtoM3C
minlake = np.maximum(0., 0.98*lakeResStorageC) #reasonable but arbitrary limit
act_bigLakeAbstC = np.minimum(minlake , remainNeedBigC)
# substract from both, because it is sorted by self.var.waterBodyTypCTemp
self.var.lakeStorageC = self.var.lakeStorageC - act_bigLakeAbstC * self.var.MtoM3C
self.var.lakeVolumeM3C = self.var.lakeVolumeM3C - act_bigLakeAbstC * self.var.MtoM3C
self.var.reservoirStorageM3C = self.var.reservoirStorageM3C - act_bigLakeAbstC * self.var.MtoM3C
# and from the combined one for waterbalance issues
self.var.lakeResStorageC = self.var.lakeResStorageC - act_bigLakeAbstC * self.var.MtoM3C
self.var.lakeResStorage = globals.inZero.copy()
np.put(self.var.lakeResStorage, self.var.decompress_LR, self.var.lakeResStorageC)
bigLakesFactorC = divideValues(act_bigLakeAbstC , remainNeedBigC)
# and back to the big array
bigLakesFactor = globals.inZero.copy()
np.put(bigLakesFactor, self.var.decompress_LR, bigLakesFactorC)
#bigLakesFactorAllaroundlake = npareamaximum(bigLakesFactor, self.var.waterBodyID)
bigLakesFactorAllaroundlake = npareamaximum(bigLakesFactor, self.var.waterBodyBuffer)
# abstraction from big lakes is partioned to the users around the lake
self.var.act_bigLakeResAbst = remainNeed * bigLakesFactorAllaroundlake
# remaining need is used from small lakes
remainNeed1 = remainNeed * (1 - bigLakesFactorAllaroundlake)
#minlake = np.maximum(0.,self.var.smalllakeStorage - self.var.minsmalllakeStorage) * self.var.M3toM
if returnBool('useSmallLakes'):
minlake = np.maximum(0.,0.98 * self.var.smalllakeStorage) * self.var.M3toM
self.var.act_smallLakeResAbst = np.minimum(minlake, remainNeed1)
#self.var.actLakeResAbst = np.minimum(0.5 * self.var.smalllakeStorageM3 * self.var.M3toM, remainNeed)
# act_smallLakesres is substracted from small lakes storage
self.var.smalllakeVolumeM3 = self.var.smalllakeVolumeM3 - self.var.act_smallLakeResAbst * self.var.MtoM3
self.var.smalllakeStorage = self.var.smalllakeStorage - self.var.act_smallLakeResAbst * self.var.MtoM3
else:
self.var.act_smallLakeResAbst = 0
# available surface water is from river network + large/small lake & reservoirs
self.var.act_SurfaceWaterAbstract = self.var.act_SurfaceWaterAbstract + self.var.act_bigLakeResAbst + self.var.act_smallLakeResAbst
# check for rounding issues
self.var.act_SurfaceWaterAbstract = np.minimum(totalDemand,self.var.act_SurfaceWaterAbstract)
# remaining is taken from groundwater if possible
remainNeed2 = pot_SurfaceAbstract - self.var.act_SurfaceWaterAbstract
if 'using_reservoir_command_areas' in binding:
if checkOption('using_reservoir_command_areas'): # checkOption('usingAllocSegments2'):
# ABOUT
#
# The command area of a reservoir is the area that can receive water from this reservoir, through canals or other means.
# Performed above, each cell has attempted to satisfy its demands with local water using in-cell channel, lake, and reservoir storage.
# The remaining demand within each command area is totaled and requested from the associated reservoir.
# The reservoir offers this water up to a daily maximum relating to the available storage in the reservoir, defined in the Reservoir_releases_input_file.
#
# SETTINGS FILE AND INPUTS
# -Activating
# In the OPTIONS section towards the beginning of the settings file, add/set
# using_reservoir_command_areas = True
# - Command areas raster map
# Anywhere after the OPTIONS section (in WATERDEMAND, for example), add/set reservoir_command_areas to a path holding...
# information about the command areas. This Command areas raster map should assign the same positive integer coding to each cell within the same segment.
# All other cells must Nan values, or values <= 0.
# -Optional inputs
#
# Anywhere after the OPTIONS section, add/set Reservoir_releases_input_file to a path holding information about irrigation releases.
# This should be a raster map (netCDF) of 366 values determining the maximum fraction of available storage to be used for meeting water demand...
# in the associated command area on the day of the year. If this is not included, a value of 0.01 will be assumed,
# i.e. 1% of the reservoir storage can be at most released into the command area on each day.
## Command area total demand
#
# The remaining demand within each command area [M3] is put into a map where each cell in the command area holds this total demand
demand_Segment = np.where(self.var.reservoir_command_areas > 0,
npareatotal(remainNeed2 * self.var.cellArea,
self.var.reservoir_command_areas),
0) # [M3]
## Reservoir associated with the Command Area
#
# If there is more than one reservoir in a command area, the storage of the reservoir with maximum storage in this time-step is chosen.
# The map resStorageTotal_alloc holds this maximum reservoir storage within a command area in all cells within that command area
reservoirStorageM3 = globals.inZero.copy()
np.put(reservoirStorageM3, self.var.decompress_LR, self.var.reservoirStorageM3C)
resStorageTotal_alloc = np.where(self.var.reservoir_command_areas > 0,
npareamaximum(reservoirStorageM3,
self.var.reservoir_command_areas), 0) # [M3]
# In the map resStorageTotal_allocC, the maximum storage from each allocation segment is held in all reservoir cells within that allocation segment.
# We now correct to remove the reservoirs that are not this maximum-storage-reservoir for the command area.
resStorageTotal_allocC = np.compress(self.var.compress_LR, resStorageTotal_alloc)
resStorageTotal_allocC = np.multiply(resStorageTotal_allocC == self.var.reservoirStorageM3C,
resStorageTotal_allocC)
# The rules for the maximum amount of water to be released for irrigation are found for the chosen maximum-storage reservoir in each command area
day_of_year = globals.dateVar['currDate'].timetuple().tm_yday
if 'Reservoir_releases_input_file' in binding:
resStorage_maxFracForIrrigation = readnetcdf2('Reservoir_releases_input_file', day_of_year,
useDaily='DOY', value='Fraction of Volume')
else:
resStorage_maxFracForIrrigation = 0.01 + globals.inZero.copy()
# resStorage_maxFracForIrrigationC holds the fractional rules found for each reservoir, so we must null those that are not the maximum-storage reservoirs
resStorage_maxFracForIrrigationC = np.compress(self.var.compress_LR,
resStorage_maxFracForIrrigation)
resStorage_maxFracForIrrigationC = np.multiply(
resStorageTotal_allocC == self.var.reservoirStorageM3C, resStorage_maxFracForIrrigationC)
np.put(resStorage_maxFracForIrrigation, self.var.decompress_LR, resStorage_maxFracForIrrigationC)
resStorage_maxFracForIrrigation_CA = np.where(self.var.reservoir_command_areas > 0,
npareamaximum(resStorage_maxFracForIrrigation,
self.var.reservoir_command_areas), 0)
if 'Water_conveyance_efficiency' in binding:
Water_conveyance_efficiency = loadmap('Water_conveyance_efficiency')
else:
Water_conveyance_efficiency = 1.0
act_bigLakeResAbst_alloc = np.minimum(resStorage_maxFracForIrrigation_CA * resStorageTotal_alloc,
demand_Segment / Water_conveyance_efficiency) # [M3]
ResAbstractFactor = np.where(resStorageTotal_alloc > 0,
divideValues(act_bigLakeResAbst_alloc, resStorageTotal_alloc),
0) # fraction of water abstracted versus water available for total segment reservoir volumes
# Compressed version needs to be corrected as above
ResAbstractFactorC = np.compress(self.var.compress_LR, ResAbstractFactor)
ResAbstractFactorC = np.multiply(resStorageTotal_allocC == self.var.reservoirStorageM3C,
ResAbstractFactorC)
self.var.lakeStorageC -= self.var.reservoirStorageM3C * ResAbstractFactorC
self.var.lakeVolumeM3C -= self.var.reservoirStorageM3C * ResAbstractFactorC
self.var.lakeResStorageC -= self.var.reservoirStorageM3C * ResAbstractFactorC
self.var.reservoirStorageM3C -= self.var.reservoirStorageM3C * ResAbstractFactorC
self.var.lakeResStorage = globals.inZero.copy()
np.put(self.var.lakeResStorage, self.var.decompress_LR, self.var.lakeResStorageC)
metRemainSegment = np.where(demand_Segment > 0,
divideValues(act_bigLakeResAbst_alloc * Water_conveyance_efficiency,
demand_Segment), 0) # by definition <= 1
self.var.leakageC_daily = resStorageTotal_allocC * ResAbstractFactorC * (
1 - Water_conveyance_efficiency)
self.var.leakageC += self.var.leakageC_daily
self.var.leakageC_daily_segments = np.sum(self.var.leakageC_daily) + globals.inZero
self.var.act_bigLakeResAbst += remainNeed2 * metRemainSegment
self.var.act_SurfaceWaterAbstract += remainNeed2 * metRemainSegment
## End of using_reservoir_command_areas
# remaining is taken from groundwater if possible
self.var.pot_GroundwaterAbstract = totalDemand - self.var.act_SurfaceWaterAbstract
self.var.nonFossilGroundwaterAbs = np.maximum(0.,np.minimum(self.var.readAvlStorGroundwater, self.var.pot_GroundwaterAbstract))
# calculate renewableAvlWater_local (non-fossil groundwater and channel) - environmental flow
#self.var.renewableAvlWater_local = self.var.readAvlStorGroundwater + self.var.readAvlChannelStorageM
# if limitAbstraction from groundwater is True
# fossil gwAbstraction and water demand may be reduced
# variable to reduce/limit groundwater abstraction (> 0 if limitAbstraction = True)
if checkOption('limitAbstraction'):
# real surface water abstraction can be lower, because not all demand can be done from surface water
act_swAbstractionFraction = divideValues(self.var.act_SurfaceWaterAbstract, totalDemand)
# Fossil groundwater abstraction is not allowed
# allocation rule here: domestic& industry > irrigation > paddy
# non-irrgated water demand: adjusted (and maybe increased) by gwabstration factor
# if nonirrgated water demand is higher than actual growndwater abstraction (wwhat is needed and what is stored in gw)
act_nonIrrWithdrawalGW = self.var.nonIrrDemand * (1 - act_swAbstractionFraction)
act_nonIrrWithdrawalGW = np.where(act_nonIrrWithdrawalGW > self.var.nonFossilGroundwaterAbs, self.var.nonFossilGroundwaterAbs, act_nonIrrWithdrawalGW)
act_nonIrrWithdrawalSW = act_swAbstractionFraction * self.var.nonIrrDemand
self.var.act_nonIrrWithdrawal = act_nonIrrWithdrawalSW + act_nonIrrWithdrawalGW
# irrigated water demand:
act_irrWithdrawalGW = self.var.totalIrrDemand * (1 - act_swAbstractionFraction)
act_irrWithdrawalGW = np.minimum(self.var.nonFossilGroundwaterAbs - act_nonIrrWithdrawalGW, act_irrWithdrawalGW)
act_irrWithdrawalSW = act_swAbstractionFraction * self.var.totalIrrDemand
self.var.act_irrWithdrawal = act_irrWithdrawalSW + act_irrWithdrawalGW
# (nonpaddy)
act_irrnonpaddyGW = self.var.fracVegCover[3] * (1 - act_swAbstractionFraction) * self.var.irrDemand[3]
act_irrnonpaddyGW = np.minimum(self.var.nonFossilGroundwaterAbs - act_nonIrrWithdrawalGW, act_irrnonpaddyGW)
act_irrnonpaddySW = self.var.fracVegCover[3] * act_swAbstractionFraction * self.var.irrDemand[3]
self.var.act_irrNonpaddyWithdrawal = act_irrnonpaddySW + act_irrnonpaddyGW
# (paddy)
act_irrpaddyGW = self.var.fracVegCover[2] * (1 - act_swAbstractionFraction) * self.var.irrDemand[2]
act_irrpaddyGW = np.minimum(self.var.nonFossilGroundwaterAbs - act_nonIrrWithdrawalGW - act_irrnonpaddyGW, act_irrpaddyGW)
act_irrpaddySW = self.var.fracVegCover[2] * act_swAbstractionFraction * self.var.irrDemand[2]
self.var.act_irrPaddyWithdrawal = act_irrpaddySW + act_irrpaddyGW
act_gw = act_nonIrrWithdrawalGW + act_irrWithdrawalGW
# todo: is act_irrWithdrawal needed to be replaced? Check later!!
# consumption - irrigation (without loss) = demand * efficiency (back to non fraction value)
## back to non fraction values
# self.var.act_irrWithdrawal[2] = divideValues(self.var.act_irrPaddyWithdrawal, self.var.fracVegCover[2])
#self.var.act_irrWithdrawal[3] = divideValues(self.var.act_irrNonpaddyWithdrawal, self.var.fracVegCover[3])
## consumption - irrigation (without loss) = demand * efficiency
# calculate act_ water demand, because irr demand has still demand from previous day included
# if the demand from previous day is not fulfilled it is taken to the next day and so on
# if we do not correct we double account each day the demand from previous days
self.var.act_irrPaddyDemand = np.maximum(0, self.var.irrPaddyDemand - self.var.unmetDemandPaddy)
self.var.act_irrNonpaddyDemand = np.maximum(0, self.var.irrNonpaddyDemand - self.var.unmetDemandNonpaddy)
# unmet is either pot_GroundwaterAbstract - self.var.nonFossilGroundwaterAbs or demand - withdrawal
self.var.unmetDemand = (self.var.totalIrrDemand - self.var.act_irrWithdrawal) + (self.var.nonIrrDemand - self.var.act_nonIrrWithdrawal)
self.var.unmetDemandPaddy = self.var.irrPaddyDemand - self.var.act_irrPaddyDemand
self.var.unmetDemandNonpaddy = self.var.irrNonpaddyDemand - self.var.act_irrNonpaddyDemand
else:
# Fossil groundwater abstractions are allowed (act = pot)
self.var.unmetDemand = self.var.pot_GroundwaterAbstract - self.var.nonFossilGroundwaterAbs
# using allocation from abstraction zone
# this might be a regualr grid e.g. 2x2 for 0.5 deg
left_sf = self.var.readAvlChannelStorageM - self.var.act_channelAbst
# sum demand, surface water - local used, groundwater - local use, not satisfied for allocation zone
zoneDemand = npareatotal(self.var.unmetDemand,self.var.allocation_zone)
zone_sf_avail = npareatotal(left_sf, self.var.allocation_zone)
# zone abstraction is minimum of availability and demand
zone_sf_abstraction = np.minimum(zoneDemand,zone_sf_avail)
# water taken from surface zone and allocated to cell demand
cell_sf_abstraction = np.maximum(0.,divideValues(left_sf,zone_sf_avail) * zone_sf_abstraction)
cell_sf_allocation = np.maximum(0.,divideValues(self.var.unmetDemand, zoneDemand) * zone_sf_abstraction)
# sum up with other abstraction
self.var.act_SurfaceWaterAbstract = self.var.act_SurfaceWaterAbstract + cell_sf_abstraction
self.var.act_channelAbst = self.var.act_channelAbst + cell_sf_abstraction
# new potential groundwater abstraction
self.var.pot_GroundwaterAbstract = np.maximum(0.,self.var.pot_GroundwaterAbstract - cell_sf_allocation)
left_gw_demand = np.maximum(0.,self.var.pot_GroundwaterAbstract - self.var.nonFossilGroundwaterAbs)
left_gw_avail = self.var.readAvlStorGroundwater - self.var.nonFossilGroundwaterAbs
zone_gw_avail = npareatotal(left_gw_avail, self.var.allocation_zone)
# for groundwater substract demand which is fulfilled by surface zone, calc abstraction and what is left.
#zone_gw_demand = npareatotal(left_gw_demand, self.var.allocation_zone)
zone_gw_demand = zoneDemand - zone_sf_abstraction
zone_gw_abstraction = np.minimum(zone_gw_demand,zone_gw_avail)
#zone_unmetdemand = np.maximum(0., zone_gw_demand - zone_gw_abstraction)
# water taken from groundwater zone and allocated to cell demand
cell_gw_abstraction = np.maximum(0.,divideValues(left_gw_avail,zone_gw_avail) * zone_gw_abstraction)
cell_gw_allocation = np.maximum(0.,divideValues(left_gw_demand,zone_gw_demand) * zone_gw_abstraction)
self.var.unmetDemand = np.maximum(0.,left_gw_demand - cell_gw_allocation)
self.var.nonFossilGroundwaterAbs = self.var.nonFossilGroundwaterAbs + cell_gw_abstraction
#self.var.unmetDemand = self.var.pot_GroundwaterAbstract - self.var.nonFossilGroundwaterAbs
## end of zonal abstraction
# unmet demand is again checked for water from channels and abstraction from surface is increased
#channelAbs2 = np.minimum(self.var.readAvlChannelStorageM - self.var.act_channelAbst, self.var.unmetDemand)
#self.var.act_SurfaceWaterAbstract = self.var.act_SurfaceWaterAbstract + channelAbs2
#self.var.act_channelAbst = self.var.act_channelAbst + channelAbs2
#self.var.unmetDemand = self.var.unmetDemand - channelAbs2
#self.var.pot_GroundwaterAbstract = self.var.pot_GroundwaterAbstract - channelAbs2
self.var.act_nonIrrWithdrawal = self.var.nonIrrDemand
self.var.act_irrWithdrawal = self.var.totalIrrDemand
act_gw = self.var.pot_GroundwaterAbstract
self.var.act_irrNonpaddyWithdrawal = self.var.fracVegCover[3] * self.var.irrDemand[3]
self.var.act_irrPaddyWithdrawal = self.var.fracVegCover[2] * self.var.irrDemand[2]
## End of limit extraction if, then
self.var.act_irrConsumption[2] = divideValues(self.var.act_irrPaddyWithdrawal, self.var.fracVegCover[2]) * self.var.efficiencyPaddy
self.var.act_irrConsumption[3] = divideValues(self.var.act_irrNonpaddyWithdrawal, self.var.fracVegCover[3]) * self.var.efficiencyNonpaddy
self.var.pumping = act_gw
if 'demand2pumping' in binding:
if checkOption('demand2pumping'): self.var.modflowPumpingM += act_gw
self.var.act_indWithdrawal = frac_industry * self.var.act_nonIrrWithdrawal
self.var.act_domWithdrawal = frac_domestic * self.var.act_nonIrrWithdrawal
self.var.act_livWithdrawal = frac_livestock * self.var.act_nonIrrWithdrawal
self.var.act_indConsumption = self.var.ind_efficiency * self.var.act_indWithdrawal
self.var.act_domConsumption = self.var.dom_efficiency * self.var.act_domWithdrawal
self.var.act_livConsumption = self.var.liv_efficiency * self.var.act_livWithdrawal
self.var.act_nonIrrConsumption = self.var.act_domConsumption + self.var.act_indConsumption + self.var.act_livConsumption
self.var.act_totalIrrConsumption = self.var.fracVegCover[2] * self.var.act_irrConsumption[2] + self.var.fracVegCover[3] * self.var.act_irrConsumption[3]
self.var.act_paddyConsumption = self.var.fracVegCover[2] * self.var.act_irrConsumption[2]
self.var.act_nonpaddyConsumption = self.var.fracVegCover[3] * self.var.act_irrConsumption[3]
self.var.totalWaterDemand = self.var.fracVegCover[2] * self.var.irrDemand[2] + self.var.fracVegCover[3] * self.var.irrDemand[3] + self.var.nonIrrDemand
self.var.act_totalWaterWithdrawal = self.var.act_nonIrrWithdrawal + self.var.act_irrWithdrawal
self.var.act_totalWaterConsumption = self.var.act_nonIrrConsumption + self.var.act_totalIrrConsumption
# --- calculate return flow
#Sum up loss - difference between withdrawn and consumed - split into return flow and evaporation
sumIrrLoss = self.var.act_irrWithdrawal - self.var.act_totalIrrConsumption
self.var.returnflowIrr = self.var.returnfractionIrr * sumIrrLoss
self.var.addtoevapotrans = (1- self.var.returnfractionIrr) * sumIrrLoss
self.var.returnflowNonIrr = self.var.nonIrrReturnFlowFraction * self.var.act_nonIrrWithdrawal
# limit return flow to not put all fossil groundwater back into the system, because
# it can lead to higher river discharge than without water demand, as water is taken from fossil groundwater (out of system)
unmet_div_ww = 1. - np.minimum(1, divideValues(self.var.unmetDemand, self.var.act_totalWaterWithdrawal))
self.var.unmet_lost = ( self.var.returnflowIrr + self.var.returnflowNonIrr + self.var.addtoevapotrans) * (1-unmet_div_ww)
#self.var.waterDemandLost = self.var.act_totalWaterConsumption + self.var.addtoevapotrans
self.var.unmet_lostirr = ( self.var.returnflowIrr + self.var.addtoevapotrans) * (1-unmet_div_ww)
self.var.unmet_lostNonirr = self.var.returnflowNonIrr * (1-unmet_div_ww)
self.var.returnflowIrr = self.var.returnflowIrr * unmet_div_ww
self.var.addtoevapotrans = self.var.addtoevapotrans * unmet_div_ww
self.var.returnflowNonIrr = self.var.returnflowNonIrr * unmet_div_ww
# returnflow to river and to evapotranspiration
self.var.returnFlow = self.var.returnflowIrr + self.var.returnflowNonIrr
self.var.waterabstraction = self.var.nonFossilGroundwaterAbs + self.var.unmetDemand + self.var.act_SurfaceWaterAbstract
self.model.waterbalance_module.waterBalanceCheck(
[self.var.act_irrWithdrawal], # In
[self.var.act_totalIrrConsumption,self.var.unmet_lostirr,self.var.addtoevapotrans,self.var.returnflowIrr], # Out
[globals.inZero],
[globals.inZero],
"Waterdemand5a", False)
self.model.waterbalance_module.waterBalanceCheck(
[self.var.act_nonIrrWithdrawal], # In
[self.var.act_nonIrrConsumption , self.var.returnflowNonIrr, self.var.unmet_lostNonirr], # Out
[globals.inZero],
[globals.inZero],
"Waterdemand5b", False)
self.model.waterbalance_module.waterBalanceCheck(
[self.var.ind_efficiency * frac_industry * self.var.act_nonIrrWithdrawal], # In
[self.var.act_indConsumption], # Out
[globals.inZero],
[globals.inZero],
"Waterdemand5c", False)
self.model.waterbalance_module.waterBalanceCheck(
[ self.var.act_indWithdrawal], # In
[self.var.act_indConsumption/ self.var.ind_efficiency], # Out
[globals.inZero],
[globals.inZero],
"Waterdemand5d", False)
# ----------------------------------------------------------------
if checkOption('calcWaterBalance'):
self.model.waterbalance_module.waterBalanceCheck(
[self.var.act_irrWithdrawal], # In
[self.var.act_totalIrrConsumption, self.var.returnflowIrr,self.var.unmet_lostirr,self.var.addtoevapotrans], # Out
[globals.inZero],
[globals.inZero],
"Waterlossdemand1", False)
self.model.waterbalance_module.waterBalanceCheck(
[self.var.nonIrrDemand, self.var.totalIrrDemand], # In
[self.var.nonFossilGroundwaterAbs, self.var.unmetDemand, self.var.act_SurfaceWaterAbstract], # Out
[globals.inZero],
[globals.inZero],
"Waterdemand1", False)
if checkOption('includeWaterBodies'):
self.model.waterbalance_module.waterBalanceCheck(
[self.var.act_SurfaceWaterAbstract], # In
[ self.var.act_bigLakeResAbst,self.var.act_smallLakeResAbst, self.var.act_channelAbst], # Out
[globals.inZero],
[globals.inZero],
"Waterdemand1b", False)
self.model.waterbalance_module.waterBalanceCheck(
[self.var.nonFossilGroundwaterAbs, self.var.unmetDemand, self.var.act_SurfaceWaterAbstract], # In
[self.var.act_totalWaterWithdrawal], # Out
[globals.inZero],
[globals.inZero],
"Waterdemand2", False)
self.model.waterbalance_module.waterBalanceCheck(
[self.var.act_totalWaterWithdrawal], # In
[self.var.act_irrPaddyWithdrawal, self.var.act_irrNonpaddyWithdrawal, self.var.act_nonIrrWithdrawal], # Out
[globals.inZero],
[globals.inZero],
"Waterdemand3", False)
self.model.waterbalance_module.waterBalanceCheck(
[self.var.act_totalWaterWithdrawal], # In
[self.var.act_totalIrrConsumption, self.var.act_nonIrrConsumption, self.var.addtoevapotrans, self.var.returnflowIrr, self.var.returnflowNonIrr, self.var.unmet_lost], # Out
[globals.inZero],
[globals.inZero],
"Waterdemand4", False)
self.model.waterbalance_module.waterBalanceCheck(
[self.var.act_totalWaterWithdrawal], # In
[self.var.act_totalIrrConsumption, self.var.act_nonIrrConsumption, self.var.addtoevapotrans, self.var.returnFlow, self.var.unmet_lost ], # Out
[globals.inZero],
[globals.inZero],
"Waterdemand5", False)
self.model.waterbalance_module.waterBalanceCheck(
[self.var.act_totalWaterWithdrawal], # In
[self.var.waterabstraction], # Out
[globals.inZero],
[globals.inZero],
"Waterdemand level1", False)
|
CWatM/CWatM
|
cwatm/hydrological_modules/water_demand/water_demand.py
|
Python
|
gpl-3.0
| 52,255
|
[
"NetCDF"
] |
69cda85a640966890de11b70cfdce53fb60babf50e0769f70fe00f9beac77441
|
from setuptools import setup

# Package metadata, kept in one mapping so it is easy to inspect or reuse.
_METADATA = dict(
    name='gmshtoparticles',
    version='0.1',
    description='Transforms a .msh file generated with Gmsh into a particles centered inside each triangle or quad element. Outputs a .csv file description and .vtk/.vtu visualization set of files',
    url='https://github.com/IaPCS/gmsh-to-nodes/',
    author='Patrick Diehl, Ilyass Tabiai',
    author_email='me@diehlpk.de, ilyass.tabiai@gmail.com',
    license='GPL-3.0',
    packages=['gmshtoparticles'],
    zip_safe=False,
)

setup(**_METADATA)
|
IaPCS/gmsh-to-nodes
|
setup.py
|
Python
|
gpl-3.0
| 526
|
[
"VTK"
] |
77800d466430e5b7d577d2280d744c00b92aa83961cfa22b115c89a84ef73a43
|
# -*- coding: utf-8 -*-
"""
SpykeDemo.py builds a layer of neurons with its connection matrix and
steps it through several iterations while injecting a constant current.
Created on Sun Oct 04 23:26:52 2015
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
any later version.
@author: Corey Hart
"""
from SpykeArchitecture import *

L = layer(100, 0.1)  # 100 neurons with a maximum connection weight of 0.1

recordIdx = {}      # time-step index -> indices of neurons that fired
recordTimes = {}    # simulation time -> indices of neurons that fired
recordNeurons = {}  # neuron index -> list of that neuron's spike times
for idx in xrange(len(L.neurons)):
    recordNeurons[idx] = list()

# Snapshot the original weight matrix before learning modifies it.
w = np.zeros((100, 100))
for row in xrange(100):
    for col in xrange(100):
        w[row, col] = L.cnxns.weights[row, col]

# Drive the network for 50 time steps with a constant global input.
for step in xrange(50):
    L.update(10.0)  # global drive = 10.0
    fired = []
    for num_n, n in enumerate(L.neurons):
        if n.spike == True:
            fired.append(num_n)
            recordNeurons[num_n].append(n.time)
    # Keyed both by step index and by the (leaked) last neuron's clock time.
    recordIdx[step] = fired
    recordTimes[n.time] = fired
    L.cnxns.update(recordTimes, n.time, n.dt, 6.25, [0.1, 1.0, 0.5], learning_rule='STDP')
|
DrSpyke/Spyke
|
SpykeDemo.py
|
Python
|
gpl-2.0
| 1,335
|
[
"NEURON"
] |
a384d2be2b8c432250d31404fe8e78fe03908d84ff19a0d68b4e287e7671b018
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Setup.py for pymatgen."""
import sys
import platform
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
    """build_ext variant that defers the numpy include-dir lookup to build time."""

    def finalize_options(self):
        """Append numpy's C header directory once numpy is importable."""
        _build_ext.finalize_options(self)
        # numpy's own setup.py leaves __NUMPY_SETUP__ set while it installs;
        # drop the flag so a reload yields a fully initialised package.
        import builtins
        try:
            del builtins.__NUMPY_SETUP__
        except AttributeError:
            pass
        import importlib
        import numpy
        importlib.reload(numpy)
        self.include_dirs.append(numpy.get_include())
# Compiler/linker flags, selected per host platform.
_is_win = sys.platform.startswith('win')
_is_mac = sys.platform.startswith('darwin')

# 64-bit Windows needs --allow-multiple-definition for the C extensions.
extra_link_args = (['-Wl,--allow-multiple-definition']
                   if _is_win and platform.machine().endswith('64') else [])

# https://docs.microsoft.com/en-us/cpp/build/reference/compiler-options-listed-alphabetically?view=vs-2017
if _is_win:
    cpp_extra_compile_args = ['/w', '/O2', '/std:c++0x']
    cpp_extra_link_args = extra_link_args
elif _is_mac:
    # clang on macOS needs libc++ explicitly.
    cpp_extra_compile_args = ["-Wno-cpp", "-Wno-unused-function", "-O2",
                              "-march=native", '-std=c++0x', "-stdlib=libc++"]
    cpp_extra_link_args = ["-O2", "-march=native", '-stdlib=libc++']
else:
    cpp_extra_compile_args = ["-Wno-cpp", "-Wno-unused-function", "-O2",
                              "-march=native", '-std=c++0x']
    cpp_extra_link_args = extra_link_args
# Long description shown on the PyPI project page (rendered as markdown).
long_desc = """
Official docs: [http://pymatgen.org](http://pymatgen.org/)
Pymatgen (Python Materials Genomics) is a robust, open-source Python library
for materials analysis. These are some of the main features:
1. Highly flexible classes for the representation of Element, Site, Molecule,
Structure objects.
2. Extensive input/output support, including support for
[VASP](http://cms.mpi.univie.ac.at/vasp/), [ABINIT](http://www.abinit.org/),
CIF, Gaussian, XYZ, and many other file formats.
3. Powerful analysis tools, including generation of phase diagrams, Pourbaix
diagrams, diffusion analyses, reactions, etc.
4. Electronic structure analyses, such as density of states and band structure.
5. Integration with the Materials Project REST API.
Pymatgen is free to use. However, we also welcome your help to improve this
library by making your own contributions. These contributions can be in the
form of additional tools or modules you develop, or feature requests and bug
reports. Please report any bugs and issues at pymatgen's [Github page]
(https://github.com/materialsproject/pymatgen). For help with any pymatgen
issues, please use the [Discourse page](https://discuss.matsci.org/c/pymatgen).
Why use pymatgen?
=================
There are many materials analysis codes out there, both commerical and free,
but pymatgen offer several advantages:
1. **It is (fairly) robust.** Pymatgen is used by thousands of researchers,
and is the analysis code powering the [Materials Project](https://www.materialsproject.org).
The analysis it produces survives rigorous scrutiny every single day. Bugs
tend to be found and corrected quickly. Pymatgen also uses
[CircleCI](https://circleci.com) and [Appveyor](https://www.appveyor.com/)
for continuous integration on the Linux and Windows platforms,
respectively, which ensures that every commit passes a comprehensive suite
of unittests.
2. **It is well documented.** A fairly comprehensive documentation has been
written to help you get to grips with it quickly.
3. **It is open.** You are free to use and contribute to pymatgen. It also means
that pymatgen is continuously being improved. We will attribute any code you
contribute to any publication you specify. Contributing to pymatgen means
your research becomes more visible, which translates to greater impact.
4. **It is fast.** Many of the core numerical methods in pymatgen have been
optimized by vectorizing in numpy/scipy. This means that coordinate
manipulations are extremely fast and are in fact comparable to codes
written in other languages. Pymatgen also comes with a complete system for
handling periodic boundary conditions.
5. **It will be around.** Pymatgen is not a pet research project. It is used in
the well-established Materials Project. It is also actively being developed
and maintained by the [Materials Virtual Lab](https://www.materialsvirtuallab.org),
the ABINIT group and many other research groups.
With effect from version 2019.1.1, pymatgen only supports Python 3.x. Users
who require Python 2.7 should install pymatgen v2018.x.
"""
# Package metadata and build configuration for pymatgen.
setup(
    name="pymatgen",
    packages=find_packages(),
    version="2020.4.2",
    # Custom build_ext (defined earlier in this file) injects numpy headers.
    cmdclass={'build_ext': build_ext},
    setup_requires=['numpy>=1.14.3', 'setuptools>=18.0'],
    python_requires='>=3.6',
    install_requires=["numpy>=1.14.3", "requests", "ruamel.yaml>=0.15.6",
                      "monty>=3.0.2", "scipy>=1.0.1", "pydispatcher>=2.0.5",
                      "tabulate", "spglib>=1.9.9.44", "networkx>=2.2",
                      "matplotlib>=1.5", "palettable>=3.1.1", "sympy", "pandas",
                      "plotly>=4.5.0"],
    # Optional feature sets installable as pymatgen[<name>].
    extras_require={
        "provenance": ["pybtex"],
        "ase": ["ase>=3.3"],
        "vis": ["vtk>=6.0.0"],
        "abinit": ["netcdf4"],
        ':python_version < "3.7"': [
            "dataclasses>=0.6",
        ]},
    # Non-Python data files shipped inside the package.
    package_data={
        "pymatgen.core": ["*.json", "py.typed"],
        "pymatgen.analysis": ["*.yaml", "*.json", "*.csv"],
        "pymatgen.analysis.chemenv.coordination_environments.coordination_geometries_files": ["*.txt", "*.json"],
        "pymatgen.analysis.chemenv.coordination_environments.strategy_files": ["*.json"],
        "pymatgen.analysis.magnetism": ["*.json", "*.yaml"],
        "pymatgen.analysis.structure_prediction": ["data/*.json", "*.yaml"],
        "pymatgen.io": ["*.yaml"],
        "pymatgen.io.vasp": ["*.yaml", "*.json"],
        "pymatgen.io.lammps": ["templates/*.*", "*.yaml"],
        "pymatgen.io.feff": ["*.yaml"],
        "pymatgen.symmetry": ["*.yaml", "*.json", "*.sqlite"],
        "pymatgen.entries": ["*.yaml"],
        "pymatgen.vis": ["ElementColorSchemes.yaml"],
        "pymatgen.command_line": ["OxideTersoffPotentials"],
        "pymatgen.analysis.defects": ["*.json"],
        "pymatgen.analysis.diffraction": ["*.json"],
        "pymatgen.util": ["structures/*.json"]},
    author="Pymatgen Development Team",
    author_email="ongsp@eng.ucsd.edu",
    maintainer="Shyue Ping Ong, Matthew Horton",
    maintainer_email="ongsp@eng.ucsd.edu, mkhorton@lbl.gov",
    url="http://www.pymatgen.org",
    license="MIT",
    description="Python Materials Genomics is a robust materials "
                "analysis code that defines core object representations for "
                "structures and molecules with support for many electronic "
                "structure codes. It is currently the core analysis code "
                "powering the Materials Project "
                "(https://www.materialsproject.org).",
    long_description=long_desc,
    long_description_content_type='text/markdown',
    keywords=["VASP", "gaussian", "ABINIT", "nwchem", "qchem", "materials", "science",
              "project", "electronic", "structure", "analysis", "phase", "diagrams",
              "crystal"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Topic :: Scientific/Engineering :: Physics",
        "Topic :: Scientific/Engineering :: Chemistry",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ],
    # C/C++ extensions; flags come from the platform-specific block above.
    ext_modules=[Extension("pymatgen.optimization.linear_assignment",
                           ["pymatgen/optimization/linear_assignment.c"],
                           extra_link_args=extra_link_args),
                 Extension("pymatgen.util.coord_cython",
                           ["pymatgen/util/coord_cython.c"],
                           extra_link_args=extra_link_args),
                 Extension("pymatgen.optimization.neighbors",
                           ["pymatgen/optimization/neighbors.cpp"],
                           extra_compile_args=cpp_extra_compile_args,
                           extra_link_args=cpp_extra_link_args,
                           language='c++')],
    # Command-line tools installed alongside the package.
    entry_points={
        'console_scripts': [
            'pmg = pymatgen.cli.pmg:main',
            'feff_input_generation = pymatgen.cli.feff_input_generation:main',
            'feff_plot_cross_section = pymatgen.cli.feff_plot_cross_section:main',
            'feff_plot_dos = pymatgen.cli.feff_plot_dos:main',
            'gaussian_analyzer = pymatgen.cli.gaussian_analyzer:main',
            'get_environment = pymatgen.cli.get_environment:main',
        ]
    }
)
|
gVallverdu/pymatgen
|
setup.py
|
Python
|
mit
| 9,220
|
[
"ABINIT",
"ASE",
"CRYSTAL",
"FEFF",
"Gaussian",
"LAMMPS",
"NWChem",
"VASP",
"VTK",
"pymatgen"
] |
ad9e7528aed142aff31ba13d14b530f3e99396b8ec8c3911e5b0d6555b7667db
|
#! /usr/bin/env python
# Molecular feature extraction helpers built on Open Babel / pybel2 (Python 2).
import math, sys, os , platform
from collections import Iterable
from optparse import OptionParser
import pybel2
import openbabel as op
import numpy as np
# http://openbabel.org/dev-api/classOpenBabel_1_1OBMol.shtml
# Short aliases for the pybel2 wrapper classes used throughout this script.
Molecule=pybel2.Molecule
Atom=pybel2.Atom
Bond=pybel2.Bond
def pqrbug(filename):
    """Return the contents of a PQR file, patched around Open Babel reader bugs.

    The returned string can be parsed with
    ``pybel.readstring('pqr', pqrbug(filename))`` or
    ``ob.OBConversion().ReadString(obmol, string)``.

    Two fixes are applied to ATOM/HETATM records only:
    - Open Babel 2.3.2 on Windows mis-reads the trailing columns, so the
      last two characters of each record are dropped (already fixed in the
      Mac build, hence the platform check);
    - two-letter element names (Br, Cl, Na, Mg) are left-aligned so the
      reader does not truncate them to a single-letter element.
    """
    two_letter = (" Br", " Cl", " Na", " Mg")
    pieces = []
    with open(filename) as handle:
        for line in handle:
            if line[:6] in ("ATOM  ", "HETATM"):
                fixed = line
                if platform.system() == "Windows":
                    # Drop the two trailing characters, keep the newline.
                    fixed = fixed[:-3] + '\n'
                if line[12:15] in two_letter:
                    # Left-align the element symbol, e.g. " Br" -> "Br ".
                    fixed = fixed[:12] + line[13:15] + " " + fixed[15:]
                pieces.append(fixed)
            else:
                pieces.append(line)
    return "".join(pieces)
def pqrx_reader(filename, lastlen=12, columns=1):
    """Read a pqrx/pqra/pqrta file whose ATOM records carry extra atomic data.

    Each ATOM/HETATM line is assumed to end with ``lastlen`` characters
    holding ``columns`` whitespace-separated per-atom values.  The trailing
    columns are stripped off so the remainder is plain PQR text.

    Returns:
        (pqr_string, atomic_data_list): ``atomic_data_list`` holds one list
        of strings per data column.

    Raises:
        ValueError: if ``lastlen``/``columns`` are invalid or a record does
            not contain exactly ``columns`` data values.

    Note: unlike ``pqrbug``, this function must *not* be combined with that
    workaround.  (Fixed: unreachable ``exit(1)`` calls after ``raise``
    removed; file handle now closed even when a record is malformed.)
    """
    if (lastlen <= 0 or columns < 1):
        raise ValueError("Error last len or column number!")
    out = ""
    # One accumulator list per data column.
    outdata = [[] for _ in range(columns)]
    with open(filename) as f:
        for line in f:
            if line[:6] == "ATOM  " or line[:6] == "HETATM":
                # Split the trailing data columns from the PQR part.
                newline = line.rstrip('\n').rstrip('\r')
                data = newline[-lastlen:]
                newline = newline[:-lastlen] + '\n'
                datas = data.strip().split()
                if (len(datas) != columns):
                    raise ValueError("Given column number not the same to the data!")
                for i in range(columns):
                    outdata[i].append(datas[i])
                # Open Babel 2.3.2 on Windows mis-reads trailing columns.
                if (platform.system() == "Windows"):
                    newline = newline[:-3] + '\n'
                # Left-align two-letter element symbols so the PQR reader
                # does not truncate them, e.g. " Br" -> "Br ".
                if (line[12:15] == " Br"): newline = newline[:12] + "Br " + newline[15:]
                if (line[12:15] == " Cl"): newline = newline[:12] + "Cl " + newline[15:]
                if (line[12:15] == " Na"): newline = newline[:12] + "Na " + newline[15:]
                if (line[12:15] == " Mg"): newline = newline[:12] + "Mg " + newline[15:]
                out += newline
            else:
                out += line
    return (out, outdata)
def calcdipoleAtoms(*atoms):
    """Return the magnitude of the dipole moment of a set of atoms.

    Accepts either many Atom arguments or a single list of Atoms; for a
    list the idiomatic call is ``calcdipoleAtoms(*atomlist)``.

    The dipole is |sum_i q_i * r_i| built from each atom's partial charge
    and coordinates.

    Raises:
        TypeError: if no atoms are given or the first item is not an Atom.

    (Fixed: unreachable ``return 0.0`` statements after each ``raise``
    removed.)
    """
    if (len(atoms) <= 0):
        raise TypeError("Errors: No Input Atoms!")
    # Allow a single list argument as well as *args.
    if (isinstance(atoms[0], list)):
        atoms = atoms[0]
    # NOTE(review): only the first element's type is validated.
    if (not isinstance(atoms[0], Atom)):
        raise TypeError("Errors: Input should be Atom!")
    dx = 0.0; dy = 0.0; dz = 0.0
    for atom in atoms:
        coor = atom.coords
        charge = atom.partialcharge
        dx += coor[0] * charge
        dy += coor[1] * charge
        dz += coor[2] * charge
    return math.sqrt(pow(dx, 2) + pow(dy, 2) + pow(dz, 2))
def calcdipoleBond(bond):
    """Return |q_bgn * r_bgn + q_end * r_end| for the two atoms of a bond."""
    totals = [0.0, 0.0, 0.0]
    for atom in (bond.bgn, bond.end):
        q = atom.partialcharge
        for axis, coord in enumerate(atom.coords):
            totals[axis] += q * coord
    return math.sqrt(sum(t * t for t in totals))
def atomnumBondPair(bond):
    """Return the sorted atomic-number pair of a bond, e.g. C-O -> (6, 8)."""
    pair = [bond.bgn.atomicnum, bond.end.atomicnum]
    pair.sort()
    return tuple(pair)
def atomNumHyd(atom):
    """Return the (atomic number, hybridization) pair for an atom."""
    num = atom.atomicnum
    hyb = atom.hyb
    return (num, hyb)
def distance(atom1, atom2):
    """Return the distance between two atoms (delegates to Open Babel)."""
    ob_a = atom1.OBAtom
    ob_b = atom2.OBAtom
    return ob_a.GetDistance(ob_b)
def MolInfo(mol,printInfo=True):
    # Return a list of molecule-level features:
    # [formula, mol weight, SMILES, dipole, total atoms, heavy atoms,
    #  hydrogens, total/single/double/triple/aromatic bond counts]
    # When printInfo is True every feature is also echoed to stdout
    # (Python 2 print statements).
    smile=mol.write('smi').strip().split()[0]
    dipole=calcdipoleAtoms(*mol.atoms)
    TNatms=mol.OBMol.NumAtoms();
    HEatms=mol.OBMol.NumHvyAtoms();
    # Hydrogen count = total atoms minus heavy atoms.
    Hatms=mol.OBMol.NumAtoms()-mol.OBMol.NumHvyAtoms();
    TNbnds=mol.OBMol.NumBonds();
    # Bond-order counts come from calcdesc(); cast to int.
    moldesc=mol.calcdesc()
    sbnds=int(moldesc['sbonds']);
    dbnds=int(moldesc['dbonds']);
    tbnds=int(moldesc['tbonds']);
    abnds=int(moldesc['abonds']);
    if (printInfo):
        print "Mol Formula:",mol.formula
        print "Mol Weight:",mol.molwt
        print "Mol SMILE:",smile
        print "Mol dipole:",dipole;
        print "Total Atoms number:", TNatms;
        print "Heavy Atom number:", HEatms;
        print "Hydrogen number:", Hatms;
        print "Bond number:", TNbnds;
        print "Single Bond number:",sbnds
        print "Double Bond number:",dbnds
        print "Triple Bond number:",tbnds
        print "Aromatic Bond number:",abnds
    return [mol.formula,mol.molwt,smile,dipole,TNatms,HEatms,Hatms,TNbnds,sbnds,dbnds,tbnds,abnds]
def descVar(*args):
    """Return (max, min, sum, mean, population std) of the given numbers.

    Accepts either individual values or a single list as the only argument.
    """
    if isinstance(args[0], list):
        args = args[0]
    total = math.fsum(args)
    mean = total / len(args)
    variance = math.fsum(pow(v - mean, 2) for v in args) / len(args)
    return (max(args), min(args), total, mean, math.sqrt(variance))
def featureDict2List(ftype, fdict):
    """Flatten ``fdict`` values into one list following the key order ``ftype``.

    Iterable values are expanded in place; scalar values are appended as-is.
    """
    features = []
    for key in ftype:
        value = fdict[key]
        if isinstance(value, Iterable):
            features.extend(value)
        else:
            features.append(value)
    return features
def CalcDataElementFeature(mol, data):
    """Element-resolved statistics of per-atom data values.

    ``data`` must follow the atom order of ``mol``.  Returns the overall
    (max, min, sum, mean, std) of all values followed by the same five
    statistics per element in H,C,N,O,F,P,S,Cl,Br,I order (zeros for
    elements absent from the molecule).
    """
    elements = [1, 6, 7, 8, 9, 15, 16, 17, 35, 53]
    atomsnum = [atom.atomicnum for atom in mol]
    fdata = [float(v) for v in data]
    # Bucket each atom's value under its element number.
    pcdict = {}
    for ele in elements:
        pcdict[ele] = []
    for i in range(len(atomsnum)):
        pcdict[atomsnum[i]].append(fdata[i])
    pcdesc = {}
    for ele in elements:
        if pcdict[ele]:
            pcdesc[ele] = descVar(pcdict[ele])
        else:
            pcdesc[ele] = descVar([0.0])
    # Overall statistics first, then the per-element blocks.
    return list(descVar(fdata)) + featureDict2List(elements, pcdesc)
def CalcFeatures(mol,printInfo=True):
atoms=mol.atoms;
atomshyb=[atomNumHyd(atom) for atom in atoms];
bonds=mol.bonds;
# H,C,N,O,F,P,S,Cl,Br,I
elements=[1,6,7,8,9,15,16,17,35,53]
# molecule
molinfo=MolInfo(mol,printInfo=printInfo);
elecounts={}
for ele in elements:
elecounts[ele]=0
for atom in mol:
an=atom.atomicnum
elecounts[an]=elecounts.get(an,0)+1
# partial charge
acDict={}; #for sum element pcharge
acDictAbs={};
pcs=[atom.partialcharge for atom in mol]
pcsAbs=[abs(pc) for pc in pcs]
pcdict={} #for saving each atom pcharge in a element key
pcdesc={} #for saving Max/min.. for a element
pcAbsdict={}
pcAbsdesc={}
for ele in elements:
pcdict[ele]=[]
pcAbsdict[ele]=[]
acDict[ele]=0.0
acDictAbs[ele]=0.0
for atom in mol:
an=atom.atomicnum
pcdict[an].append(atom.partialcharge)
pcAbsdict[an].append(abs(atom.partialcharge))
acDict[an]=acDict.get(an,0.0)+atom.partialcharge;
acDictAbs[an]=acDictAbs.get(an,0.0)+abs(atom.partialcharge);
for ele in elements:
if (len(pcdict[ele])>0):
pcdesc[ele]=descVar(pcdict[ele])
pcAbsdesc[ele]=descVar(pcAbsdict[ele])
else:
pcdesc[ele]=descVar([0.0])
pcAbsdesc[ele]=descVar([0.0])
elePCfeatures=featureDict2List(elements,acDict)+featureDict2List(elements,acDictAbs)+featureDict2List(elements,pcdesc)+featureDict2List(elements,pcAbsdesc)
if (printInfo):
# mol feature
print "Partial Charge Max, Min, Sum, Average, Std:",descVar(pcs)
print "Abs Partial Charge Max, Min, Sum, Average, Std:",descVar(pcsAbs)
# element feature
print "Element Partial Charge:",acDict
print "Element Abs Partial Charge:",acDictAbs
print "Element Partial Charge Max, Min, Sum, Average, Std:",pcdesc
print "Abs Element Partial Charge Max, Min, Sum, Average, Std:",pcAbsdesc
# hybridization
EleHyb={}
for atom in mol:
ehyb=atomNumHyd(atom)
if (ehyb[0] is 6):
if (ehyb[1] is 1):
EleHyb["C1"]=EleHyb.get("C1",0)+1
elif (ehyb[1] is 2):
EleHyb["C2"]=EleHyb.get("C2",0)+1
elif (ehyb[1] is 3):
EleHyb["C3"]=EleHyb.get("C3",0)+1
if (ehyb[0] is 7):
if (ehyb[1] is 1):
EleHyb["N1"]=EleHyb.get("N1",0)+1
elif (ehyb[1] is 2):
EleHyb["N2"]=EleHyb.get("N2",0)+1
elif (ehyb[1] is 3):
EleHyb["N3"]=EleHyb.get("N3",0)+1
if (ehyb[0] is 8):
if (ehyb[1] is 1):
EleHyb["O1"]=EleHyb.get("O1",0)+1
elif (ehyb[1] is 2):
EleHyb["O2"]=EleHyb.get("O2",0)+1
elif (ehyb[1] is 3):
EleHyb["O3"]=EleHyb.get("O3",0)+1
if (ehyb[0] is 16):
if (ehyb[1] is 1):
EleHyb["S1"]=EleHyb.get("S1",0)+1
elif (ehyb[1] is 2):
EleHyb["S2"]=EleHyb.get("S2",0)+1
elif (ehyb[1] is 3):
EleHyb["S3"]=EleHyb.get("S3",0)+1
hybtypes=["C1","C2","C3","N1","N2","N3","O1","O2","O3","S1","S2","S3"];
hybcount={}
for t in hybtypes:
hybcount[t]=EleHyb.get(t,0)
hybfeatures=featureDict2List(hybtypes,hybcount)
if (printInfo):
#print "Element Hybridization:",EleHyb
print "Element Hybridization count:",hybcount
# dipole
dipoles= [calcdipoleBond(bond) for bond in mol.bonds]
bndpair= [atomnumBondPair(bond) for bond in mol.bonds ]
bpd={}
for i in range(len(dipoles)):
dp=dipoles[i]
bp=bndpair[i]
if not bpd.has_key(bp):
bpd[bp]=[]
bpd[bp].append(dp)
bpneed=[(1,6),(1,7),(1,8),(1,16),(6,6),(6,7),(6,8),(6,9),(6,15),(6,16),(6,17),(6,35),(6,53),
(7,8),(8,15),(8,16),(15,16),(16,16)]
bpddesc={}
for bpn in bpneed:
bpddesc[bpn]=descVar(bpd.get(bpn,[0.0]))
dipolefeautures=featureDict2List(bpneed,bpddesc)
if printInfo:
# mol feature
print "Bond Dipoles Max, Min, Sum, Average, Std:", descVar(dipoles)
# element feature
print "Bond Atom Pair Dipoles Max, Min, Sum, Average, Std:",bpddesc
# Mol Formula, Mol Weight, Mol SMILE, Mol dipole, Total Atoms number, Heavy Atom number, Hydrogen number,
# Bond number, Single Bond number, Double Bond number, Triple Bond number, Aromatic Bond number (molinfo)
# For H,C,N,O,F,P,S,Cl,Br,I (elecounts)
# Element number
# Partial Charge, Abs Partial Charge, Bond Dipoles: Max, Min, Sum, Average, Std (descVar(pcs)+descVar(pcsAbs)+descVar(dipoles))
#
# For H,C,N,O,F,P,S,Cl,Br,I (elePCfeatures)
# Element Partial Charge
# Element Abs Partial Charge
# Element Partial Charge Max, Min, Sum, Average, Std
# Abs Element Partial Charge Max, Min, Sum, Average, Std
#
# For ["C1","C2","C3","N1","N2","N3","O1","O2","O3","S1","S2","S3"] (hybfeatures)
# Element Hybridization count
#
# For [(1,6),(1,7),(1,8),(1,16),(6,6),(6,7),(6,8),(6,9),(6,15),(6,16),(6,17),(6,35),(6,53),(7,8),(8,15),(8,16),(15,16),(16,16)]
# For HC,HN,HO,HS,CC,CN,CO,CF,CP,CS,CCl,CBr,CI,NO,OP,OS,PO,SS (dipolefeautures)
# Bond Atom Pair Dipoles Max, Min, Sum, Average, Std
outlist=molinfo+featureDict2List(elements, elecounts)+list(descVar(pcs))+list(descVar(pcsAbs))+list(descVar(dipoles)) \
+elePCfeatures+hybfeatures+dipolefeautures
if printInfo:print outlist
return [ str(f) for f in outlist ]
def featureString():
    """Return the space-separated header line naming every feature column.

    Token order mirrors the feature-vector layout: 12 molecule-level
    descriptors, 10 element counts, 15 charge/dipole statistics, 120
    per-element partial-charge features, 12 hybridization counts and
    90 bond-pair dipole statistics (259 tokens in total).  The returned
    string keeps a trailing space, matching the historical format.
    """
    elements = ["H", "C", "N", "O", "F", "P", "S", "Cl", "Br", "I"]
    stats = ["Max", "Min", "Sum", "Aver", "Std"]

    # 12 molecule-level descriptors.
    tokens = ["Mol_Formula", "Mol_Weight", "Mol_SMILE", "Mol_dipole",
              "Total_Atoms_number", "Heavy_Atom_number", "Hydrogen_number",
              "Bond_number", "Single_Bond_number", "Double_Bond_number",
              "Triple_Bond_number", "Aromatic_Bond_number"]
    # 10 element-count columns.
    tokens += [ele + "_num" for ele in elements]
    # 15 whole-molecule charge/dipole statistics.
    tokens += [prop + "_" + st
               for prop in ["PartCharge", "AbsPartCharge", "Bond_Dipole"]
               for st in stats]
    # 120 per-element partial-charge features.
    tokens += [ele + "_PC" for ele in elements]
    tokens += [ele + "_APC" for ele in elements]
    tokens += [ele + "_PC_" + st for ele in elements for st in stats]
    tokens += [ele + "_APC_" + st for ele in elements for st in stats]
    # 12 hybridization-count columns.
    tokens += [hyb + "_Hyb" for hyb in ["C1", "C2", "C3", "N1", "N2", "N3",
                                        "O1", "O2", "O3", "S1", "S2", "S3"]]
    # 90 bond-atom-pair dipole statistics.
    pairs = ["HC", "HN", "HO", "HS", "CC", "CN", "CO", "CF", "CP", "CS",
             "CCl", "CBr", "CI", "NO", "OP", "OS", "PO", "SS"]
    tokens += [pair + "_DP_" + st for pair in pairs for st in stats]

    # Join with a trailing space per token, exactly like the original
    # incremental concatenation produced.
    return "".join(token + " " for token in tokens)
if __name__ =="__main__":
    # Command-line driver: computes molecular features either for a single
    # molecule file (-i) or for a batch of compound ids listed in a file
    # (-m), writing one feature row per molecule to stdout or to -o.
    helpdes='''Calculate features of molecules based on Pybel and Openbabel.
# For one file, use -i option to assign the input file;
# For many files, use -m option to assign a file containing file name without extension.
# -f option can assign the file format. It must be given when using -m option.
# Without -f option and using -i option, the format will be deduced based on file extension.
# -t option will print the title for features.'''
    parser = OptionParser(description=helpdes)
    parser.add_option("-i", "--input", action="store",
                      dest="input", default="",
                      help="Read input data from input file")
    parser.add_option("-m", "--multi", action="store",
                      dest="multi", default="",
                      help="File containing file name without extension, format must be assigned!")
    parser.add_option("--prefix", action="store",
                      dest="prefixname", default="",
                      help="Perfix part of file name before id read from -m file")
    parser.add_option("--mid", action="store",
                      dest="midname", default="",
                      help="Middle part of file name between id read from -m file and -f format extension")
    parser.add_option("-f", "--format", action="store",
                      dest="format", default="",
                      help="Input file format")
    parser.add_option("-o", "--output", action="store",
                      dest="output", default="",
                      help="The output file to save result")
    parser.add_option("-t", "--title", action="store_true",
                      dest="title", default=False,
                      help="Print the feature title")
    (options, args) = parser.parse_args()
    if (len(sys.argv)<2):
        print "Please assign an input file or a file containing all file prefix!"
        parser.print_help()
        #parser.print_description()
        #parser.print_usage()
        exit(1)
    # Let the stdout to an output file!
    stdout=sys.stdout
    if (options.output!=""):
        ftmp=open(options.output,'w');
        sys.stdout=ftmp
    datas=[] #saving extra mol datas
    extrastring="" #saving extra title string
    # Using a simple input molecule file
    if (options.input != "" and options.multi == ""):
        filename=options.input
        fnamelist=os.path.splitext(filename)
        # set format
        fformat=options.format
        if (fformat==""):
            fformat=fnamelist[1][1:]
        fformat=fformat.lower()
        # pqr-derived formats carry 1 (pqrt/pqra/pqrx) or 2 (pqrta) extra
        # per-atom data columns; build matching extra header tokens.
        extradatacolumn=0
        if (fformat=='pqrt' or fformat=='pqra' or fformat=='pqrx'):
            extradatacolumn=1
        elif (fformat=='pqrta'):
            extradatacolumn=2
        for datanum in range(0,extradatacolumn):
            for i in ["Max","Min","Sum","Aver","Std"]:
                extrastring+=(i+"_"+"data"+str(datanum+1)+" ")
            for i in ["H","C","N","O","F","P","S","Cl","Br","I"]:
                for j in ["Max","Min","Sum","Aver","Std"]:
                    extrastring+=(i+"_"+"data"+str(datanum+1)+"_"+j+" ")
        # Print header
        if (options.title): print featureString()+extrastring
        # Special for pqr related format
        if (fformat=="pqr"):
            mol=pybel2.readstring('pqr',pqrbug(filename));
        elif (fformat=="pqra" or fformat=="pqrx" or fformat=="pqrt"):
            molstr,datas=pqrx_reader(filename, lastlen=12*extradatacolumn, columns=extradatacolumn);
            mol=pybel2.readstring('pqr',molstr);
        elif (fformat=="pqrta"):
            molstr,datas=pqrx_reader(filename, lastlen=12*extradatacolumn, columns=extradatacolumn);
            mol=pybel2.readstring('pqr',molstr);
        else:
            mol=pybel2.readfile(fformat,filename).next();
        # Calculate general features
        features=CalcFeatures(mol,printInfo=False)
        # Calculate extra features based on input data file
        dataout=[] #saving extra output features
        for data in datas:
            dataout+=CalcDataElementFeature(mol,data)
        features+=dataout
        print fnamelist[0]+" "+" ".join(map(str,features))
    # Compound id in a file to process batch
    elif (options.multi != "" and options.format != ""):
        fin=open(options.multi)
        flist=fin.readlines()
        fin.close()
        fformat=options.format
        fformat=fformat.lower()
        extradatacolumn=0
        if (fformat=='pqrt' or fformat=='pqra' or fformat=='pqrx'):
            extradatacolumn=1
        elif (fformat=='pqrta'):
            extradatacolumn=2
        for datanum in range(0,extradatacolumn):
            for i in ["Max","Min","Sum","Aver","Std"]:
                extrastring+=(i+"_"+"data"+str(datanum+1)+" ")
            for i in ["H","C","N","O","F","P","S","Cl","Br","I"]:
                for j in ["Max","Min","Sum","Aver","Std"]:
                    extrastring+=(i+"_"+"data"+str(datanum+1)+"_"+j+" ")
        # Print header
        if (options.title): print featureString()+extrastring
        for f in flist:
            try:
                filename=options.prefixname+f.strip()+options.midname+"."+fformat
                if (fformat=="pqr"):
                    mol=pybel2.readstring('pqr',pqrbug(filename));
                elif (fformat=="pqra" or fformat=="pqrx" or fformat=="pqrt"):
                    molstr,datas=pqrx_reader(filename, lastlen=12*extradatacolumn, columns=extradatacolumn);
                    mol=pybel2.readstring('pqr',molstr);
                elif (fformat=="pqrta"):
                    molstr,datas=pqrx_reader(filename, lastlen=12*extradatacolumn, columns=extradatacolumn);
                    mol=pybel2.readstring('pqr',molstr);
                else:
                    mol=pybel2.readfile(fformat,filename).next();
                # Calculate general features
                features=CalcFeatures(mol,printInfo=False)
                # Calculate extra features based on input data file
                dataout=[] #saving extra output features
                for data in datas:
                    dataout+=CalcDataElementFeature(mol,data)
                features+=dataout
                print f.strip()+" "+" ".join(map(str,features))
            except IOError:
                # Missing/unreadable molecule file: print just the id so the
                # output still has one line per requested compound.
                print f.strip()
    else:
        raise ValueError("No input file!")
        # NOTE(review): unreachable -- the raise above always fires first.
        exit(1)
    sys.stdout=stdout;
    if (options.output!=""):ftmp.close()
|
platinhom/CADDHom
|
python/molecule/OBabel/pybel_feature.py
|
Python
|
gpl-2.0
| 18,144
|
[
"Pybel"
] |
b9d12280a21c0807d7f77f8c6f65ca15a88de554e8f283a41e3aef2c5a924551
|
# $HeadURL$
""" Cache for the Plotting service plots
"""
__RCSID__ = "$Id$"
import os
import os.path
import time
import threading
from DIRAC import S_OK, S_ERROR, gLogger, rootPath
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities.Graphs import graph
class PlotCache:
  """ Cache of rendered plot images for the Plotting service.

  Generated PNG files live on disk under ``plotsLocation`` and are
  tracked in a DictCache; when an entry expires it is removed both from
  memory and from disk (via the _deleteGraph callback).
  """

  def __init__( self, plotsLocation = False ):
    # Directory where plot files are written; may also be set later
    # through setPlotsLocation().
    self.plotsLocation = plotsLocation
    self.alive = True
    # Expired entries get their on-disk files removed by _deleteGraph.
    self.__graphCache = DictCache( deleteFunction = _deleteGraph )
    # Lifetime (seconds) of a cached plot; also the purge loop period.
    self.__graphLifeTime = 600
    self.purgeThread = threading.Thread( target = self.purgeExpired )
    # Daemonize the purge thread: it sleeps 600s per cycle and a
    # non-daemon thread would block interpreter shutdown for that long.
    self.purgeThread.daemon = True
    self.purgeThread.start()

  def setPlotsLocation( self, plotsDir ):
    """ Set the plots directory and delete any stale .png files in it. """
    self.plotsLocation = plotsDir
    for plot in os.listdir( self.plotsLocation ):
      if plot.find( ".png" ) > 0:
        plotLocation = "%s/%s" % ( self.plotsLocation, plot )
        gLogger.verbose( "Purging %s" % plotLocation )
        os.unlink( plotLocation )

  def purgeExpired( self ):
    """ Background loop: periodically drop expired cache entries. """
    while self.alive:
      time.sleep( self.__graphLifeTime )
      self.__graphCache.purgeExpired()

  def getPlot( self, plotHash, plotData, plotMetadata, subplotMetadata ):
    """
    Get plot from the cache if it exists, else generate and cache it.

    :return: S_OK( plotDict ) or the error from the graph() call
    """
    plotDict = self.__graphCache.get( plotHash )
    if plotDict == False:   # DictCache.get signals a miss with False
      basePlotFileName = "%s/%s.png" % ( self.plotsLocation, plotHash )
      if subplotMetadata:
        retVal = graph( plotData, basePlotFileName, plotMetadata, metadata = subplotMetadata )
      else:
        retVal = graph( plotData, basePlotFileName, plotMetadata )
      if not retVal[ 'OK' ]:
        return retVal
      plotDict = retVal[ 'Value' ]
      if plotDict[ 'plot' ]:
        # Store only the file name; getPlotData re-joins it with
        # plotsLocation when serving the bytes.
        plotDict[ 'plot' ] = os.path.basename( basePlotFileName )
      self.__graphCache.add( plotHash, self.__graphLifeTime, plotDict )
    return S_OK( plotDict )

  def getPlotData( self, plotFileName ):
    """ Read a cached plot file and return its raw bytes wrapped in S_OK. """
    filename = "%s/%s" % ( self.plotsLocation, plotFileName )
    try:
      # open() replaces the removed file() builtin; the context manager
      # guarantees the handle is closed even if read() fails.
      with open( filename, "rb" ) as fd:
        data = fd.read()
    except Exception as e:
      return S_ERROR( "Can't open file %s: %s" % ( plotFileName, str( e ) ) )
    return S_OK( data )
def _deleteGraph( plotDict ):
  """ DictCache delete callback: remove the files of an expired plot entry.

  Best effort by design: values may be None, files may already be gone,
  and a purge failure must never propagate into the cache purge thread.
  """
  try:
    for key in plotDict:
      value = plotDict[ key ]
      if value and os.path.isfile( value ):
        os.unlink( value )
  except Exception:
    # Narrowed from a bare except so SystemExit/KeyboardInterrupt still
    # propagate; any other failure is deliberately ignored (best effort).
    pass
gPlotCache = PlotCache()
|
calancha/DIRAC
|
FrameworkSystem/Service/PlotCache.py
|
Python
|
gpl-3.0
| 2,388
|
[
"DIRAC"
] |
400e1f876497612ad5d398315f762fa27418bd6588ec1f85d479572f8f189d73
|
""" PilotCommand
The PilotCommand class is a command class to know about present pilots
efficiency.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = '$Id$'
from DIRAC import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getSites, getCESiteMapping
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.WorkloadManagementSystem.Client.PilotManagerClient import PilotManagerClient
class PilotCommand(Command):
    """
    Pilot "master" Command.

    Collects grouped pilot summaries (pilots-per-job, pilot/job efficiency,
    status) from the WMS and stores/serves them through the PilotCache
    table of the ResourceManagement database.
    """

    def __init__(self, args=None, clients=None):
        super(PilotCommand, self).__init__(args, clients)

        # Clients may be injected (e.g. in tests); otherwise build defaults.
        if 'Pilots' in self.apis:
            self.pilots = self.apis['Pilots']
        else:
            self.pilots = PilotManagerClient()

        if 'ResourceManagementClient' in self.apis:
            self.rmClient = self.apis['ResourceManagementClient']
        else:
            self.rmClient = ResourceManagementClient()

    def _storeCommand(self, result):
        """
        Store the uniform pilot dictionaries produced by doNew in the
        PilotCache table.

        :param result: list of dicts with at least 'Site', 'CE',
                       'PilotsPerJob', 'PilotJobEff' and 'Status' keys
                       ('OwnerGroup' is optional)
        :return: S_OK() or the first failing query result
        """
        for pilotDict in result:
            resQuery = self.rmClient.addOrModifyPilotCache(site=pilotDict['Site'],
                                                           cE=pilotDict['CE'],
                                                           vO=pilotDict.get('OwnerGroup', None),
                                                           pilotsPerJob=pilotDict['PilotsPerJob'],
                                                           pilotJobEff=pilotDict['PilotJobEff'],
                                                           status=pilotDict['Status'])
            if not resQuery['OK']:
                return resQuery
        return S_OK()

    def _prepareCommand(self):
        """
        Validate self.args and extract the request parameters.

        Requires 'name', 'element' ('Site' or 'Resource') and 'vO' keys.

        :return: S_OK((element, name, vo)) or S_ERROR
        """
        self.log.debug("_prepareCommand: args:", self.args)

        if 'name' not in self.args:
            return S_ERROR('"name" not found in self.args')
        name = self.args['name']

        if 'element' not in self.args:
            return S_ERROR('element is missing')
        element = self.args['element']

        if 'vO' not in self.args:
            return S_ERROR('_prepareCommand: "vO" not found in self.args')
        vo = self.args['vO']

        if element not in ['Site', 'Resource']:
            return S_ERROR('"%s" is not Site nor Resource' % element)

        return S_OK((element, name, vo))

    def doNew(self, masterParams=None):
        """
        Fetch fresh grouped pilot summaries from the WMS, normalize them
        and store them in the PilotCache table.

        :param masterParams: optional (element, name) pair supplied by
                             doMaster; when absent the request comes from
                             self.args via _prepareCommand.
        """
        self.log.debug('PilotCommand doNew')

        if masterParams is not None:
            element, name = masterParams
        else:
            params = self._prepareCommand()
            if not params['OK']:
                return params
            # BUGFIX: _prepareCommand returns a 3-tuple (element, name, vO);
            # the previous two-name unpacking raised ValueError on this path.
            element, name, _vo = params['Value']

        # NOTE: the summaries are grouped over the whole WMS ({} selector);
        # 'name' is only used to pick the grouping granularity.
        if element == 'Resource':
            pilotsResultPivot = self.pilots.getGroupedPilotSummary({}, ['GridSite', 'DestinationSite', 'OwnerGroup'])
        elif element == 'Site':
            pilotsResultPivot = self.pilots.getGroupedPilotSummary({}, ['GridSite', 'OwnerGroup'])
        else:
            # You should never see this error
            return S_ERROR('"%s" is not Site nor Resource' % element)

        if not pilotsResultPivot['OK']:
            return pilotsResultPivot

        pilotsResults = pilotsResultPivot['Value']
        if 'ParameterNames' not in pilotsResults:
            return S_ERROR('Wrong result dictionary, missing "ParameterNames"')
        params = pilotsResults['ParameterNames']
        if 'Records' not in pilotsResults:
            return S_ERROR('Wrong formed result dictionary, missing "Records"')
        records = pilotsResults['Records']

        uniformResult = []
        for record in records:
            # Each record carries keys such as 'Site', 'CE', 'Submitted',
            # 'Running', 'Done', 'Aborted', 'Total', 'PilotsPerJob',
            # 'PilotJobEff', 'Status', 'InMask', ...
            pilotDict = dict(zip(params, record))
            pilotDict['PilotsPerJob'] = float(pilotDict['PilotsPerJob'])
            pilotDict['PilotJobEff'] = float(pilotDict['PilotJobEff'])
            uniformResult.append(pilotDict)

        storeRes = self._storeCommand(uniformResult)
        if not storeRes['OK']:
            return storeRes

        return S_OK(uniformResult)

    def doCache(self):
        """
        Serve cached pilot rows for the requested Site/Resource from the
        PilotCache table.
        """
        self.log.debug('PilotCommand doCache')

        params = self._prepareCommand()
        if not params['OK']:
            return params
        # NOTE(review): the vO value is currently not used to filter the
        # cache query -- confirm whether selectPilotCache should receive it.
        element, name, _vo = params['Value']

        if element == 'Site':
            # WMS returns Site entries with CE = 'Multiple'
            site, ce = name, 'Multiple'
        elif element == 'Resource':
            site, ce = None, name
        else:
            # You should never see this error
            return S_ERROR('"%s" is not Site nor Resource' % element)

        result = self.rmClient.selectPilotCache(site=site, cE=ce)
        if result['OK']:
            result = S_OK([dict(zip(result['Columns'], res)) for res in result['Value']])

        self.log.debug("PilotCommand doCache result: ", result)
        return result

    def doMaster(self):
        """
        Refresh the pilot cache for all known sites and CEs.
        """
        self.log.debug('PilotCommand doMaster')

        siteNames = getSites()
        if not siteNames['OK']:
            return siteNames
        siteNames = siteNames['Value']

        res = getCESiteMapping()
        if not res['OK']:
            return res
        ces = list(res['Value'])

        pilotResults = self.doNew(('Site', siteNames))
        if not pilotResults['OK']:
            self.metrics['failed'].append(pilotResults['Message'])

        pilotResults = self.doNew(('Resource', ces))
        if not pilotResults['OK']:
            self.metrics['failed'].append(pilotResults['Message'])

        return S_OK(self.metrics)
|
yujikato/DIRAC
|
src/DIRAC/ResourceStatusSystem/Command/PilotCommand.py
|
Python
|
gpl-3.0
| 5,800
|
[
"DIRAC"
] |
154042b7cb2afb7c0af35d93793286e4cda6e62cdae391646334d3451eaab8b2
|
"""
The controllers module provides different controller classes,
applicable to different simulations.
A controller object's job is to control simulations-
At a high level a controller object accepts a list of
parameters and chromosomes and (usually) returns
corresponding simulation data.
This is implemented polymorphically in subclasses.
Each controller class must therefore provide a run method, which is used by
the evaluator to run a simulation.
A controller must be able to accept simulation parameters (chromosomes)
from the evaluator.
The evaluator is therefore only concerned with assigining fitness to chromosomes.
On the whole this allows for deep modularization -
as long as the user can provide a controller which will (for instance)
return sample and time arrays for arbitrary chromosome and parameter
lists a range of evaluators would be able to utilise it.
"""
import os
import subprocess
import math
class __Controller():
    """Abstract controller interface.

    A controller turns a list of candidate chromosomes (plus the names of
    the parameters they encode) into simulation data.  Subclasses must
    override :meth:`run`; this base class only documents and enforces
    that contract.
    """

    def run(self, candidates, parameters):
        """Run simulations for *candidates*; subclasses must override."""
        raise NotImplementedError("Valid controller requires run method!")
class CLIController(__Controller):
    """Drive simulations through an external command-line program.

    The configured ``cli_argument`` is invoked once per candidate through
    the shell, receiving the fitness file name followed by the chromosome
    values as positional arguments.
    """

    def __init__(self, cli_argument):
        self.cli_argument = cli_argument

    def run(self,
            candidates,
            parameters,
            fitness_filename='evaluations'):
        """Invoke the external program once per candidate chromosome."""
        for chromosome in candidates:
            # Kept as attributes to preserve this controller's historical
            # behaviour (callers may inspect the last processed candidate).
            self.chromosome = chromosome
            self.parameters = parameters

            # Flatten the chromosome into shell arguments; every value is
            # followed by a space, so the command line ends with one too.
            encoded = ''.join('%s ' % gene for gene in chromosome)
            command = '%s %s %s' % (self.cli_argument, fitness_filename, encoded)
            print(command)
            # NOTE: shell=True runs through the shell; cli_argument is
            # assumed to be trusted configuration, not user input.
            subprocess.call(command, shell=True)
class NrnProject(__Controller):
    """
    Run an nrnproject simulation based on optimizer parameters.

    For every candidate chromosome an experiment id is generated in the
    sqlite database, the simulation is launched as an external
    ``python simrunner.py`` process, and the recorded data are read back
    through sqldbutils.
    """

    def __init__(self,
                 nrnproject_path,
                 db_path,
                 exp_id=None):
        # Entry-point script executed for every candidate.
        self.sim_main_path = os.path.join(nrnproject_path,
                                          'src/simrunner.py')
        self.nrnproject_path = nrnproject_path
        self.db_path = db_path
        # Optional fixed experiment id embedded in the command line; when
        # None no exp_id assignment is appended by __generate_sim_var_string.
        self.exp_id = exp_id

    def __generate_cla(self):
        """Build the full command line for the current candidate."""
        sim_var_string = self.__generate_sim_var_string()
        cla = 'python ' + self.sim_main_path + sim_var_string
        return cla

    def __generate_sim_var_string(self):
        """Encode parameter/value pairs as quoted sim_var assignments."""
        sim_var_string = ''
        for index, param_name in enumerate(self.parameters):
            sim_var_string += ' "sim_var[\'' + param_name + '\'] = ' + str(self.chromosome[index]) + '\"'
        if self.exp_id is not None:  # idiom fix: identity comparison with None
            sim_var_string += ' "sim_var[\'exp_id\'] =' + str(self.exp_id) + '\"'
        return sim_var_string

    def run(self,
            candidates,
            parameters):
        """
        Run one simulation per candidate and return the collected
        sqldbutils.sim_data objects in candidate order.
        """
        import sqldbutils
        exp_data_array = []
        for chromosome in candidates:
            self.chromosome = chromosome
            self.parameters = parameters
            exp_id = sqldbutils.generate_exp_ids(self.db_path)
            # NOTE(review): the freshly generated exp_id is not passed to
            # the simulation command (the command uses self.exp_id);
            # verify the simulator records its data under this id.
            cla = self.__generate_cla()
            # simrunner.py expects to be started from the project src dir.
            # NOTE(review): chdir mutates process-global state; a cwd=
            # subprocess argument would be safer -- behaviour kept as-is.
            os.chdir(self.nrnproject_path + '/src/')
            os.system(cla)
            print(self.db_path)
            print(exp_id)
            exp_data = sqldbutils.sim_data(self.db_path, exp_id)
            exp_data_array.append(exp_data)
        return exp_data_array
class __CondorContext(object):
    """Context for Condor-based grid.

    Builds per-job shell scripts and a Condor submit file for a batch of
    candidates and holds the ssh connection to the submit host.

    NOTE(review): several names used here (ssh_utils at __init__ time,
    optimizer_params, CandidateData, controllers, self.tmpdir,
    self.CandidateData_list) are not defined in this module; the class
    appears to rely on globals/attributes provided elsewhere -- confirm
    before use.
    """
    def __init__(self,
                 host,
                 username,
                 password,
                 port):
        # NOTE(review): ssh_utils is only imported inside other methods;
        # as written this line raises NameError unless the caller imported
        # ssh_utils into this module's namespace.
        self.messagehost=ssh_utils.host(host,username,
                                        password,port)
    def __split_list(self,
                     alist,
                     wanted_parts=1):
        # Split alist into wanted_parts contiguous chunks of near-equal size.
        length = len(alist)
        return [ alist[i*length // wanted_parts: (i+1)*length // wanted_parts]
                 for i in range(wanted_parts) ]
    def __prepare_candidates(self,candidates,candidates_per_job=1):
        #Split candidate list into smaller ones (jobs):
        #and make a job list
        # NOTE(review): optimizer_params is not defined in this module.
        if optimizer_params.candidates_in_job != None:
            candidates_in_job=optimizer_params.candidates_in_job
        else:
            candidates_in_job=candidates_per_job
        num_candidates=len(candidates)
        ids=range(num_candidates)
        enumerated_candidates=zip(candidates,ids)
        # NOTE(review): this reads as Python 2 -- under Python 3 the
        # division is float and zip() is lazy.
        num_jobs=num_candidates/candidates_in_job
        self.num_jobs=num_jobs
        self.job_list=self.__split_list(enumerated_candidates,wanted_parts=self.num_jobs)
    def __make_job_file(self,job,job_number):
        # Write runN.sh: unpack portable NEURON plus the project tarball,
        # then run simrunner.py once per candidate in this job.
        #write the header:
        filepath = os.path.join(self.tmpdir, 'run' + str(job_number) + '.sh')
        run_shell = open(filepath, 'w')
        run_shell.write('#!/bin/bash\n')
        run_shell.write('reldir=`dirname $0`\n')
        run_shell.write('cd $reldir\n')
        run_shell.write('directory=`pwd`\n')
        run_shell.write('pndirectory=$directory\n')
        run_shell.write('#Untar the file:\n')
        run_shell.write('/bin/tar xzf ./portable-neuron.tar.gz\n')
        tarfile_name=optimizer_params.tarred_nrnproj
        run_shell.write('/bin/tar xzf ./'+tarfile_name+'\n')
        #CandidateData_list=[]
        for enumerated_candidate in job:
            chromosome = enumerated_candidate[0]
            candidate_info = CandidateData(chromosome)
            exp_id = enumerated_candidate[1]
            candidate_info.set_exp_id(exp_id)
            candidate_info.set_job_num(job_number)
            self.CandidateData_list.append(candidate_info)
            nproj = controllers.NrnProjSimRun(optimizer_params.project_path, chromosome)
            run_shell.write('#issue the commands\n')
            # Backslash continuations below are inside the string literal,
            # so the shell command is emitted as a single line.
            run_shell.write('$pndirectory/pnpython.sh \
$directory/src/simrunner.py "sim_var[\'exp_id\'] \
= ' + str(exp_id) + '\" ' + '"sim_var[\'''dbname''\'] \
= \'outputdb' + str(job_number) + '.sqlite\'"' +
                            nproj.sim_var_string + '\n')
        # NOTE(review): original indentation was ambiguous; the trailer is
        # reconstructed as once-per-job (after the candidate loop).
        run_shell.write('echo \'done\'\n')
        run_shell.write('cp $directory/sims/outputdb' + str(job_number) + '.sqlite $directory\n')
        #self.CandidateData_list=CandidateData_list
        run_shell.close()
    def __make_submit_file(self):
        """ write the condor submit files"""
        filepath = os.path.join(self.tmpdir, 'submitfile.submit')
        submit_file=open(filepath,'w')
        submit_file.write('universe = vanilla\n')
        submit_file.write('log = pneuron.log\n')
        submit_file.write('Error = err.$(Process)\n')
        submit_file.write('Output = out.$(Process)\n')
        submit_file.write('requirements = GLIBC == "2.11"\n')
        tarfile_name=optimizer_params.tarred_nrnproj
        submit_file.write('transfer_input_files = portable-neuron.tar.gz,'+tarfile_name+'\n')
        submit_file.write('should_transfer_files = yes\n')
        submit_file.write('when_to_transfer_output = on_exit_or_evict\n')
        #this is where you have to do the clever stuff:
        # One executable/queue pair per generated runN.sh job script.
        for shellno in range(self.num_jobs):
            submit_file.write('executable = run'+str(shellno)+'.sh\n')
            submit_file.write('queue\n')
        #finally close the submit file
        submit_file.close()
    def __build_condor_files(self,candidates,parameters,candidates_per_job=100):
        #prepare list of candidates to be farmed on grid:
        # NOTE(review): the candidates_per_job argument is ignored here --
        # 100 is hard-coded in the call below.
        self.__prepare_candidates(candidates,candidates_per_job=100)
        #make the job files (shell scripts to be executed on the execute nodes)
        job_number=0 #run shell script number
        for job in self.job_list:
            self.__make_job_file(job,job_number)
            job_number+=1
        #now make the submit file
        self.__make_submit_file()
    def __delete_remote_files(self,host):
        # Wipe the remote working directory before uploading a new batch.
        import ssh_utils
        command='rm -rf ./*'
        ssh_utils.issue_command(host, command)
    def __put_multiple_files(self,host,filelist,localdir='/',remotedir='/'):
        # Upload every file in filelist from localdir to remotedir.
        import ssh_utils
        for file in filelist:
            localpath=os.path.join(localdir,file)
            remotepath=os.path.join(remotedir,file)
            ssh_utils.put_file(host,localpath,remotepath)
class NrnProjectCondor(NrnProject):
    """
    Run NrnProject-based simulations on a Condor-managed
    federated system.

    NOTE(review): this class looks unfinished -- several attribute and
    method names it references do not exist (see inline notes).
    """
    def __init__(self,host,username,password,port=80,
                 local_analysis=False,candidates_per_job=100):
        # NOTE(review): NrnProject.__init__ requires nrnproject_path and
        # db_path, so this zero-argument super() call raises TypeError.
        super(NrnProjectCondor,self).__init__()
        #other things like the number of nodes to divide the work onto and
        #host connection parameters need to go into this constructor
        #the more I think about it the less this seems like a good idea
        #though
        if local_analysis:
            # NOTE(review): neither __local_run nor __remote_run__ exists
            # on this class (the methods below are __condor_run and
            # __local_evaluate) -- both lookups raise AttributeError.
            self.run=self.__local_run
        else:
            self.run=self.__remote_run__
        #make a context which provides grid utilities
        # NOTE(review): inside a class body __CondorContext is name-mangled
        # to _NrnProjectCondor__CondorContext, so this reference does not
        # resolve to the module-level __CondorContext class.
        self.context=__CondorContext(host,username,password,port)
        self.cpj=candidates_per_job
    def __condor_run(self,candidates,parameters):
        """
        Run simulations on grid and analyse data locally (???I'm quite confused here...there is a mistake somewhere as the name doesn't match the description - which method is which?)
        Once each generation has finished, all data is pulled to local
        workstation in form of sqlite databases (1 database per job)
        and these are analysed and the fitness estimated sequentially
        the fitness array is then returned.
        """
        import time
        import ssh_utils
        #Build submit and runx.sh files, exp_id now corresponds
        #to position in chromosome and fitness arrays
        # NOTE(review): double-underscore context methods are mangled and
        # cannot be called from outside __CondorContext as written.
        self.context.__build_condor_files(candidates,parameters,
                                          candidates_per_job=self.cpj)
        #This is a file handling block..
        #delete everything in the ssh_utilse directory you're about to put files in
        self.__delete_remote_files__()
        filelist=os.listdir(self.tmpdir)
        #copy local files over, some stuff is missing here as it needs to be an attribute in the condor context
        self.__put_multiple_files(filelist,localdir=self.tmpdir)
        filelist=os.listdir(self.portableswdir)
        #copy portable software files over:
        self.__put_multiple_files(filelist,localdir=self.portableswdir)
        #issue a command to the message host to issue commands to the grid:
        # NOTE(review): 'context' is unqualified here -- presumably
        # self.context.messagehost was intended.
        ssh_utils.issue_command(context.messagehost,
                                'export PATH=/opt/Condor/release/bin:$PATH\ncondor_submit submitfile.submit')
        #make a list of the database files we need:
        self.jobdbnames=[]
        for job_num in range(self.num_jobs):
            jobdbname='outputdb'+str(job_num)+'.sqlite'
            self.jobdbnames.append(jobdbname)
        #wait till we know file exists:
        dbs_created=False
        pulled_dbs=[] # list of databases which have been extracted from remote server
        while (dbs_created==False):
            print('waiting..')
            time.sleep(20)
            print('checking if dbs created:')
            command='ls'
            remote_filelist=ssh_utils.issue_command(self.messagehost, command)
            for jobdbname in self.jobdbnames:
                db_exists=jobdbname+'\n' in remote_filelist
                if (db_exists==False):
                    print(jobdbname+' has not been generated')
                    dbs_created=False
                elif db_exists==True and jobdbname not in pulled_dbs:
                    print(jobdbname+' has been generated')
                    remotefile=optimizer_params.remotedir+jobdbname
                    localpath=os.path.join(self.datadir,str(self.generation)+jobdbname)
                    ssh_utils.get_file(self.messagehost,remotefile,localpath)
                    pulled_dbs.append(jobdbname) #so that it is not extracted more than once
                    #here pop-in the fitness evaluation
            if len(pulled_dbs)==len(self.jobdbnames):
                dbs_created=True
        #this block can be simplified, it need simply return exp_data containers
        fitness=[]
        for CandidateData in self.CandidateData_list:
            job_num = CandidateData.job_num
            dbname=str(self.generation)+'outputdb'+str(job_num)+'.sqlite'
            dbpath=os.path.join(self.datadir,dbname)
            exp_id=CandidateData.exp_id
            connection=sqldbutils.db_connect(dbpath) #establish a database connection
            # Backslash continuations are inside the string literal, so the
            # SQL is issued as a single line.
            query='SELECT numerical_value\
FROM output_params WHERE experiment_id=\
'+str(exp_id)+' AND parameter="fitness"'
            exp_fitness=sqldbutils.execute_query(connection,query)
            exp_fitness=exp_fitness.fetchall()
            exp_fitness=exp_fitness[0][0]
            print('Fitness:')
            print(exp_fitness)
            fitness.append(exp_fitness)
        self.generation+=1
        return fitness
    ###ignore this for now###
    def __local_evaluate(self,candidates,args):
        import time
        # NOTE(review): bare name statement -- presumably a leftover from
        # "import analysis"; as written it raises NameError.
        analysis
        self.CandidateData_list=[]
        analysis_var=self.analysis_var
        #Build submitfile.submit and runx.sh files:
        # NOTE(review): '__buil_condor_files' is misspelled and mangled;
        # this call cannot succeed as written.
        self.__buil_condor_files(candidates) #exp_id now corresponds to position in chromosome/fitness array
        fitness=[]
        #submit the jobs to the grid
        os.chdir(self.tmpdir)
        os.system('condor_submit submitfile.submit')
        #wait till you know file exists:
        dbs_created=False
        while (dbs_created==False):
            print('checking if dbs created:')
            for job_num in range(self.num_jobs):
                jobdbname='outputdb'+str(job_num)+'.sqlite'
                jobdbpath=os.path.join(self.datadir,jobdbname)
                print(jobdbpath)
                db_exists=os.path.exists(jobdbpath)
                if (db_exists==False):
                    time.sleep(60)
                    dbs_created=False
                    break
                dbs_created=True
        for CandidateData in self.CandidateData_list:
            job_num = CandidateData.job_num
            dbname='/outputdb'+str(job_num)+'.sqlite'
            dbpath=self.datadir+dbname
            exp_id=CandidateData.exp_id
            exp_data=sqldbutils.sim_data(dbpath,exp_id)
            analysis=analysis.IClampAnalysis(exp_data.samples,exp_data.t,analysis_var,5000,10000)
            exp_fitness=analysis.evaluate_fitness(self.targets,self.weights,cost_function=analysis.normalised_cost_function)
            fitness.append(exp_fitness)
        # Clean up the per-job databases once all fitness values are read.
        for job_num in range(self.num_jobs):
            jobdbname='outputdb'+str(job_num)+'.sqlite'
            jobdbpath=os.path.join(self.datadir,jobdbname)
            print(jobdbpath)
            os.remove(jobdbpath)
        return fitness
class SineWaveController(__Controller):
    """Produce sine-wave 'voltage' traces from candidate parameters.

    Candidates encode 'amp', 'period' and 'offset'; each trace samples
    offset + amp * sin(2*pi*t/period) for t in [0, sim_time] with step dt.
    """

    def __init__(self, sim_time, dt):
        self.sim_time = sim_time
        self.dt = dt

    def run_individual(self, sim_var, gen_plot=False, show_plot=False):
        """
        Run an individual simulation.

        *sim_var* maps parameter names to the candidate's values; returns
        (times, volts) as numpy arrays and can optionally plot the trace.
        """
        print(">> Running individual: %s"%(sim_var))
        import numpy as np

        times = []
        volts = []
        t = 0
        # t is advanced by repeated addition (not multiplication) so the
        # sampled points match the accumulated-float sequence exactly.
        while t <= self.sim_time:
            sample = sim_var['offset'] + (sim_var['amp'] * (math.sin( 2*math.pi * t/sim_var['period'])))
            times.append(t)
            volts.append(sample)
            t += self.dt

        if gen_plot:
            from matplotlib import pyplot as plt

            label_text = "".join("%s=%s "%(key, sim_var[key]) for key in sim_var)
            plt.plot(times,volts, label=label_text)
            plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, shadow=True, ncol=1)
            if show_plot:
                plt.show()

        return np.array(times), np.array(volts)

    def run(self, candidates, parameters):
        """
        Run the simulation for every candidate.

        Returns a list of [times, volts] pairs, one per candidate, in the
        order the candidates were given.
        """
        return [list(self.run_individual(dict(zip(parameters, candidate))))
                for candidate in candidates]
|
vellamike/neurotune
|
neurotune/controllers.py
|
Python
|
bsd-3-clause
| 17,830
|
[
"NEURON"
] |
2ce68aa3babc85e70e722695cc13b853e45912f5e7fcdff5fef0702c5ff3e570
|
'''
PathwayGenie (c) GeneGenie Bioinformatics Ltd. 2018
PathwayGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=no-member
# pylint: disable=too-many-arguments
import RNA
def run(cmd, sequences, temp, dangles, energy_gap=None, bp_x=None, bp_y=None):
    '''Dispatch a ViennaRNA computation.

    *cmd* selects the operation ('mfe', 'subopt' or 'energy'); sequences
    are coerced to plain strings first.  Unknown commands yield None.
    '''
    seqs = [str(seq) for seq in sequences]

    dispatch = {
        'mfe': lambda: _mfe(seqs, temp, dangles),
        'subopt': lambda: _subopt(seqs, energy_gap, temp, dangles),
        'energy': lambda: _energy(seqs, bp_x, bp_y, temp, dangles),
    }

    handler = dispatch.get(cmd)
    return handler() if handler is not None else None
def _mfe(sequences, temp=37.0, dangles='some'):
    '''Minimum-free-energy fold of the first sequence.

    Returns ([energy], [bp_x], [bp_y]); a structure without base pairs
    yields ([0.0], [[]], [[]]).
    '''
    model = RNA.md()
    model.temperature = temp
    model.dangles = _get_dangles(dangles)

    structure, energy = RNA.fold_compound(sequences[0], model).mfe()
    pairs_x, pairs_y = _get_numbered_pairs(structure)

    if pairs_x and pairs_y:
        return [energy], [pairs_x], [pairs_y]

    return [0.0], [[]], [[]]
def _subopt(sequences, energy_gap, temp=37.0, dangles='some'):
    '''Suboptimal structures of the '&'-joined sequences within energy_gap.

    Returns parallel lists (energies, bp_xs, bp_ys); solutions without
    base pairs are skipped.
    '''
    model = RNA.md()
    model.temperature = temp
    model.dangles = _get_dangles(dangles)

    solutions = \
        RNA.fold_compound('&'.join(sequences), model).subopt(int(energy_gap))

    energies, bp_xs, bp_ys = [], [], []

    for solution in solutions:
        pairs_x, pairs_y = _get_numbered_pairs(solution.structure)

        if pairs_x and pairs_y:
            energies.append(solution.energy)
            bp_xs.append(pairs_x)
            bp_ys.append(pairs_y)

    return energies, bp_xs, bp_ys
def _energy(sequences, bp_x, bp_y, temp=37.0, dangles='some'):
    '''Free energy of a fixed structure imposed on the joined sequences.'''
    model = RNA.md()
    model.temperature = temp
    model.dangles = _get_dangles(dangles)

    joined = '&'.join(sequences)
    brackets = _get_brackets([len(seq) for seq in sequences], bp_x, bp_y)

    return RNA.fold_compound(joined, model).eval_structure(brackets)
def _get_dangles(dangles):
    '''Map the dangles keyword to ViennaRNA's code: none=0, some=1, else 2.'''
    return {'none': 0, 'some': 1}.get(dangles, 2)
def _get_numbered_pairs(bracket_str):
    '''Convert dot-bracket notation to 1-based paired-position lists.

    '&' separates strands and is excluded from the numbering.  Returns
    [bp_x, bp_y] where bp_x[i] pairs with bp_y[i]; a structure containing
    no ')' yields [None, None].
    '''
    total_closed = bracket_str.count(')')
    if not total_closed:
        return [None, None]

    bp_x = []
    # One slot per ')' so unmatched '(' leave None holes, as before.
    bp_y = [None] * total_closed
    open_stack = []
    offset = 0  # number of '&' separators passed so far

    for pos, letter in enumerate(bracket_str):
        if letter == '(':
            bp_x.append(pos - offset)
            open_stack.append(pos - offset)
        elif letter == ')':
            opened = open_stack.pop()
            bp_y[bp_x.index(opened)] = pos - offset
        elif letter == '&':
            offset += 1

    return [[p + 1 for p in bp_x], [p + 1 for p in bp_y]]
def _get_brackets(seq_lens, bp_x, bp_y):
'''_get_brackets'''
bp_x = [pos - 1 for pos in bp_x]
bp_y = [pos - 1 for pos in bp_y]
brackets = []
counter = 0
for seq_len in seq_lens:
for pos in range(counter, seq_len + counter):
if pos in bp_x:
brackets.append('(')
elif pos in bp_y:
brackets.append(')')
else:
brackets.append('.')
counter += seq_len
return ''.join(brackets)
|
neilswainston/PathwayGenie
|
parts_genie/vienna_utils.py
|
Python
|
mit
| 3,317
|
[
"VisIt"
] |
93f387907354379e3b3e501da41e6c41f4be0ce5970d3c70155cee68073aa894
|
# -*- coding: utf-8 -*-
"""
Unitary Event (UE) analysis is a statistical method to analyze in a time
resolved manner excess spike correlation between simultaneously recorded
neurons by comparing the empirical spike coincidences (precision of a few ms)
to the expected number based on the firing rates of the neurons
(see :cite:`unitary_event_analysis-Gruen99_67`).
Background
----------
It has been proposed that cortical neurons organize dynamically into functional
groups (“cell assemblies”) by the temporal structure of their joint spiking
activity. The Unitary Events analysis method detects conspicuous patterns of
synchronous spike activity among simultaneously recorded single neurons. The
statistical significance of a pattern is evaluated by comparing the empirical
number of occurrences to the number expected given the firing rates of the
neurons. Key elements of the method are the proper formulation of the null
hypothesis and the derivation of the corresponding count distribution of
synchronous spike events used in the significance test. The analysis is
performed in a sliding window manner and yields a time-resolved measure of
significant spike synchrony. For further reading, see
:cite:`unitary_event_analysis-Riehle97_1950,unitary_event_analysis-Gruen02_43,\
unitary_event_analysis-Gruen02_81,unitary_event_analysis-Gruen03,\
unitary_event_analysis-Gruen09_1126,unitary_event_analysis-Gruen99_67`.
Tutorial
--------
:doc:`View tutorial <../tutorials/unitary_event_analysis>`
Run tutorial interactively:
.. image:: https://mybinder.org/badge.svg
:target: https://mybinder.org/v2/gh/NeuralEnsemble/elephant/master
?filepath=doc/tutorials/unitary_event_analysis.ipynb
.. currentmodule:: elephant.unitary_event_analysis
Functions overview
------------------
.. autosummary::
:toctree: toctree/unitary_event_analysis/
jointJ_window_analysis
:copyright: Copyright 2015-2020 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import sys
import warnings
import neo
import numpy as np
import quantities as pq
import scipy
import elephant.conversion as conv
from elephant.utils import is_binary
__all__ = [
"hash_from_pattern",
"inverse_hash_from_pattern",
"n_emp_mat",
"n_emp_mat_sum_trial",
"n_exp_mat",
"n_exp_mat_sum_trial",
"gen_pval_anal",
"jointJ",
"jointJ_window_analysis"
]
def hash_from_pattern(m, base=2):
    """
    Encode binary spike pattern(s) as integer hash value(s).

    Each column of the (neurons x patterns) binary matrix `m` is read
    as a number in the given `base`, with the first neuron as the most
    significant digit.  With `base=2`, the hash of `[0, 1, 1]` is
    `0*2^2 + 1*2^1 + 1*2^0 = 3`.

    Parameters
    ----------
    m: np.ndarray or list
        Binary matrix (rows: neurons, columns: patterns) or a single
        pattern vector.
    base: integer
        Base for the positional encoding.  Default is 2.

    Returns
    -------
    np.ndarray
        Hash value of each pattern.

    Raises
    ------
    ValueError
        If `m` contains entries other than 0 and 1.
    """
    m = np.asarray(m)
    n_neurons = m.shape[0]
    if not is_binary(m):
        raise ValueError('Patterns should be binary: 0 or 1')
    # Plain Python ints avoid numpy's int64 upper bound for many neurons.
    digit_weights = [base ** exponent
                     for exponent in range(n_neurons - 1, -1, -1)]
    # Scalar product of digit weights with the pattern columns.
    return np.dot(digit_weights, m)
def inverse_hash_from_pattern(h, N, base=2):
    """
    Decode hash values back into binary spike patterns.

    Inverse of `hash_from_pattern`: the first row of the result is the
    most significant digit, so e.g. `inverse_hash_from_pattern([3, 7], 4)`
    yields columns [0, 0, 1, 1] and [0, 1, 1, 1].

    Parameters
    ----------
    h: list of int
        Hash values, one per pattern.
    N: integer
        Number of neurons.
    base: integer
        Base used to generate the hashes.  Default is 2.

    Returns
    -------
    m: (N, P) np.ndarray
        Matrix whose columns are the decoded patterns.

    Raises
    ------
    ValueError
        If a hash is non-integer or exceeds the largest value encodable
        with N neurons (e.g. 2^N - 1 for base 2).
    """
    h = np.asarray(h)  # becomes object dtype for values beyond int64
    if sys.version_info < (3,):
        integer_types = (int, long)
    else:
        integer_types = (int,)
    # .tolist() converts numpy scalars to plain Python ints first.
    if not all(isinstance(value, integer_types) for value in h.tolist()):
        raise ValueError("hash values should be integers")
    # Digit weights, most significant first; their sum is the largest
    # representable hash for N neurons.
    weights = np.array([base ** exponent
                        for exponent in range(N - 1, -1, -1)])
    if any(h > weights.sum()):
        raise ValueError(
            "hash value is not compatible with the number of neurons N")
    patterns = (h // weights[:, np.newaxis]) % base
    return patterns.astype(int)  # object dtype -> int when hashes were small
def n_emp_mat(mat, pattern_hash, base=2):
    """
    Count occurrences of the hashed spike patterns in binned spike trains.

    Parameters
    ----------
    mat : (N, M) np.ndarray
        Binary matrix; rows are neurons, columns are time bins.
    pattern_hash: list of int
        Hash values of the patterns whose occurrences are counted.
    base: integer
        Base used to generate the hashes.  Default is 2.

    Returns
    -------
    N_emp: np.ndarray
        Number of occurrences of each pattern.
    indices: list of np.ndarray
        For each pattern, the bin indices at which it occurs.

    Raises
    ------
    ValueError
        If `mat` is not a binary matrix.
    """
    if not is_binary(mat):
        raise ValueError("entries of mat should be either one or zero")
    # Hash every time bin once, then look each pattern up.
    bin_hashes = hash_from_pattern(mat, base=base)
    counts = np.zeros(len(pattern_hash))
    indices = []
    for pattern_idx, pattern in enumerate(pattern_hash):
        matches = np.where(bin_hashes == pattern)[0]
        indices.append(matches)
        counts[pattern_idx] = len(matches)
    return counts, indices
def n_emp_mat_sum_trial(mat, pattern_hash):
    """
    Count pattern occurrences per trial and sum the counts over trials.

    Parameters
    ----------
    mat: np.ndarray
        Binary array of binned spike trains, shape
        (trials, neurons, time bins); every trial must have the same
        number of neurons and bins.
    pattern_hash: list of int
        Hash values of the patterns of interest.

    Returns
    -------
    N_emp: np.ndarray
        Occurrence counts summed over trials, one entry per pattern.
    idx_trials: list
        Per trial, the list of bin-index arrays (one per pattern) at
        which the patterns occurred.

    Raises
    ------
    ValueError
        If `mat` is not a binary matrix.
    """
    if not is_binary(mat):
        raise ValueError("entries of mat should be either one or zero")
    summed_counts = np.zeros(len(pattern_hash))
    idx_trials = []
    for trial_mat in mat:
        trial_counts, trial_indices = n_emp_mat(trial_mat, pattern_hash,
                                                base=2)
        idx_trials.append(trial_indices)
        summed_counts += trial_counts
    return summed_counts, idx_trials
def _n_exp_mat_analytic(mat, pattern_hash):
    """
    Expected pattern counts from the neurons' marginal firing probabilities.

    Assumes independence across neurons: for each pattern, the joint
    probability is the product over neurons of p (where the pattern
    requires a spike) or 1 - p (where it requires silence), scaled by
    the number of time bins.
    """
    # Per-neuron firing probability, shaped as a column vector.
    marg_prob = np.mean(mat, 1, dtype=float)[:, np.newaxis]
    n_neurons = mat.shape[0]
    patterns = inverse_hash_from_pattern(pattern_hash, n_neurons)
    n_patterns = patterns.shape[1]
    # Match probability of each neuron for every pattern entry.
    prob_fire = np.tile(marg_prob, (1, n_patterns))
    pmat = patterns * prob_fire + (1 - patterns) * (1 - prob_fire)
    return np.prod(pmat, axis=0) * float(mat.shape[1])
def _n_exp_mat_surrogate(mat, pattern_hash, n_surr=1):
    """
    Distribution of expected pattern counts via spike-time randomization.

    Each surrogate shuffles every neuron's spike train in time
    (row-wise), which preserves per-neuron spike counts while
    destroying temporal correlations, then re-counts the pattern.

    Raises
    ------
    ValueError
        If more than one pattern hash is given.
    """
    if len(pattern_hash) > 1:
        raise ValueError('surrogate method works only for one pattern!')
    N_exp_array = np.zeros(n_surr)
    # Previously `for rz_idx, rz in enumerate(np.arange(n_surr))` with an
    # unused `rz`, and a list comprehension used only for its shuffle
    # side effect; both replaced by plain loops.
    for surr_idx in range(n_surr):
        mat_surr = np.copy(mat)
        for row in mat_surr:
            np.random.shuffle(row)
        N_exp_array[surr_idx] = n_emp_mat(mat_surr, pattern_hash)[0][0]
    return N_exp_array
def n_exp_mat(mat, pattern_hash, method='analytic', n_surr=1):
    """
    Expected number of occurrences of each spike pattern.

    Parameters
    ----------
    mat: np.ndarray
        (neurons x bins) array with entries in [0, 1]; entries are
        binary unless `mat` was averaged over trials beforehand.
    pattern_hash: list of int
        Hash values, one per pattern.
    method: {'analytic', 'surr'}, optional
        'analytic' computes the closed-form expectation from the
        marginal firing probabilities; 'surr' draws `n_surr`
        spike-time-randomized surrogates.  Default is 'analytic'.
    n_surr: int
        Number of surrogates (only used with method='surr').

    Returns
    -------
    np.ndarray
        Per-pattern expectations ('analytic'), or an
        (n_surr x patterns) array of surrogate realizations ('surr').

    Raises
    ------
    ValueError
        If `mat` has entries outside [0, 1], or if `method` is unknown
        (previously an unknown method silently returned None).
    """
    if not np.all((mat >= 0) & (mat <= 1)):
        raise ValueError("entries of mat should be in range [0, 1]")
    if method == 'analytic':
        return _n_exp_mat_analytic(mat, pattern_hash)
    if method == 'surr':
        return _n_exp_mat_surrogate(mat, pattern_hash, n_surr=n_surr)
    # Fail loudly instead of falling through to an implicit None.
    raise ValueError("unknown method: {method}".format(method=method))
def n_exp_mat_sum_trial(mat, pattern_hash, method='analytic_TrialByTrial',
                        n_surr=1):
    """
    Expected pattern counts accumulated over trials.

    Parameters
    ----------
    mat: np.ndarray
        Binary array of binned spike trains, shape
        (trials, neurons, time bins).
    pattern_hash: list of int
        Hash values, one per pattern.
    method: str
        'analytic_TrialByTrial': analytic expectation per trial, summed
        over trials.
        'analytic_TrialAverage': analytic expectation of the
        trial-averaged rates, scaled by the trial count
        (cf. Gruen et al. 2003).
        'surrogate_TrialByTrial': surrogate distribution per trial,
        summed over trials.
        Default is 'analytic_TrialByTrial'.
    n_surr: int, optional
        Number of surrogates.  Default is 1.

    Returns
    -------
    n_exp: np.ndarray
        Expected counts, summed/accumulated across trials.

    Raises
    ------
    ValueError
        If `method` is not one of the recognized values.
    """
    if method == 'analytic_TrialByTrial':
        n_exp = np.zeros(len(pattern_hash))
        for trial_mat in mat:
            n_exp += n_exp_mat(trial_mat, pattern_hash, method='analytic')
    elif method == 'analytic_TrialAverage':
        trial_average = np.mean(mat, axis=0)
        n_exp = n_exp_mat(trial_average, pattern_hash,
                          method='analytic') * mat.shape[0]
    elif method == 'surrogate_TrialByTrial':
        n_exp = np.zeros(n_surr)
        for trial_mat in mat:
            n_exp += n_exp_mat(trial_mat, pattern_hash,
                               method='surr', n_surr=n_surr)
    else:
        raise ValueError(
            "The method only works on the zero_one matrix at the moment")
    return n_exp
def gen_pval_anal(mat, pattern_hash, method='analytic_TrialByTrial',
                  n_surr=1):
    """
    Compute the expected coincidences and a p-value function.

    The returned callable maps empirical coincidence counts `n_emp` to
    the p-value, i.e. the tail probability of observing at least
    `n_emp` coincidences under the null hypothesis.

    Parameters
    ----------
    mat: np.ndarray
        Binary array of binned spike trains, shape
        (trials, neurons, time bins).
    pattern_hash: list of int
        Hash values, one per pattern.
    method: string
        'analytic_TrialByTrial': analytic expectation per trial, summed.
        'analytic_TrialAverage': expectation of trial-averaged rates
        (cf. Gruen et al. 2003).
        'surrogate_TrialByTrial': surrogate count distribution.
        Default is 'analytic_TrialByTrial'.
    n_surr: integer, optional
        Number of surrogates.  Default is 1.

    Returns
    --------
    pval_anal: callable
        Function calculating the p-value for given empirical counts.
    n_exp: np.ndarray
        Expected coincidence counts (surrogate realizations for the
        surrogate method).

    Raises
    ------
    ValueError
        If `method` is not one of the values above.
    """
    if method == 'analytic_TrialByTrial' or method == 'analytic_TrialAverage':
        n_exp = n_exp_mat_sum_trial(mat, pattern_hash, method=method)

        def pval(n_emp):
            # Poisson survival function P[X >= n_emp] for X ~ Poisson(n_exp):
            # 1 - Q(a, x) equals the regularized lower incomplete gamma
            # P(a, x).
            p = 1. - scipy.special.gammaincc(n_emp, n_exp)
            return p
    elif method == 'surrogate_TrialByTrial':
        n_exp = n_exp_mat_sum_trial(
            mat, pattern_hash, method=method, n_surr=n_surr)

        def pval(n_emp):
            # Normalized histogram of the surrogate counts.
            hist = np.bincount(np.int64(n_exp))
            exp_dist = hist / float(np.sum(hist))
            if len(n_emp) > 1:
                # BUG FIX: these two literals previously concatenated to
                # "...can becalculated..." (missing separating space).
                raise ValueError('In surrogate method the p_value can be '
                                 'calculated only for one pattern!')
            # Right tail of the surrogate distribution from n_emp upward.
            return np.sum(exp_dist[int(n_emp[0]):])
    else:
        raise ValueError("Method is not allowed: {method}".format(
            method=method))
    return pval, n_exp
def jointJ(p_val):
    """
    Surprise measure: logarithmic transform of joint p-values.

    Js = log10(1 - p) - log10(p), so highly significant (small)
    p-values map to large positive surprise values, which visualize
    better than the raw p-values.

    Parameters
    ----------
    p_val: list of float
        P-values of the statistical tests, one per pattern.

    Returns
    -------
    Js: np.ndarray
        Surprise value for each p-value.
    """
    probabilities = np.asarray(p_val)
    surprise = np.log10(1 - probabilities) - np.log10(probabilities)
    return surprise
def _rate_mat_avg_trial(mat):
"""
Calculates the average firing rate of each neurons across trials.
"""
n_trials, n_neurons, n_bins = np.shape(mat)
psth = np.zeros(n_neurons, dtype=np.float32)
for tr, mat_tr in enumerate(mat):
psth += np.sum(mat_tr, axis=1)
return psth / (n_bins * n_trials)
def _bintime(t, bin_size):
    """
    Express time quantity `t` as integer multiples of `bin_size` (floored).
    """
    t_ms = t.rescale('ms').magnitude
    bin_size_ms = bin_size.rescale('ms').magnitude
    return np.floor(np.array(t_ms) / bin_size_ms).astype(int)
def _winpos(t_start, t_stop, winsize, winstep, position='left-edge'):
    """
    Left edges of the sliding analysis windows, as a pq.ms quantity.

    Raises ValueError for any `position` other than 'left-edge'.
    """
    if position != 'left-edge':
        raise ValueError(
            'the current version only returns left-edge of the window')
    start_ms = t_start.rescale('ms').magnitude
    stop_ms = t_stop.rescale('ms').magnitude
    winsize_ms = winsize.rescale('ms').magnitude
    winstep_ms = winstep.rescale('ms').magnitude
    # Last window must still fit entirely before t_stop.
    return np.arange(
        start_ms, stop_ms - winsize_ms + winstep_ms, winstep_ms) * pq.ms
def _UE(mat, pattern_hash, method='analytic_TrialByTrial', n_surr=1):
    """
    Return the default results of unitary events analysis
    (Surprise, empirical coincidences and index of where it happened
    in the given mat, n_exp and average rate of neurons)
    """
    rate_avg = _rate_mat_avg_trial(mat)
    n_emp, indices = n_emp_mat_sum_trial(mat, pattern_hash)
    # Obtain the p-value function and the expected counts; for the
    # surrogate method the realizations are collapsed to their mean.
    if method == 'surrogate_TrialByTrial':
        dist_exp, n_exp = gen_pval_anal(
            mat, pattern_hash, method, n_surr=n_surr)
        n_exp = np.mean(n_exp)
    elif method == 'analytic_TrialByTrial' or \
            method == 'analytic_TrialAverage':
        dist_exp, n_exp = gen_pval_anal(mat, pattern_hash, method)
    # NOTE(review): an unrecognized `method` leaves dist_exp unbound and
    # raises NameError here rather than a descriptive error -- confirm
    # whether callers can ever pass other method strings.
    pval = dist_exp(n_emp)
    Js = jointJ(pval)
    return Js, rate_avg, n_exp, n_emp, indices
def jointJ_window_analysis(
        data, bin_size, winsize, winstep, pattern_hash,
        method='analytic_TrialByTrial', t_start=None,
        t_stop=None, binary=True, n_surr=100):
    """
    Calculates the joint surprise in a sliding window fashion.

    Implementation is based on :cite:`unitary_event_analysis-Gruen99_67`.

    Parameters
    ----------
    data : list
        A list of spike trains (`neo.SpikeTrain` objects) in different
        trials:
        0-axis --> Trials
        1-axis --> Neurons
        2-axis --> Spike times
    bin_size : pq.Quantity
        The size of bins for discretizing spike trains.
    winsize : pq.Quantity
        The size of the window of analysis.
    winstep : pq.Quantity
        The size of the window step.
    pattern_hash : list of int
        List of patterns of interest as hash values
        (see `hash_from_pattern` and `inverse_hash_from_pattern`).
    method : str
        The method with which the unitary events would be computed:
        'analytic_TrialByTrial' -- > calculate the expectancy
        (analytically) on each trial, then sum over all trials;
        'analytic_TrialAverage' -- > calculate the expectancy
        by averaging over trials (cf. Gruen et al. 2003);
        'surrogate_TrialByTrial' -- > calculate the distribution
        of expected coincidences by spike time randomization in
        each trial and sum over trials.
        Default is 'analytic_TrialByTrial'.
    t_start : float or pq.Quantity, optional
        The start time of the analysis.  If not specified, retrieved
        from the `t_start` attribute of the first spike train.
    t_stop : float or pq.Quantity, optional
        The stop time of the analysis.  If not specified, retrieved
        from the `t_stop` attribute of the first spike train.
    binary : bool
        Only `True` is implemented at the moment.
    n_surr : int, optional
        The number of surrogates to be used.
        Default is 100.

    Returns
    -------
    dict
        Keys 'Js', 'indices', 'n_emp', 'n_exp' and 'rate_avg'; each
        value is indexed per analysis window ('indices' is a dict keyed
        'trial<i>' with the absolute bin positions of the pattern).

    Raises
    ------
    ValueError
        If `data` is not in the format specified above.
    NotImplementedError
        If `binary` is not True.

    Warns
    -----
    UserWarning
        If the ratio between `winsize` or `winstep` and `bin_size` is
        not an integer.
    """
    if not isinstance(data[0][0], neo.SpikeTrain):
        raise ValueError(
            "structure of the data is not correct: 0-axis should be trials, "
            "1-axis units and 2-axis neo spike trains")
    # Default the analysis interval to the first spike train's extent.
    if t_start is None:
        t_start = data[0][0].t_start.rescale('ms')
    if t_stop is None:
        t_stop = data[0][0].t_stop.rescale('ms')
    # position of all windows (left edges)
    t_winpos = _winpos(t_start, t_stop, winsize, winstep, position='left-edge')
    t_winpos_bintime = _bintime(t_winpos, bin_size)
    winsize_bintime = _bintime(winsize, bin_size)
    winstep_bintime = _bintime(winstep, bin_size)
    if winsize_bintime * bin_size != winsize:
        warnings.warn("The ratio between the winsize ({winsize}) and the "
                      "bin_size ({bin_size}) is not an integer".format(
                          winsize=winsize,
                          bin_size=bin_size))
    if winstep_bintime * bin_size != winstep:
        warnings.warn("The ratio between the winstep ({winstep}) and the "
                      "bin_size ({bin_size}) is not an integer".format(
                          winstep=winstep,
                          bin_size=bin_size))
    num_tr, N = np.shape(data)[:2]
    n_bins = int((t_stop - t_start) / bin_size)
    # Bin every trial into one (trials, neurons, bins) binary array.
    mat_tr_unit_spt = np.zeros((len(data), N, n_bins))
    for tr, sts in enumerate(data):
        sts = list(sts)
        bs = conv.BinnedSpikeTrain(
            sts, t_start=t_start, t_stop=t_stop, bin_size=bin_size)
        if binary is True:
            mat = bs.to_bool_array()
        else:
            raise NotImplementedError(
                "The method works only with binary matrices at the moment")
        mat_tr_unit_spt[tr] = mat
    num_win = len(t_winpos)
    Js_win, n_exp_win, n_emp_win = (np.zeros(num_win) for _ in range(3))
    rate_avg = np.zeros((num_win, N))
    indices_win = {}
    for i in range(num_tr):
        indices_win['trial' + str(i)] = []
    # Slide the window over the binned data and run the UE analysis on
    # each window.
    for i, win_pos in enumerate(t_winpos_bintime):
        mat_win = mat_tr_unit_spt[:, :, win_pos:win_pos + winsize_bintime]
        if method == 'surrogate_TrialByTrial':
            Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[
                i], indices_lst = _UE(
                mat_win, pattern_hash, method, n_surr=n_surr)
        else:
            Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[
                i], indices_lst = _UE(mat_win, pattern_hash, method)
        # Collect, per trial, the absolute bin positions of the pattern
        # occurrences (window-relative index + window offset).
        for j in range(num_tr):
            if len(indices_lst[j][0]) > 0:
                indices_win[
                    'trial' + str(j)] = np.append(
                    indices_win['trial' + str(j)], indices_lst[j][0] + win_pos)
    return {'Js': Js_win, 'indices': indices_win, 'n_emp': n_emp_win,
            'n_exp': n_exp_win, 'rate_avg': rate_avg / bin_size}
|
alperyeg/elephant
|
elephant/unitary_event_analysis.py
|
Python
|
bsd-3-clause
| 28,001
|
[
"NEURON"
] |
05d30f6eaf7a6268d866dacdba24f472c197a96e3caefc60fb2849e0f48d00a7
|
import numpy as np
from ase import Atoms
from gpaw import GPAW
from gpaw.wavefunctions.pw import PW
from gpaw.test import equal
# Regression test: total energy of a Li cubic cell for a range of lattice
# constants, fitted with a parabola to locate the equilibrium constant.
# NOTE(review): this file uses the Python 2 print statement.
bulk = Atoms('Li', pbc=True)
k = 4
# 200 eV plane-wave cutoff, 4x4x4 k-point sampling, RMM-DIIS eigensolver.
calc = GPAW(mode=PW(200), kpts=(k, k, k), eigensolver='rmm-diis')
bulk.set_calculator(calc)
e = []
niter = []  # NOTE(review): never filled or read below -- appears unused
A = [2.6, 2.65, 2.7, 2.75, 2.8]
for a in A:
    bulk.set_cell((a, a, a))
    e.append(bulk.get_potential_energy())
# Minimum of the quadratic fit E(a): the root of its first derivative.
a = np.roots(np.polyder(np.polyfit(A, e, 2), 1))[0]
print 'a =', a
equal(a, 2.65247379609, 0.001)  # reference value, 0.001 tolerance
|
robwarm/gpaw-symm
|
gpaw/test/pw/bulk.py
|
Python
|
gpl-3.0
| 488
|
[
"ASE",
"GPAW"
] |
bc3361a389c903f40dcc01dc485614ef74cf0727452bb2753b7f16f403236e9f
|
import time, sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../../')
from py2Periodic.physics import twoLayerQG
from numpy import pi
# Physical and numerical parameters of the two-layer quasi-geostrophic
# model (units per py2Periodic's conventions -- TODO confirm).
params = {
    'f0' : 1.0e-4,
    'Lx' : 1.0e6,
    'beta' : 1.5e-11,
    'defRadius' : 1.5e4,
    'H1' : 500.0,
    'H2' : 2000.0,
    'U1' : 2.5e-2,
    'U2' : 0.0,
    'bottomDrag' : 1.0e-7,
    'nx' : 128,
    'dt' : 5.0e3,
    'visc' : 4.0e8,
    'viscOrder' : 4.0,
    'timeStepper': 'AB3',
    'nThreads' : 4,
    'useFilter' : False,
}
# Create the two-layer model
qg = twoLayerQG.model(**params)
qg.describe_model()
# Initial condition:
# small-amplitude random vorticity in both layers, scaled by a Rossby
# number Ro and the Coriolis parameter f0.
Ro = 1.0e-3
f0 = 1.0e-4
q1 = Ro*f0*np.random.standard_normal(qg.physVarShape)
q2 = Ro*f0*np.random.standard_normal(qg.physVarShape)
qg.set_q1_and_q2(q1, q2)
# Gaussian hill topography
# centered in the domain, width Lx/20, height 10% of the lower layer.
(x0, y0) = (qg.Lx/2.0, qg.Ly/2.0)
rTop = qg.Lx/20.0
h = 0.1*qg.H2*np.exp( -( (qg.x-x0)**2.0 + (qg.y-y0)**2.0 )/(2.0*rTop**2.0) )
qg.set_topography(h)
# Run a loop
# NOTE(review): nt is a float (1e3) passed as nSteps -- presumably the
# model accepts/casts it; confirm.  Plots are refreshed after every
# nt-step chunk.
nt = 1e3
for ii in np.arange(0, 1e3):
    qg.step_nSteps(nSteps=nt, dnLog=nt)
    qg.update_state_variables()
    fig = plt.figure('Perturbation vorticity', figsize=(8, 8)); plt.clf()
    plt.subplot(221); plt.imshow(qg.q1)
    plt.subplot(222); plt.imshow(qg.q2)
    plt.subplot(223); plt.imshow(np.abs(qg.soln[0:qg.ny//2, :, 0]))
    plt.subplot(224); plt.imshow(np.abs(qg.soln[0:qg.ny//2, :, 1]))
    plt.pause(0.01), plt.draw()
print("Close the plot to end the program")
plt.show()
|
glwagner/py2Periodic
|
tests/twoLayerQG/testTwoLayerTopography.py
|
Python
|
mit
| 1,545
|
[
"Gaussian"
] |
7dd55bea08f080aca4f4d7d2b819460d7e559f3ba0689103ef3795721d337a06
|
# $ ipython --gui=wx
# In [1]: %run visualization/plot-2.py
# In [2]: plot("z_dual_1.norms")
from mayavi import mlab
import numpy
import re
def mycolor(x):
    """Map a scale value x in [0, 1] to an (r, g, b) triple of floats.

    The value indexes a 256-entry black -> red -> orange -> white RGBA
    lookup table; the 8-bit channels are normalized to [0, 1] and the
    alpha channel (always 255) is dropped.

    Raises Exception when x lies outside [0, 1].
    """
    # 256 RGBA rows; int(x * 255) is at most 255, so indexing is safe.
    table = [
        [0, 0, 0, 255], [1, 0, 0, 255], [2, 0, 0, 255], [4, 0, 0, 255],
        [5, 0, 0, 255], [6, 0, 0, 255], [8, 0, 0, 255], [9, 0, 0, 255],
        [10, 0, 0, 255], [12, 0, 0, 255], [14, 0, 0, 255], [16, 0, 0, 255],
        [17, 0, 0, 255], [18, 0, 0, 255], [20, 0, 0, 255], [21, 0, 0, 255],
        [23, 0, 0, 255], [24, 0, 0, 255], [26, 0, 0, 255], [27, 0, 0, 255],
        [28, 0, 0, 255], [29, 0, 0, 255], [31, 0, 0, 255], [32, 0, 0, 255],
        [33, 0, 0, 255], [35, 0, 0, 255], [36, 0, 0, 255], [37, 0, 0, 255],
        [39, 0, 0, 255], [40, 0, 0, 255], [42, 0, 0, 255], [43, 0, 0, 255],
        [46, 0, 0, 255], [47, 0, 0, 255], [48, 0, 0, 255], [50, 0, 0, 255],
        [51, 0, 0, 255], [53, 0, 0, 255], [54, 0, 0, 255], [55, 0, 0, 255],
        [56, 0, 0, 255], [58, 0, 0, 255], [59, 0, 0, 255], [60, 0, 0, 255],
        [62, 0, 0, 255], [63, 0, 0, 255], [65, 0, 0, 255], [66, 0, 0, 255],
        [68, 0, 0, 255], [69, 0, 0, 255], [70, 0, 0, 255], [71, 0, 0, 255],
        [73, 0, 0, 255], [74, 0, 0, 255], [77, 0, 0, 255], [78, 0, 0, 255],
        [80, 0, 0, 255], [81, 0, 0, 255], [82, 0, 0, 255], [84, 0, 0, 255],
        [85, 0, 0, 255], [86, 0, 0, 255], [88, 0, 0, 255], [89, 0, 0, 255],
        [91, 0, 0, 255], [93, 0, 0, 255], [95, 0, 0, 255], [96, 0, 0, 255],
        [97, 0, 0, 255], [98, 0, 0, 255], [100, 0, 0, 255], [101, 0, 0, 255],
        [102, 0, 0, 255], [104, 0, 0, 255], [105, 0, 0, 255], [108, 0, 0, 255],
        [110, 0, 0, 255], [111, 0, 0, 255], [113, 0, 0, 255], [114, 0, 0, 255],
        [115, 0, 0, 255], [116, 0, 0, 255], [118, 0, 0, 255], [119, 0, 0, 255],
        [120, 0, 0, 255], [122, 0, 0, 255], [123, 0, 0, 255], [124, 0, 0, 255],
        [126, 0, 0, 255], [127, 0, 0, 255], [128, 0, 0, 255], [130, 0, 0, 255],
        [131, 0, 0, 255], [133, 0, 0, 255], [134, 0, 0, 255], [135, 0, 0, 255],
        [138, 0, 0, 255], [140, 0, 0, 255], [140, 0, 0, 255], [142, 0, 0, 255],
        [143, 0, 0, 255], [145, 0, 0, 255], [146, 0, 0, 255], [147, 0, 0, 255],
        [149, 0, 0, 255], [150, 0, 0, 255], [152, 0, 0, 255], [153, 0, 0, 255],
        [155, 0, 0, 255], [156, 0, 0, 255], [157, 0, 0, 255], [158, 0, 0, 255],
        [160, 0, 0, 255], [161, 0, 0, 255], [162, 0, 0, 255], [164, 0, 0, 255],
        [165, 0, 0, 255], [167, 0, 0, 255], [169, 0, 0, 255], [170, 0, 0, 255],
        [172, 0, 0, 255], [173, 0, 0, 255], [175, 1, 0, 255], [176, 3, 0, 255],
        [177, 4, 0, 255], [179, 6, 0, 255], [180, 8, 0, 255], [182, 10, 0, 255],
        [183, 13, 0, 255], [185, 16, 0, 255], [187, 17, 0, 255], [188, 19, 0, 255],
        [189, 20, 0, 255], [191, 22, 0, 255], [192, 24, 0, 255], [194, 26, 0, 255],
        [195, 28, 0, 255], [197, 30, 0, 255], [198, 32, 0, 255], [200, 34, 0, 255],
        [202, 36, 0, 255], [203, 38, 0, 255], [205, 40, 0, 255], [206, 42, 0, 255],
        [207, 44, 0, 255], [209, 46, 0, 255], [210, 48, 0, 255], [211, 49, 0, 255],
        [212, 51, 0, 255], [214, 52, 0, 255], [215, 54, 0, 255], [217, 56, 0, 255],
        [218, 58, 0, 255], [220, 60, 0, 255], [221, 61, 0, 255], [222, 63, 0, 255],
        [224, 65, 0, 255], [225, 67, 0, 255], [226, 68, 0, 255], [227, 70, 0, 255],
        [229, 72, 0, 255], [232, 76, 0, 255], [233, 77, 0, 255], [234, 79, 0, 255],
        [236, 81, 0, 255], [237, 83, 0, 255], [239, 85, 0, 255], [240, 86, 0, 255],
        [241, 88, 0, 255], [242, 89, 0, 255], [244, 91, 0, 255], [245, 93, 0, 255],
        [247, 95, 0, 255], [248, 97, 0, 255], [249, 99, 0, 255], [251, 101, 0, 255],
        [252, 102, 0, 255], [253, 103, 0, 255], [255, 105, 0, 255], [255, 107, 0, 255],
        [255, 109, 0, 255], [255, 111, 0, 255], [255, 114, 0, 255], [255, 117, 0, 255],
        [255, 118, 0, 255], [255, 120, 0, 255], [255, 121, 0, 255], [255, 123, 0, 255],
        [255, 125, 0, 255], [255, 127, 0, 255], [255, 129, 0, 255], [255, 131, 0, 255],
        [255, 133, 1, 255], [255, 136, 8, 255], [255, 137, 11, 255], [255, 139, 15, 255],
        [255, 141, 19, 255], [255, 143, 22, 255], [255, 145, 26, 255], [255, 146, 30, 255],
        [255, 148, 34, 255], [255, 150, 37, 255], [255, 152, 41, 255], [255, 154, 47, 255],
        [255, 157, 52, 255], [255, 159, 55, 255], [255, 161, 59, 255], [255, 162, 63, 255],
        [255, 164, 67, 255], [255, 166, 70, 255], [255, 168, 74, 255], [255, 170, 78, 255],
        [255, 171, 81, 255], [255, 173, 85, 255], [255, 174, 89, 255], [255, 176, 93, 255],
        [255, 178, 96, 255], [255, 180, 100, 255], [255, 182, 103, 255], [255, 184, 107, 255],
        [255, 186, 110, 255], [255, 187, 114, 255], [255, 188, 118, 255], [255, 190, 122, 255],
        [255, 192, 126, 255], [255, 196, 133, 255], [255, 198, 137, 255], [255, 200, 140, 255],
        [255, 202, 144, 255], [255, 203, 148, 255], [255, 205, 152, 255], [255, 206, 155, 255],
        [255, 208, 158, 255], [255, 210, 162, 255], [255, 212, 166, 255], [255, 214, 169, 255],
        [255, 216, 173, 255], [255, 217, 177, 255], [255, 219, 181, 255], [255, 221, 184, 255],
        [255, 222, 188, 255], [255, 224, 192, 255], [255, 226, 195, 255], [255, 228, 199, 255],
        [255, 229, 203, 255], [255, 231, 206, 255], [255, 234, 212, 255], [255, 237, 217, 255],
        [255, 238, 221, 255], [255, 240, 225, 255], [255, 242, 228, 255], [255, 244, 232, 255],
        [255, 245, 236, 255], [255, 247, 240, 255], [255, 249, 243, 255], [255, 251, 247, 255]
    ]
    if x < 0 or x > 1:
        raise Exception("illegal scale")
    index = int(x * 255)
    entry = table[index]
    return (entry[0] / 255., entry[1] / 255., entry[2] / 255.)
def stratify(number_bins, norms, *args):
    """Stratify the argument lists into number_bins strata by norm value.

    The args are parallel lists (e.g. center_i, center_j, width_i,
    width_j), each as long as `norms`.  The bins are half-open on
    [min(norms), max(norms)], so entries equal to the maximum miss
    every bin and are placed into the last one explicitly.

    Returns a list [norms_stratified, arg0_stratified, ...], each a
    list of number_bins sub-lists.

    Raises Exception when any argument list's length differs from
    len(norms).
    """
    length = len(norms)
    for i in range(len(args)):
        if len(args[i]) != length:
            print(norms)
            print(i)
            print(args[i])
            raise Exception("All lengths have to match")
    min_norm = numpy.amin(norms)
    max_norm = numpy.amax(norms)

    def bound(i):
        # Lower edge of bin i on the [min_norm, max_norm] scale.
        return min_norm + i * (max_norm - min_norm) / float(number_bins)

    args_stratified = [[[] for _ in range(number_bins)]
                       for _ in range(len(args))]
    norms_stratified = [[] for _ in range(number_bins)]
    print("stratifying into {:d} bins".format(number_bins))
    last_bin = number_bins - 1
    for i in range(len(norms)):
        found_bin = False
        for j in range(number_bins):
            if norms[i] >= bound(j) and norms[i] < bound(j + 1):
                for k in range(len(args)):
                    args_stratified[k][j].append(args[k][i])
                norms_stratified[j].append(norms[i])
                found_bin = True
                break
        if not found_bin:
            # BUG FIX: the fallback previously appended norms via the
            # leaked inner-loop variable `j`; use the last bin index
            # explicitly (same behavior, no reliance on scope leakage).
            for k in range(len(args)):
                args_stratified[k][last_bin].append(args[k][i])
            norms_stratified[last_bin].append(norms[i])
    result = [norms_stratified]
    for arg in args_stratified:
        result.append(arg)
    return result
def read_squares(fd, start, end=None):
    """Parse one matrix-square section from the open file ``fd``.

    The section format is the one produced by
    spamm_tree_print_leaves_2d_symm(): a header line matching ``start``,
    a line holding the block size, then one record per square
    (``i j width_i width_j norm``).  Reading stops at EOF or at a line
    matching ``end``, which is pushed back via seek() so the caller can
    parse the next section.

    Returns ``(block_size, i, j, width_i, width_j, norm)``, or ``None``
    if EOF is reached before the header matches.
    """
    # Bounded readline() so an empty return value reliably signals EOF.
    LINE_LENGTH = 1000
    square_pattern = re.compile("^\s*([0-9.eEdD+-]+)"
                                + "\s+([0-9.eEdD+-]+)"
                                + "\s+([0-9]+)"
                                + "\s+([0-9]+)"
                                + "\s+([0-9.eEdD+-]+)$")
    # Scan forward until the section header is found.
    matrix_name = None
    while matrix_name is None:
        line = fd.readline(LINE_LENGTH)
        if len(line) == 0:
            return None
        if start.search(line):
            matrix_name = line.rstrip()
    block_size = int(fd.readline())
    i, j, width_i, width_j, norm = [], [], [], [], []
    while True:
        position_before = fd.tell()
        line = fd.readline(LINE_LENGTH)
        if len(line) == 0:
            break
        if end is not None and end.search(line):
            # Rewind so the next section's header can be re-read.
            fd.seek(position_before)
            break
        match = square_pattern.search(line)
        i.append(float(match.group(1)))
        j.append(float(match.group(2)))
        width_i.append(int(match.group(3)))
        width_j.append(int(match.group(4)))
        norm.append(float(match.group(5)))
    print("loaded {:d} matrix squares from {:s}".format(len(i), matrix_name))
    return (block_size, i, j, width_i, width_j, norm)
def read_cubes(fd, start, end=None):
    """Parse one product-cube section from the open file ``fd``.

    The section format is the one produced by
    spamm_tree_print_leaves_2d_symm(): a header line matching ``start``,
    a line holding the block size (consumed but not returned), then one
    record per cube (``i j k width_i width_j width_k norm``).  Reading
    stops at EOF or at a line matching ``end``, which is pushed back via
    seek() so the caller can parse the next section.

    Returns ``(i, j, k, width_i, width_j, width_k, norm)``, or ``None``
    if EOF is reached before the header matches.
    """
    # Bounded readline() so an empty return value reliably signals EOF.
    LINE_LENGTH = 1000
    cube_pattern = re.compile("^\s*([0-9.eEdD+-]+)"
                              + "\s+([0-9.eEdD+-]+)"
                              + "\s+([0-9.eEdD+-]+)"
                              + "\s+([0-9]+)"
                              + "\s+([0-9]+)"
                              + "\s+([0-9]+)"
                              + "\s+([0-9.eEdD+-]+)$")
    # Scan forward until the section header is found.
    matrix_name = None
    while matrix_name is None:
        line = fd.readline(LINE_LENGTH)
        if len(line) == 0:
            return None
        if start.search(line):
            matrix_name = line.rstrip()
    # The block size line is consumed to keep the stream aligned, but the
    # value is not part of the return tuple.
    block_size = int(fd.readline())
    i, j, k = [], [], []
    width_i, width_j, width_k = [], [], []
    norm = []
    while True:
        position_before = fd.tell()
        line = fd.readline(LINE_LENGTH)
        if len(line) == 0:
            break
        if end is not None and end.search(line):
            # Rewind so the next section's header can be re-read.
            fd.seek(position_before)
            break
        match = cube_pattern.search(line)
        i.append(float(match.group(1)))
        j.append(float(match.group(2)))
        k.append(float(match.group(3)))
        width_i.append(int(match.group(4)))
        width_j.append(int(match.group(5)))
        width_k.append(int(match.group(6)))
        norm.append(float(match.group(7)))
    print("loaded {:d} product cubes from {:s}".format(len(i), matrix_name))
    return (i, j, k, width_i, width_j, width_k, norm)
@mlab.show
def plot(filename, number_bins=6):
    """Plot matrix squares and product cubes from a norms file.

    The file contains sections for matrices A, B and C followed by the
    product space.  Squares/cubes are stratified into number_bins norm
    bins and drawn with an opacity proportional to the bin index, so
    high-norm blocks appear more opaque.  Matrices A, B and C are drawn
    on three faces of the bounding box and the product cubes in its
    interior.  A .png snapshot is saved next to the input file.
    """
    # Section headers that delimit the four parts of the input file.
    re_matrix_A = re.compile("^\s*Matrix A$")
    re_matrix_B = re.compile("^\s*Matrix B$")
    re_matrix_C = re.compile("^\s*Matrix C$")
    re_product_space = re.compile("^\s*Product Space$")
    fd = open(filename)
    # NOTE(review): block_size is rebound by each read_squares() call;
    # this assumes A, B and C share the same block size -- confirm.
    (block_size, A_i, A_j,
     A_width_i, A_width_j, A_norm) = read_squares(fd, re_matrix_A, end=re_matrix_B)
    (block_size, B_i, B_j,
     B_width_i, B_width_j, B_norm) = read_squares(fd, re_matrix_B, end=re_matrix_C)
    (block_size, C_i, C_j,
     C_width_i, C_width_j, C_norm) = read_squares(fd, re_matrix_C, end=re_product_space)
    (prod_i, prod_j, prod_k,
     prod_width_i, prod_width_j, prod_width_k, prod_norm) = read_cubes(fd, re_product_space)
    # Get the current figure.
    figure = mlab.gcf()
    # Get the engine.
    engine = mlab.get_engine()
    # Clean the figure.
    mlab.clf()
    # Turn off rendering (for performance) while objects are added.
    figure.scene.disable_render = True
    # Tune background color (white).
    figure.scene.background = (1., 1., 1.)
    # Stratify matrix A squares by norm.
    (norms_stratified,
     A_i_stratified, A_j_stratified,
     A_width_i_stratified, A_width_j_stratified) = stratify(number_bins, A_norm,
                                                            A_i, A_j, A_width_i, A_width_j)
    # Add matrix A as flat (zero-thickness) cubes on the y=1 face; the
    # opacity grows with the norm bin index.
    print("Plotting matrix A")
    for i in range(number_bins):
        if len(A_i_stratified[i]) > 0:
            points = mlab.points3d(A_i_stratified[i],
                                   [1 for j in range(len(A_i_stratified[i]))],
                                   A_j_stratified[i],
                                   mode='cube',
                                   color=(0.0, 0.5019607843137255, 0.5019607843137255),
                                   scale_factor=1,
                                   opacity=0.5*(i+1)/float(number_bins))
            points.glyph.glyph_source.glyph_source.x_length = block_size
            points.glyph.glyph_source.glyph_source.y_length = 0
            points.glyph.glyph_source.glyph_source.z_length = block_size
    # Stratify matrix B squares by norm.
    (norms_stratified,
     B_i_stratified, B_j_stratified,
     B_width_i_stratified, B_width_j_stratified) = stratify(number_bins, B_norm,
                                                            B_i, B_j, B_width_i, B_width_j)
    # Add matrix B as flat cubes on the x=1 face.
    print("Plotting matrix B")
    for i in range(number_bins):
        if len(B_i_stratified[i]) > 0:
            points = mlab.points3d([1 for j in range(len(B_i_stratified[i]))],
                                   B_j_stratified[i],
                                   B_i_stratified[i],
                                   mode='cube',
                                   color=(0.5019607843137255, 0.0, 0.0),
                                   scale_factor=1,
                                   opacity=0.5*(i+1)/float(number_bins))
            points.glyph.glyph_source.glyph_source.x_length = 0
            points.glyph.glyph_source.glyph_source.y_length = block_size
            points.glyph.glyph_source.glyph_source.z_length = block_size
    # Stratify matrix C squares by norm.
    (norms_stratified,
     C_i_stratified, C_j_stratified,
     C_width_i_stratified, C_width_j_stratified) = stratify(number_bins, C_norm,
                                                            C_i, C_j, C_width_i, C_width_j)
    # Add matrix C as flat cubes on the z=1 face.
    print("Plotting matrix C")
    for i in range(number_bins):
        if len(C_i_stratified[i]) > 0:
            points = mlab.points3d(C_i_stratified[i],
                                   C_j_stratified[i],
                                   [1 for j in range(len(C_i_stratified[i]))],
                                   mode='cube',
                                   color=(0.5019607843137255, 0.0, 0.5019607843137255),
                                   scale_factor=1,
                                   opacity=0.5*(i+1)/float(number_bins))
            points.glyph.glyph_source.glyph_source.x_length = block_size
            points.glyph.glyph_source.glyph_source.y_length = block_size
            points.glyph.glyph_source.glyph_source.z_length = 0
    # Stratify product cubes by norm.
    (norms_stratified, prod_i_stratified, prod_j_stratified, prod_k_stratified) = stratify(
        number_bins, prod_norm, prod_i, prod_j, prod_k)
    # Add product cubes in the interior of the bounding box.
    print("Plotting product cubes")
    for i in range(number_bins):
        if len(prod_i_stratified[i]) > 0:
            points = mlab.points3d(prod_i_stratified[i],
                                   prod_j_stratified[i],
                                   prod_k_stratified[i],
                                   mode='cube',
                                   color=(0.2,.2,.2),
                                   scale_factor=1,
                                   opacity=0.75*(i+1)/float(number_bins))
            points.glyph.glyph_source.glyph_source.x_length = block_size
            points.glyph.glyph_source.glyph_source.y_length = block_size
            points.glyph.glyph_source.glyph_source.z_length = block_size
    # Extent of the plotted volume; NOTE: block_size/2 is a float
    # division under Python 3.
    i_max = max(numpy.amax(prod_i), numpy.amax(prod_j), numpy.amax(prod_k))+block_size/2
    print("i_max = {:e}".format(i_max))
    # Insert fake invisible data-set for axes.
    mlab.points3d([1, i_max], [1, i_max], [1, i_max], mode='cube', scale_factor=0)
    #mlab.axes(xlabel="i", ylabel="j", zlabel="k", extent=[1, xmax, 1, xmax, 1, xmax])
    # Box around the whole thing.
    mlab.outline(extent=[1, i_max, 1, i_max, 1, i_max])
    # NOTE(review): this indexes the outline module by position in the
    # pipeline tree; fragile if the scene graph layout changes.
    outline = engine.scenes[0].children[-1].children[0].children[1]
    outline.actor.property.color = (0, 0, 0)
    outline.actor.property.line_width = 2
    # Add axes.
    from mayavi.modules.axes import Axes
    axes = Axes()
    engine.add_module(axes, obj=None)
    axes.axes.label_format = '%-3.0f'
    axes.axes.width = 2
    axes.axes.x_label = 'i'
    axes.axes.y_label = 'j'
    axes.axes.z_label = 'k'
    axes.label_text_property.color = (0, 0, 0)
    axes.label_text_property.opacity = 0.0
    axes.label_text_property.shadow = True
    axes.label_text_property.shadow_offset = numpy.array([ 1, -1])
    axes.property.color = (0, 0, 0)
    axes.property.display_location = 'background'
    axes.title_text_property.color = (0, 0, 0)
    axes.title_text_property.shadow_offset = numpy.array([ 1, -1])
    # Re-enable rendering now that all actors are in place.
    figure.scene.disable_render = False
    figure.scene.camera.compute_view_plane_normal()
    import os.path
    # The commented-out blocks below are hand-tuned camera presets for
    # specific data sets; only the water_500 preset is currently active.
    #-------------------------------------------------------------------------------------------------------
    #./spammsand_invsqrt 33_x8_11_S.mm 1.d-1 1.d-3 1.d-1 1.d-1 D U R b=16
    # figure.scene.isometric_view()
    # png_filename = os.path.splitext(filename)[0] + "_isov.png"
    # print("Saving image to " + png_filename)
    # figure.scene.save(png_filename,size=(1024,1024))
    # figure.scene.camera.position = [2381.7518163797836, 2526.3678093421449, 2530.13269951962]
    # figure.scene.camera.focal_point = [440.00000000000028, 440.0000000000029, 439.99999999999733]
    # figure.scene.camera.view_angle = 30.0
    # figure.scene.camera.view_up = [-0.4189314063923294, -0.41776697205346547, 0.80620545383879905]
    # figure.scene.camera.clipping_range = [1986.7866107311997, 5491.0522577990569]
    # figure.scene.camera.compute_view_plane_normal()
    # figure.scene.render()
    # png_filename = os.path.splitext(filename)[0] + "_cant_x.png"
    # print("Saving image to " + png_filename)
    # figure.scene.save(png_filename,size=(1024,1024))
    #./spammsand_invsqrt water_500_to_6-311gss.mm 1.d-2 1.d-4 1.d-1 0.d0 D U R
    figure.scene.camera.position = [35816.735234550884, 38331.094829602851, 41443.525860211055]
    figure.scene.camera.focal_point = [2614.1156973829502, 2621.6382407405645, -241.34477379674968]
    figure.scene.camera.view_angle = 30.0
    figure.scene.camera.view_up = [-0.45361775222697864, -0.4654155004102597, 0.76001272807921516]
    figure.scene.camera.clipping_range = [26313.825398895184, 87716.669164634935]
    figure.scene.camera.compute_view_plane_normal()
    figure.scene.render()
    png_filename = os.path.splitext(filename)[0] + "_cant_x.png"
    print("Saving image to " + png_filename)
    figure.scene.save(png_filename,size=(768,768))
    #./spammsand_invsqrt water_100_to_6-311gss.mm 1.d-1 1.d-3 1.d-1 1.d-1 D U R
    # figure.scene.isometric_view()
    # png_filename = os.path.splitext(filename)[0] + "_isov.png"
    # print("Saving image to " + png_filename)
    # figure.scene.save(png_filename,size=(1024,1024))
    # figure.scene.camera.position = [7131.7121897731495, 7525.4214914466402, 8101.2951483680154]
    # figure.scene.camera.focal_point = [1702.818579072205, 1686.6399910935772, 1285.485703136407]
    # figure.scene.camera.view_angle = 30.0
    # figure.scene.camera.view_up = [-0.45361775222697859, -0.46541550041025964, 0.76001272807921505]
    # figure.scene.camera.clipping_range = [5042.9256084193876, 17324.211816361931]
    # figure.scene.camera.compute_view_plane_normal()
    # figure.scene.render()
    # png_filename = os.path.splitext(filename)[0] + "_cant_x.png"
    # print("Saving image to " + png_filename)
    # figure.scene.save(png_filename,size=(1024,1024))
    #-------------------------------------------------------------------------------------------------------
    # ./spammsand_invsqrt bcsstk14.mtx 1.d-2 1.d-4 1.d-1 0.d0 D U R
    # figure.scene.camera.position = [1045.203726965188, 1039.2064081296085, 6702.5003353789853]
    # figure.scene.camera.focal_point = [874.472594413058, 898.76786979832445, 939.79123074155348]
    # figure.scene.camera.view_angle = 30.0
    # figure.scene.camera.view_up = [-0.70042965936561086, -0.71270269865532587, 0.038120278204521671]
    # figure.scene.camera.clipping_range = [3849.5157839671483, 8271.9264727908048]
    # figure.scene.camera.compute_view_plane_normal()
    # png_filename = os.path.splitext(filename)[0] + "_x_zoomview.png"
    # print("Saving image to " + png_filename)
    # figure.scene.save(png_filename)
    # figure.scene.camera.position = [2030.6693081026092, 2031.6946128119116, 2101.6583772785889]
    # figure.scene.camera.focal_point = [904.5, 904.5, 904.5]
    # figure.scene.camera.view_angle = 30.0
    # figure.scene.camera.view_up = [-0.4254783969169838, -0.42401748949585194, 0.79948564862578286]
    # figure.scene.camera.clipping_range = [5.9413455805998874, 5941.3455805998874]
    # figure.scene.camera.compute_view_plane_normal()
    # figure.scene.render()
    # png_filename = os.path.splitext(filename)[0] + "_y_zoomview.png"
    # print("Saving image to " + png_filename)
    # figure.scene.save(png_filename)
    #-------------------------------------------------------------------------------------------------------
    # Turn rendering back on.
    # Save the figure to file.
    # import os.path
    # png_filename = os.path.splitext(filename)[0] + ".png"
    # print("Saving image to " + png_filename)
    # figure.scene.save(png_filename)
|
FreeON/spammpack
|
spammsand/visualization/plot-2.py
|
Python
|
bsd-3-clause
| 24,103
|
[
"Mayavi"
] |
0946fc64eb57e449d5dd7ffc64ff8d1e91104d25b14c36c789a9dc1eb3bb179d
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: Tobias Houska
'''
from . import _algorithm
from .. import analyser
class list_sampler(_algorithm):
    """
    This class holds the List sampler, which samples from a given spotpy database
    """

    _excluded_parameter_classes = ()

    def __init__(self, *args, **kwargs):
        """Set up the sampler.

        Input
        ----------
        spot_setup: class
            model: function
                Should be callable with a parameter combination of the parameter-function
                and return an list of simulation results (as long as evaluation list)
            parameter: function
                When called, it should return a random parameter combination. Which can
                be e.g. uniform or Gaussian
            objectivefunction: function
                Should return the objectivefunction for a given list of a model simulation and
                observation.
            evaluation: function
                Should return the true values as return by the model.

        dbname: str
            * Name of the database where parameter, objectivefunction value and simulation results will be saved.

        dbformat: str
            * ram: fast suited for short sampling time. no file will be created and results are saved in an array.
            * csv: A csv file will be created, which you can import afterwards.

        parallel: str
            * seq: Sequentiel sampling (default): Normal iterations on one core of your cpu.
            * mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).

        save_sim: boolean
            * True:  Simulation results will be saved
            * False: Simulation results will not be saved
        """
        # Fix the algorithm name; everything else is forwarded unchanged
        # to the _algorithm base class.
        kwargs['algorithm_name'] = 'List Sampler'
        super(list_sampler, self).__init__(*args, **kwargs)

    def sample(self, repetitions=None):
        """Replay parameter combinations stored in a spotpy csv database.

        Parameters
        ----------
        Optional:
        repetitions: int
            maximum number of function evaluations allowed during sampling
            If not given number if iterations will be determined based on given list
        """
        # Load the previously sampled parameter sets from the csv database.
        parameters = analyser.load_csv_parameter_results(self.dbname)
        # Write the replayed results to '<dbname>list' so the source
        # database is not overwritten.
        self.dbname = self.dbname + 'list'
        if not repetitions:
            repetitions = len(parameters)
        self.set_repetiton(repetitions)
        print('Starting the List sampler with {0} repetitions...'.format(repetitions))
        run_schedule = ((run, list(parameters[run]))
                        for run in range(int(repetitions)))
        for run, parameter_set, simulations in self.repeat(run_schedule):
            # Score the run and hand the results to the database layer.
            self.postprocessing(run, list(parameter_set), simulations)
        self.final_call()
|
thouska/spotpy
|
spotpy/algorithms/list_sampler.py
|
Python
|
mit
| 3,003
|
[
"Gaussian"
] |
65eabddea901838ed9d187a057b93ec11d3fb6fe630b443f957a389020df76e6
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.standard_library import hooks
with hooks():
from itertools import zip_longest
from re import compile as re_compile
from collections import Counter, defaultdict
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
from skbio import (
BiologicalSequence, NucleotideSequence, DNASequence, RNASequence,
ProteinSequence)
from skbio.sequence import BiologicalSequenceError
class BiologicalSequenceTests(TestCase):
def setUp(self):
self.b1 = BiologicalSequence('GATTACA', quality=range(7))
self.b2 = BiologicalSequence(
'ACCGGTACC', id="test-seq-2",
description="A test sequence")
self.b3 = BiologicalSequence(
'GREG', id="test-seq-3", description="A protein sequence")
self.b4 = BiologicalSequence(
'PRTEIN', id="test-seq-4")
self.b5 = BiologicalSequence(
'LLPRTEIN', description="some description")
self.b6 = BiologicalSequence('ACGTACGTACGT')
self.b7 = BiologicalSequence('..--..', quality=range(6))
self.b8 = BiologicalSequence('HE..--..LLO', id='hello',
description='gapped hello',
quality=range(11))
def test_init_varied_input(self):
# init as string
b = BiologicalSequence('ACCGGXZY')
self.assertEqual(str(b), 'ACCGGXZY')
self.assertEqual(b.id, "")
self.assertEqual(b.description, "")
# init as string with optional values
b = BiologicalSequence(
'ACCGGXZY', 'test-seq-1', 'The first test sequence')
self.assertEqual(str(b), 'ACCGGXZY')
self.assertEqual(b.id, "test-seq-1")
self.assertEqual(b.description, "The first test sequence")
# test init as a different string
b = BiologicalSequence('WRRTY')
self.assertEqual(str(b), 'WRRTY')
# init as list
b = BiologicalSequence(list('ACCGGXZY'))
self.assertEqual(str(b), 'ACCGGXZY')
self.assertEqual(b.id, "")
self.assertEqual(b.description, "")
# init as tuple
b = BiologicalSequence(tuple('ACCGGXZY'))
self.assertEqual(str(b), 'ACCGGXZY')
self.assertEqual(b.id, "")
self.assertEqual(b.description, "")
def test_init_with_validation(self):
self.assertRaises(BiologicalSequenceError, BiologicalSequence, "ACC",
validate=True)
try:
# no error raised when only allow characters are passed
BiologicalSequence("..--..", validate=True)
except BiologicalSequenceError:
self.assertTrue(False)
def test_init_with_invalid_quality(self):
# invalid dtype
with self.assertRaises(TypeError):
BiologicalSequence('ACGT', quality=[2, 3, 4.1, 5])
# wrong number of dimensions (2-D)
with self.assertRaisesRegexp(BiologicalSequenceError, '1-D'):
BiologicalSequence('ACGT', quality=[[2, 3], [4, 5]])
# wrong number of elements
with self.assertRaisesRegexp(BiologicalSequenceError, '\(3\).*\(4\)'):
BiologicalSequence('ACGT', quality=[2, 3, 4])
# negatives
with self.assertRaisesRegexp(BiologicalSequenceError,
'quality scores.*greater than.*zero'):
BiologicalSequence('ACGT', quality=[2, 3, -1, 4])
def test_contains(self):
self.assertTrue('G' in self.b1)
self.assertFalse('g' in self.b1)
def test_eq_and_ne(self):
self.assertTrue(self.b1 == self.b1)
self.assertTrue(self.b2 == self.b2)
self.assertTrue(self.b3 == self.b3)
self.assertTrue(self.b1 != self.b3)
self.assertTrue(self.b1 != self.b2)
self.assertTrue(self.b2 != self.b3)
# identicial sequences of the same type are equal, even if they have
# different ids, descriptions, and/or quality
self.assertTrue(
BiologicalSequence('ACGT') == BiologicalSequence('ACGT'))
self.assertTrue(
BiologicalSequence('ACGT', id='a') ==
BiologicalSequence('ACGT', id='b'))
self.assertTrue(
BiologicalSequence('ACGT', description='c') ==
BiologicalSequence('ACGT', description='d'))
self.assertTrue(
BiologicalSequence('ACGT', id='a', description='c') ==
BiologicalSequence('ACGT', id='b', description='d'))
self.assertTrue(
BiologicalSequence('ACGT', id='a', description='c',
quality=[1, 2, 3, 4]) ==
BiologicalSequence('ACGT', id='b', description='d',
quality=[5, 6, 7, 8]))
# different type causes sequences to not be equal
self.assertFalse(
BiologicalSequence('ACGT') == NucleotideSequence('ACGT'))
def test_getitem(self):
# use equals method to ensure that id, description, and sliced
# quality are correctly propagated to the resulting sequence
self.assertTrue(self.b1[0].equals(
BiologicalSequence('G', quality=(0,))))
self.assertTrue(self.b1[:].equals(
BiologicalSequence('GATTACA', quality=range(7))))
self.assertTrue(self.b1[::-1].equals(
BiologicalSequence('ACATTAG', quality=range(7)[::-1])))
# test a sequence without quality scores
b = BiologicalSequence('ACGT', id='foo', description='bar')
self.assertTrue(b[2:].equals(
BiologicalSequence('GT', id='foo', description='bar')))
self.assertTrue(b[2].equals(
BiologicalSequence('G', id='foo', description='bar')))
def test_getitem_indices(self):
# no ordering, repeated items
self.assertTrue(self.b1[[3, 5, 4, 0, 5, 0]].equals(
BiologicalSequence('TCAGCG', quality=(3, 5, 4, 0, 5, 0))))
# empty list
self.assertTrue(self.b1[[]].equals(BiologicalSequence('', quality=())))
# empty tuple
self.assertTrue(self.b1[()].equals(BiologicalSequence('', quality=())))
# single item
self.assertTrue(
self.b1[[2]].equals(BiologicalSequence('T', quality=(2,))))
# negatives
self.assertTrue(self.b1[[2, -2, 4]].equals(
BiologicalSequence('TCA', quality=(2, 5, 4))))
# tuple
self.assertTrue(self.b1[1, 2, 3].equals(
BiologicalSequence('ATT', quality=(1, 2, 3))))
self.assertTrue(self.b1[(1, 2, 3)].equals(
BiologicalSequence('ATT', quality=(1, 2, 3))))
# test a sequence without quality scores
self.assertTrue(self.b2[5, 4, 1].equals(
BiologicalSequence('TGC', id='test-seq-2',
description='A test sequence')))
def test_getitem_wrong_type(self):
with self.assertRaises(TypeError):
self.b1['1']
def test_getitem_out_of_range(self):
# seq with quality
with self.assertRaises(IndexError):
self.b1[42]
with self.assertRaises(IndexError):
self.b1[[1, 0, 23, 3]]
# seq without quality
with self.assertRaises(IndexError):
self.b2[43]
with self.assertRaises(IndexError):
self.b2[[2, 3, 22, 1]]
def test_hash(self):
self.assertTrue(isinstance(hash(self.b1), int))
def test_iter(self):
b1_iter = iter(self.b1)
for actual, expected in zip(b1_iter, "GATTACA"):
self.assertEqual(actual, expected)
self.assertRaises(StopIteration, lambda: next(b1_iter))
def _compare_k_words_results(self, observed, expected):
for obs, exp in zip_longest(observed, expected, fillvalue=None):
# use equals to compare quality, id, description, sequence, and
# type
self.assertTrue(obs.equals(exp))
def test_k_words_overlapping_true(self):
expected = [
BiologicalSequence('G', quality=[0]),
BiologicalSequence('A', quality=[1]),
BiologicalSequence('T', quality=[2]),
BiologicalSequence('T', quality=[3]),
BiologicalSequence('A', quality=[4]),
BiologicalSequence('C', quality=[5]),
BiologicalSequence('A', quality=[6])
]
self._compare_k_words_results(
self.b1.k_words(1, overlapping=True), expected)
expected = [
BiologicalSequence('GA', quality=[0, 1]),
BiologicalSequence('AT', quality=[1, 2]),
BiologicalSequence('TT', quality=[2, 3]),
BiologicalSequence('TA', quality=[3, 4]),
BiologicalSequence('AC', quality=[4, 5]),
BiologicalSequence('CA', quality=[5, 6])
]
self._compare_k_words_results(
self.b1.k_words(2, overlapping=True), expected)
expected = [
BiologicalSequence('GAT', quality=[0, 1, 2]),
BiologicalSequence('ATT', quality=[1, 2, 3]),
BiologicalSequence('TTA', quality=[2, 3, 4]),
BiologicalSequence('TAC', quality=[3, 4, 5]),
BiologicalSequence('ACA', quality=[4, 5, 6])
]
self._compare_k_words_results(
self.b1.k_words(3, overlapping=True), expected)
expected = [
BiologicalSequence('GATTACA', quality=[0, 1, 2, 3, 4, 5, 6])
]
self._compare_k_words_results(
self.b1.k_words(7, overlapping=True), expected)
self.assertEqual(list(self.b1.k_words(8, overlapping=True)), [])
def test_k_words_overlapping_false(self):
expected = [
BiologicalSequence('G', quality=[0]),
BiologicalSequence('A', quality=[1]),
BiologicalSequence('T', quality=[2]),
BiologicalSequence('T', quality=[3]),
BiologicalSequence('A', quality=[4]),
BiologicalSequence('C', quality=[5]),
BiologicalSequence('A', quality=[6])
]
self._compare_k_words_results(
self.b1.k_words(1, overlapping=False), expected)
expected = [
BiologicalSequence('GA', quality=[0, 1]),
BiologicalSequence('TT', quality=[2, 3]),
BiologicalSequence('AC', quality=[4, 5])
]
self._compare_k_words_results(
self.b1.k_words(2, overlapping=False), expected)
expected = [
BiologicalSequence('GAT', quality=[0, 1, 2]),
BiologicalSequence('TAC', quality=[3, 4, 5])
]
self._compare_k_words_results(
self.b1.k_words(3, overlapping=False), expected)
expected = [
BiologicalSequence('GATTACA', quality=[0, 1, 2, 3, 4, 5, 6])
]
self._compare_k_words_results(
self.b1.k_words(7, overlapping=False), expected)
self.assertEqual(list(self.b1.k_words(8, overlapping=False)), [])
def test_k_words_invalid_k(self):
with self.assertRaises(ValueError):
list(self.b1.k_words(0))
with self.assertRaises(ValueError):
list(self.b1.k_words(-42))
def test_k_words_different_sequences(self):
expected = [
BiologicalSequence('HE.', quality=[0, 1, 2], id='hello',
description='gapped hello'),
BiologicalSequence('.--', quality=[3, 4, 5], id='hello',
description='gapped hello'),
BiologicalSequence('..L', quality=[6, 7, 8], id='hello',
description='gapped hello')
]
self._compare_k_words_results(
self.b8.k_words(3, overlapping=False), expected)
b = BiologicalSequence('')
self.assertEqual(list(b.k_words(3)), [])
def test_k_word_counts(self):
# overlapping = True
expected = Counter('GATTACA')
self.assertEqual(self.b1.k_word_counts(1, overlapping=True),
expected)
expected = Counter(['GAT', 'ATT', 'TTA', 'TAC', 'ACA'])
self.assertEqual(self.b1.k_word_counts(3, overlapping=True),
expected)
# overlapping = False
expected = Counter(['GAT', 'TAC'])
self.assertEqual(self.b1.k_word_counts(3, overlapping=False),
expected)
expected = Counter(['GATTACA'])
self.assertEqual(self.b1.k_word_counts(7, overlapping=False),
expected)
def test_k_word_frequencies(self):
# overlapping = True
expected = defaultdict(int)
expected['A'] = 3/7.
expected['C'] = 1/7.
expected['G'] = 1/7.
expected['T'] = 2/7.
self.assertEqual(self.b1.k_word_frequencies(1, overlapping=True),
expected)
expected = defaultdict(int)
expected['GAT'] = 1/5.
expected['ATT'] = 1/5.
expected['TTA'] = 1/5.
expected['TAC'] = 1/5.
expected['ACA'] = 1/5.
self.assertEqual(self.b1.k_word_frequencies(3, overlapping=True),
expected)
# overlapping = False
expected = defaultdict(int)
expected['GAT'] = 1/2.
expected['TAC'] = 1/2.
self.assertEqual(self.b1.k_word_frequencies(3, overlapping=False),
expected)
expected = defaultdict(int)
expected['GATTACA'] = 1.0
self.assertEqual(self.b1.k_word_frequencies(7, overlapping=False),
expected)
expected = defaultdict(int)
empty = BiologicalSequence('')
self.assertEqual(empty.k_word_frequencies(1, overlapping=False),
expected)
def test_len(self):
self.assertEqual(len(self.b1), 7)
self.assertEqual(len(self.b2), 9)
self.assertEqual(len(self.b3), 4)
def test_repr(self):
self.assertEqual(repr(self.b1),
"<BiologicalSequence: GATTACA (length: 7)>")
self.assertEqual(repr(self.b6),
"<BiologicalSequence: ACGTACGTAC... (length: 12)>")
def test_reversed(self):
b1_reversed = reversed(self.b1)
for actual, expected in zip(b1_reversed, "ACATTAG"):
self.assertEqual(actual, expected)
self.assertRaises(StopIteration, lambda: next(b1_reversed))
def test_str(self):
self.assertEqual(str(self.b1), "GATTACA")
self.assertEqual(str(self.b2), "ACCGGTACC")
self.assertEqual(str(self.b3), "GREG")
def test_alphabet(self):
self.assertEqual(self.b1.alphabet(), set())
def test_gap_alphabet(self):
self.assertEqual(self.b1.gap_alphabet(), set('-.'))
def test_sequence(self):
self.assertEqual(self.b1.sequence, "GATTACA")
self.assertEqual(self.b2.sequence, "ACCGGTACC")
self.assertEqual(self.b3.sequence, "GREG")
def test_id(self):
self.assertEqual(self.b1.id, "")
self.assertEqual(self.b2.id, "test-seq-2")
self.assertEqual(self.b3.id, "test-seq-3")
def test_description(self):
self.assertEqual(self.b1.description, "")
self.assertEqual(self.b2.description, "A test sequence")
self.assertEqual(self.b3.description, "A protein sequence")
def test_quality(self):
a = BiologicalSequence('ACA', quality=(22, 22, 1))
# should get back a read-only numpy array of int dtype
self.assertIsInstance(a.quality, np.ndarray)
self.assertEqual(a.quality.dtype, np.int)
npt.assert_equal(a.quality, np.array((22, 22, 1)))
# test that we can't mutate the quality scores
with self.assertRaises(ValueError):
a.quality[1] = 42
# test that we can't set the property
with self.assertRaises(AttributeError):
a.quality = (22, 22, 42)
def test_quality_not_provided(self):
b = BiologicalSequence('ACA')
self.assertIs(b.quality, None)
def test_quality_scalar(self):
b = BiologicalSequence('G', quality=2)
self.assertIsInstance(b.quality, np.ndarray)
self.assertEqual(b.quality.dtype, np.int)
self.assertEqual(b.quality.shape, (1,))
npt.assert_equal(b.quality, np.array([2]))
def test_quality_empty(self):
b = BiologicalSequence('', quality=[])
self.assertIsInstance(b.quality, np.ndarray)
self.assertEqual(b.quality.dtype, np.int)
self.assertEqual(b.quality.shape, (0,))
npt.assert_equal(b.quality, np.array([]))
def test_quality_no_copy(self):
qual = np.array([22, 22, 1])
a = BiologicalSequence('ACA', quality=qual)
self.assertIs(a.quality, qual)
with self.assertRaises(ValueError):
a.quality[1] = 42
with self.assertRaises(ValueError):
qual[1] = 42
def test_has_quality(self):
a = BiologicalSequence('ACA', quality=(5, 4, 67))
self.assertTrue(a.has_quality())
b = BiologicalSequence('ACA')
self.assertFalse(b.has_quality())
def test_copy_default_behavior(self):
# minimal sequence, sequence with all optional attributes present, and
# a subclass of BiologicalSequence
for seq in self.b6, self.b8, RNASequence('ACGU', id='rna seq'):
copy = seq.copy()
self.assertTrue(seq.equals(copy))
self.assertFalse(seq is copy)
def test_copy_update_single_attribute(self):
copy = self.b8.copy(id='new id')
self.assertFalse(self.b8 is copy)
# they don't compare equal when we compare all attributes...
self.assertFalse(self.b8.equals(copy))
# ...but they *do* compare equal when we ignore id, as that was the
# only attribute that changed
self.assertTrue(self.b8.equals(copy, ignore=['id']))
# id should be what we specified in the copy call...
self.assertEqual(copy.id, 'new id')
# ..and shouldn't have changed on the original sequence
self.assertEqual(self.b8.id, 'hello')
def test_copy_update_multiple_attributes(self):
copy = self.b8.copy(id='new id', quality=range(20, 25),
sequence='ACGTA', description='new desc')
self.assertFalse(self.b8 is copy)
self.assertFalse(self.b8.equals(copy))
# attributes should be what we specified in the copy call...
self.assertEqual(copy.id, 'new id')
npt.assert_equal(copy.quality, np.array([20, 21, 22, 23, 24]))
self.assertEqual(copy.sequence, 'ACGTA')
self.assertEqual(copy.description, 'new desc')
# ..and shouldn't have changed on the original sequence
self.assertEqual(self.b8.id, 'hello')
npt.assert_equal(self.b8.quality, range(11))
self.assertEqual(self.b8.sequence, 'HE..--..LLO')
self.assertEqual(self.b8.description, 'gapped hello')
def test_copy_invalid_kwargs(self):
with self.assertRaises(TypeError):
self.b2.copy(id='bar', unrecognized_kwarg='baz')
def test_copy_extra_non_attribute_kwargs(self):
# test that we can pass through additional kwargs to the constructor
# that aren't related to biological sequence attributes (i.e., they
# aren't state that has to be copied)
# create an invalid DNA sequence
a = DNASequence('FOO', description='foo')
# should be able to copy it b/c validate defaults to False
b = a.copy()
self.assertTrue(a.equals(b))
self.assertFalse(a is b)
# specifying validate should raise an error when the copy is
# instantiated
with self.assertRaises(BiologicalSequenceError):
a.copy(validate=True)
def test_equals_true(self):
# sequences match, all other attributes are not provided
self.assertTrue(
BiologicalSequence('ACGT').equals(BiologicalSequence('ACGT')))
# all attributes are provided and match
a = BiologicalSequence('ACGT', id='foo', description='abc',
quality=[1, 2, 3, 4])
b = BiologicalSequence('ACGT', id='foo', description='abc',
quality=[1, 2, 3, 4])
self.assertTrue(a.equals(b))
# ignore type
a = BiologicalSequence('ACGT')
b = DNASequence('ACGT')
self.assertTrue(a.equals(b, ignore=['type']))
# ignore id
a = BiologicalSequence('ACGT', id='foo')
b = BiologicalSequence('ACGT', id='bar')
self.assertTrue(a.equals(b, ignore=['id']))
# ignore description
a = BiologicalSequence('ACGT', description='foo')
b = BiologicalSequence('ACGT', description='bar')
self.assertTrue(a.equals(b, ignore=['description']))
# ignore quality
a = BiologicalSequence('ACGT', quality=[1, 2, 3, 4])
b = BiologicalSequence('ACGT', quality=[5, 6, 7, 8])
self.assertTrue(a.equals(b, ignore=['quality']))
# ignore sequence
a = BiologicalSequence('ACGA')
b = BiologicalSequence('ACGT')
self.assertTrue(a.equals(b, ignore=['sequence']))
# ignore everything
a = BiologicalSequence('ACGA', id='foo', description='abc',
quality=[1, 2, 3, 4])
b = DNASequence('ACGT', id='bar', description='def',
quality=[5, 6, 7, 8])
self.assertTrue(a.equals(b, ignore=['quality', 'description', 'id',
'sequence', 'type']))
    def test_equals_false(self):
        """equals() returns False on any single attribute mismatch."""
        # type mismatch
        a = BiologicalSequence('ACGT', id='foo', description='abc',
                               quality=[1, 2, 3, 4])
        b = NucleotideSequence('ACGT', id='bar', description='def',
                               quality=[5, 6, 7, 8])
        self.assertFalse(a.equals(b, ignore=['quality', 'description', 'id']))
        # id mismatch
        a = BiologicalSequence('ACGT', id='foo')
        b = BiologicalSequence('ACGT', id='bar')
        self.assertFalse(a.equals(b))
        # description mismatch
        a = BiologicalSequence('ACGT', description='foo')
        b = BiologicalSequence('ACGT', description='bar')
        self.assertFalse(a.equals(b))
        # quality mismatch (both provided)
        a = BiologicalSequence('ACGT', quality=[1, 2, 3, 4])
        b = BiologicalSequence('ACGT', quality=[1, 2, 3, 5])
        self.assertFalse(a.equals(b))
        # quality mismatch (one provided)
        a = BiologicalSequence('ACGT', quality=[1, 2, 3, 4])
        b = BiologicalSequence('ACGT')
        self.assertFalse(a.equals(b))
        # sequence mismatch
        a = BiologicalSequence('ACGT')
        b = BiologicalSequence('TGCA')
        self.assertFalse(a.equals(b))
def test_count(self):
self.assertEqual(self.b1.count('A'), 3)
self.assertEqual(self.b1.count('T'), 2)
self.assertEqual(self.b1.count('TT'), 1)
    def test_degap(self):
        """degap() drops gaps and keeps id/description/filtered quality."""
        # use equals method to ensure that id, description, and filtered
        # quality are correctly propagated to the resulting sequence
        # no filtering, has quality
        self.assertTrue(self.b1.degap().equals(self.b1))
        # no filtering, doesn't have quality
        self.assertTrue(self.b2.degap().equals(self.b2))
        # everything is filtered, has quality
        self.assertTrue(self.b7.degap().equals(
            BiologicalSequence('', quality=[])))
        # some filtering, has quality
        self.assertTrue(self.b8.degap().equals(
            BiologicalSequence('HELLO', id='hello', description='gapped hello',
                               quality=[0, 1, 8, 9, 10])))
    def test_distance(self):
        """distance() defaults to Hamming and honors a custom distance_fn."""
        # note that test_hamming_distance covers default behavior more
        # extensively
        self.assertEqual(self.b1.distance(self.b1), 0.0)
        self.assertEqual(self.b1.distance(BiologicalSequence('GATTACC')), 1./7)
        # constant function: proves the distance_fn argument is applied
        def dumb_distance(x, y):
            return 42
        self.assertEqual(
            self.b1.distance(self.b1, distance_fn=dumb_distance), 42)
    def test_distance_unequal_length(self):
        """Default metric requires equal lengths; custom metrics need not."""
        # Hamming distance (default) requires that sequences are of equal
        # length
        with self.assertRaises(BiologicalSequenceError):
            self.b1.distance(self.b2)
        # alternate distance functions don't have that requirement (unless
        # it's implemented within the provided distance function)
        def dumb_distance(x, y):
            return 42
        self.assertEqual(
            self.b1.distance(self.b2, distance_fn=dumb_distance), 42)
def test_fraction_diff(self):
self.assertEqual(self.b1.fraction_diff(self.b1), 0., 5)
self.assertEqual(
self.b1.fraction_diff(BiologicalSequence('GATTACC')), 1. / 7., 5)
    def test_fraction_same(self):
        """fraction_same() reports the proportion of matching positions."""
        self.assertAlmostEqual(self.b1.fraction_same(self.b1), 1., 5)
        self.assertAlmostEqual(
            self.b1.fraction_same(BiologicalSequence('GATTACC')), 6. / 7., 5)
    def test_gap_maps(self):
        """gap_maps() returns (degapped->gapped, gapped->degapped) indices."""
        # in sequence with no gaps, the gap_maps are identical
        self.assertEqual(self.b1.gap_maps(),
                         ([0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6]))
        # in sequence with all gaps, the map of degapped to gapped is the empty
        # list (bc its length is 0), and the map of gapped to degapped is all
        # None
        self.assertEqual(self.b7.gap_maps(),
                         ([], [None, None, None, None, None, None]))
        self.assertEqual(self.b8.gap_maps(),
                         ([0, 1, 8, 9, 10],
                          [0, 1, None, None, None, None, None, None, 2, 3, 4]))
        # example from the gap_maps doc string
        self.assertEqual(BiologicalSequence('-ACCGA-TA-').gap_maps(),
                         ([1, 2, 3, 4, 5, 7, 8],
                          [None, 0, 1, 2, 3, 4, None, 5, 6, None]))
    def test_gap_vector(self):
        """gap_vector() yields one bool per position (True where gapped)."""
        self.assertEqual(self.b1.gap_vector(),
                         [False] * len(self.b1))
        # b7 consists entirely of gap characters
        self.assertEqual(self.b7.gap_vector(),
                         [True] * len(self.b7))
        self.assertEqual(self.b8.gap_vector(),
                         [False, False, True, True, True, True,
                          True, True, False, False, False])
    def test_unsupported_characters(self):
        """Characters outside the class alphabet are reported.

        The base class defines no alphabet, so every non-gap character in
        b1 is unsupported; b7 (gaps only, see test_gap_vector) has none.
        """
        self.assertEqual(self.b1.unsupported_characters(), set('GATC'))
        self.assertEqual(self.b7.unsupported_characters(), set())
    def test_has_unsupported_characters(self):
        """Boolean companion of unsupported_characters()."""
        self.assertTrue(self.b1.has_unsupported_characters())
        self.assertFalse(self.b7.has_unsupported_characters())
    def test_index(self):
        """ index functions as expected """
        self.assertEqual(self.b1.index('G'), 0)
        self.assertEqual(self.b1.index('A'), 1)
        # substring search: first occurrence of 'AC' in 'GATTACA'
        self.assertEqual(self.b1.index('AC'), 4)
        # missing substring raises ValueError, mirroring str.index
        self.assertRaises(ValueError, self.b1.index, 'x')
def test_is_gap(self):
self.assertTrue(self.b1.is_gap('.'))
self.assertTrue(self.b1.is_gap('-'))
self.assertFalse(self.b1.is_gap('A'))
self.assertFalse(self.b1.is_gap('x'))
self.assertFalse(self.b1.is_gap(' '))
self.assertFalse(self.b1.is_gap(''))
    def test_is_gapped(self):
        """is_gapped() is True iff at least one gap character is present."""
        self.assertFalse(self.b1.is_gapped())
        self.assertFalse(self.b2.is_gapped())
        self.assertTrue(self.b7.is_gapped())
        self.assertTrue(self.b8.is_gapped())
    def test_is_valid(self):
        """Base class has an empty alphabet, so only gaps are 'valid'."""
        self.assertFalse(self.b1.is_valid())
        self.assertTrue(self.b7.is_valid())
    def test_to_fasta(self):
        """to_fasta() formats header/sequence, honoring delimiter kwargs."""
        self.assertEqual(self.b1.to_fasta(), ">\nGATTACA\n")
        self.assertEqual(self.b1.to_fasta(terminal_character=""), ">\nGATTACA")
        self.assertEqual(self.b2.to_fasta(),
                         ">test-seq-2 A test sequence\nACCGGTACC\n")
        self.assertEqual(self.b3.to_fasta(),
                         ">test-seq-3 A protein sequence\nGREG\n")
        self.assertEqual(self.b4.to_fasta(),
                         ">test-seq-4\nPRTEIN\n")
        self.assertEqual(self.b5.to_fasta(),
                         "> some description\nLLPRTEIN\n")
        # alt parameters
        self.assertEqual(self.b2.to_fasta(field_delimiter=":"),
                         ">test-seq-2:A test sequence\nACCGGTACC\n")
        self.assertEqual(self.b2.to_fasta(terminal_character="!"),
                         ">test-seq-2 A test sequence\nACCGGTACC!")
        self.assertEqual(
            self.b2.to_fasta(field_delimiter=":", terminal_character="!"),
            ">test-seq-2:A test sequence\nACCGGTACC!")
    def test_upper(self):
        """upper() uppercases residues; gaps and metadata are untouched."""
        b = NucleotideSequence('GAt.ACa-', id='x', description='42',
                               quality=range(8))
        expected = NucleotideSequence('GAT.ACA-', id='x',
                                      description='42', quality=range(8))
        # use equals method to ensure that id, description, and quality are
        # correctly propagated to the resulting sequence
        self.assertTrue(b.upper().equals(expected))
    def test_lower(self):
        """lower() lowercases residues; gaps and metadata are untouched."""
        b = NucleotideSequence('GAt.ACa-', id='x', description='42',
                               quality=range(8))
        expected = NucleotideSequence('gat.aca-', id='x',
                                      description='42', quality=range(8))
        # use equals method to ensure that id, description, and quality are
        # correctly propagated to the resulting sequence
        self.assertTrue(b.lower().equals(expected))
    def test_regex_iter(self):
        """regex_iter() yields (start, end, match) per capture group."""
        pat = re_compile('(T+A)(CA)')
        obs = list(self.b1.regex_iter(pat))
        exp = [(2, 5, 'TTA'), (5, 7, 'CA')]
        self.assertEqual(obs, exp)
        # retrieve_group_0 additionally yields the full match first
        obs = list(self.b1.regex_iter(pat, retrieve_group_0=True))
        exp = [(2, 7, 'TTACA'), (2, 5, 'TTA'), (5, 7, 'CA')]
        self.assertEqual(obs, exp)
    def test_find_features_nonexistent_feature_type(self):
        """The base class defines no feature types, so lookup raises."""
        with self.assertRaises(ValueError):
            list(self.b1.find_features('purine_run'))
class NucelotideSequenceTests(TestCase):
    """Unit tests for the generic (DNA/RNA-agnostic) NucleotideSequence.

    NOTE(review): the class name is misspelled ("Nucelotide" instead of
    "Nucleotide"); left unchanged to avoid altering test discovery output.
    """

    def setUp(self):
        self.empty = NucleotideSequence('')
        self.b1 = NucleotideSequence('GATTACA')
        self.b2 = NucleotideSequence(
            'ACCGGUACC', id="test-seq-2",
            description="A test sequence")

    def test_alphabet(self):
        exp = {
            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'U', 'T',
            'W', 'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's',
            'r', 'u', 't', 'w', 'v', 'y'
        }
        # Test calling from an instance and purely static context.
        self.assertEqual(self.b1.alphabet(), exp)
        self.assertEqual(NucleotideSequence.alphabet(), exp)

    def test_gap_alphabet(self):
        self.assertEqual(self.b1.gap_alphabet(), set('-.'))

    def test_complement_map(self):
        # the generic class defines no complement mapping; complement
        # operations therefore raise (see test_complement below)
        exp = {}
        self.assertEqual(self.b1.complement_map(), exp)
        self.assertEqual(NucleotideSequence.complement_map(), exp)

    def test_iupac_standard_characters(self):
        exp = set("ACGTUacgtu")
        self.assertEqual(self.b1.iupac_standard_characters(), exp)
        self.assertEqual(NucleotideSequence.iupac_standard_characters(), exp)

    def test_iupac_degeneracies(self):
        exp = {
            # upper
            'B': set(['C', 'U', 'T', 'G']), 'D': set(['A', 'U', 'T', 'G']),
            'H': set(['A', 'C', 'U', 'T']), 'K': set(['U', 'T', 'G']),
            'M': set(['A', 'C']), 'N': set(['A', 'C', 'U', 'T', 'G']),
            'S': set(['C', 'G']), 'R': set(['A', 'G']),
            'W': set(['A', 'U', 'T']), 'V': set(['A', 'C', 'G']),
            'Y': set(['C', 'U', 'T']),
            # lower
            'b': set(['c', 'u', 't', 'g']), 'd': set(['a', 'u', 't', 'g']),
            'h': set(['a', 'c', 'u', 't']), 'k': set(['u', 't', 'g']),
            'm': set(['a', 'c']), 'n': set(['a', 'c', 'u', 't', 'g']),
            's': set(['c', 'g']), 'r': set(['a', 'g']),
            'w': set(['a', 'u', 't']), 'v': set(['a', 'c', 'g']),
            'y': set(['c', 'u', 't'])
        }
        self.assertEqual(self.b1.iupac_degeneracies(), exp)
        self.assertEqual(NucleotideSequence.iupac_degeneracies(), exp)
        # Test that we can modify a copy of the mapping without altering the
        # canonical representation.
        degen = NucleotideSequence.iupac_degeneracies()
        degen.update({'V': set("BRO"), 'Z': set("ZORRO")})
        self.assertNotEqual(degen, exp)
        self.assertEqual(NucleotideSequence.iupac_degeneracies(), exp)

    def test_iupac_degenerate_characters(self):
        exp = set(['B', 'D', 'H', 'K', 'M', 'N', 'S', 'R', 'W', 'V', 'Y',
                   'b', 'd', 'h', 'k', 'm', 'n', 's', 'r', 'w', 'v', 'y'])
        self.assertEqual(self.b1.iupac_degenerate_characters(), exp)
        self.assertEqual(NucleotideSequence.iupac_degenerate_characters(), exp)

    def test_iupac_characters(self):
        exp = {
            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'U', 'T',
            'W', 'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's',
            'r', 'u', 't', 'w', 'v', 'y'
        }
        self.assertEqual(self.b1.iupac_characters(), exp)
        self.assertEqual(NucleotideSequence.iupac_characters(), exp)

    def test_complement(self):
        # no complement_map defined on the generic class (see above)
        self.assertRaises(BiologicalSequenceError,
                          self.b1.complement)

    def test_reverse_complement(self):
        self.assertRaises(BiologicalSequenceError,
                          self.b1.reverse_complement)

    def test_is_reverse_complement(self):
        self.assertRaises(BiologicalSequenceError,
                          self.b1.is_reverse_complement, self.b1)

    def test_nondegenerates_invalid(self):
        with self.assertRaises(BiologicalSequenceError):
            list(NucleotideSequence('AZA').nondegenerates())

    def test_nondegenerates_empty(self):
        self.assertEqual(list(self.empty.nondegenerates()), [self.empty])

    def test_nondegenerates_no_degens(self):
        self.assertEqual(list(self.b1.nondegenerates()), [self.b1])

    def test_nondegenerates_all_degens(self):
        # Same chars.
        exp = [NucleotideSequence('CC'), NucleotideSequence('CG'),
               NucleotideSequence('GC'), NucleotideSequence('GG')]
        # Sort based on sequence string, as order is not guaranteed.
        obs = sorted(NucleotideSequence('SS').nondegenerates(), key=str)
        self.assertEqual(obs, exp)
        # Different chars.
        exp = [NucleotideSequence('AC'), NucleotideSequence('AG'),
               NucleotideSequence('GC'), NucleotideSequence('GG')]
        obs = sorted(NucleotideSequence('RS').nondegenerates(), key=str)
        self.assertEqual(obs, exp)
        # Odd number of chars.
        # 'N' expands to five characters (A, C, G, T, U) in the generic
        # class (see test_iupac_degeneracies), hence 5**3
        obs = list(NucleotideSequence('NNN').nondegenerates())
        self.assertEqual(len(obs), 5**3)

    def test_nondegenerates_mixed_degens(self):
        exp = [NucleotideSequence('AGC'), NucleotideSequence('AGT'),
               NucleotideSequence('AGU'), NucleotideSequence('GGC'),
               NucleotideSequence('GGT'), NucleotideSequence('GGU')]
        obs = sorted(NucleotideSequence('RGY').nondegenerates(), key=str)
        self.assertEqual(obs, exp)

    def test_nondegenerates_gap_mixed_case(self):
        exp = [NucleotideSequence('-A.a'), NucleotideSequence('-A.c'),
               NucleotideSequence('-C.a'), NucleotideSequence('-C.c')]
        obs = sorted(NucleotideSequence('-M.m').nondegenerates(), key=str)
        self.assertEqual(obs, exp)

    def test_find_features(self):
        exp = [(0, 2, 'GA'), (4, 5, 'A'), (6, 7, 'A')]
        obs = list(self.b1.find_features('purine_run'))
        self.assertEqual(obs, exp)
        exp = [(2, 4, 'TT'), (5, 6, 'C')]
        obs = list(self.b1.find_features('pyrimidine_run'))
        self.assertEqual(obs, exp)
        exp = [(0, 1, 'A'), (3, 5, 'GG'), (6, 7, 'A')]
        obs = list(self.b2.find_features('purine_run'))
        self.assertEqual(obs, exp)
        exp = [(1, 3, 'CC'), (5, 6, 'U'), (7, 9, 'CC')]
        obs = list(self.b2.find_features('pyrimidine_run'))
        self.assertEqual(obs, exp)

    def test_find_features_min_length(self):
        exp = [(0, 2, 'GA')]
        obs = list(self.b1.find_features('purine_run', 2))
        self.assertEqual(obs, exp)
        exp = [(2, 4, 'TT')]
        obs = list(self.b1.find_features('pyrimidine_run', 2))
        self.assertEqual(obs, exp)
        exp = [(3, 5, 'GG')]
        obs = list(self.b2.find_features('purine_run', 2))
        self.assertEqual(obs, exp)
        exp = [(1, 3, 'CC'), (7, 9, 'CC')]
        obs = list(self.b2.find_features('pyrimidine_run', 2))
        self.assertEqual(obs, exp)

    def test_find_features_no_feature_type(self):
        with self.assertRaises(ValueError):
            list(self.b1.find_features('nonexistent_feature_type'))

    def test_nondegenerates_propagate_optional_properties(self):
        seq = NucleotideSequence('RS', id='foo', description='bar',
                                 quality=[42, 999])
        exp = [
            NucleotideSequence('AC', id='foo', description='bar',
                               quality=[42, 999]),
            NucleotideSequence('AG', id='foo', description='bar',
                               quality=[42, 999]),
            NucleotideSequence('GC', id='foo', description='bar',
                               quality=[42, 999]),
            NucleotideSequence('GG', id='foo', description='bar',
                               quality=[42, 999])
        ]
        obs = sorted(seq.nondegenerates(), key=str)
        for o, e in zip(obs, exp):
            # use equals method to ensure that id, description, and quality are
            # correctly propagated to the resulting sequence
            self.assertTrue(o.equals(e))
class DNASequenceTests(TestCase):
    """Unit tests for DNASequence: DNA alphabet, complements, degeneracies."""

    def setUp(self):
        self.empty = DNASequence('')
        self.b1 = DNASequence('GATTACA')
        self.b2 = DNASequence('ACCGGTACC', id="test-seq-2",
                              description="A test sequence", quality=range(9))
        # b3 contains 'U', which is RNA-only and thus invalid for DNA
        self.b3 = DNASequence(
            'ACCGGUACC', id="bad-seq-1",
            description="Not a DNA sequence")
        self.b4 = DNASequence(
            'MRWSYKVHDBN', id="degen",
            description="All of the degenerate bases")
        self.b5 = DNASequence('.G--ATTAC-A...')

    def test_alphabet(self):
        exp = {
            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'T', 'W',
            'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r',
            't', 'w', 'v', 'y'
        }
        self.assertEqual(self.b1.alphabet(), exp)
        self.assertEqual(DNASequence.alphabet(), exp)

    def test_gap_alphabet(self):
        self.assertEqual(self.b1.gap_alphabet(), set('-.'))

    def test_complement_map(self):
        exp = {
            '-': '-', '.': '.', 'A': 'T', 'C': 'G', 'B': 'V', 'D': 'H',
            'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
            'R': 'Y', 'T': 'A', 'W': 'W', 'V': 'B', 'Y': 'R', 'a': 't',
            'c': 'g', 'b': 'v', 'd': 'h', 'g': 'c', 'h': 'd', 'k': 'm',
            'm': 'k', 'n': 'n', 's': 's', 'r': 'y', 't': 'a', 'w': 'w',
            'v': 'b', 'y': 'r'
        }
        self.assertEqual(self.b1.complement_map(), exp)
        self.assertEqual(DNASequence.complement_map(), exp)

    def test_iupac_standard_characters(self):
        exp = set("ACGTacgt")
        self.assertEqual(self.b1.iupac_standard_characters(), exp)
        self.assertEqual(DNASequence.iupac_standard_characters(), exp)

    def test_iupac_degeneracies(self):
        exp = {
            'B': set(['C', 'T', 'G']), 'D': set(['A', 'T', 'G']),
            'H': set(['A', 'C', 'T']), 'K': set(['T', 'G']),
            'M': set(['A', 'C']), 'N': set(['A', 'C', 'T', 'G']),
            'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'T']),
            'V': set(['A', 'C', 'G']), 'Y': set(['C', 'T']),
            'b': set(['c', 't', 'g']), 'd': set(['a', 't', 'g']),
            'h': set(['a', 'c', 't']), 'k': set(['t', 'g']),
            'm': set(['a', 'c']), 'n': set(['a', 'c', 't', 'g']),
            's': set(['c', 'g']), 'r': set(['a', 'g']), 'w': set(['a', 't']),
            'v': set(['a', 'c', 'g']), 'y': set(['c', 't'])
        }
        self.assertEqual(self.b1.iupac_degeneracies(), exp)
        self.assertEqual(DNASequence.iupac_degeneracies(), exp)

    def test_iupac_degenerate_characters(self):
        exp = set(['B', 'D', 'H', 'K', 'M', 'N', 'S', 'R', 'W', 'V', 'Y',
                   'b', 'd', 'h', 'k', 'm', 'n', 's', 'r', 'w', 'v', 'y'])
        self.assertEqual(self.b1.iupac_degenerate_characters(), exp)
        self.assertEqual(DNASequence.iupac_degenerate_characters(), exp)

    def test_iupac_characters(self):
        exp = {
            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'T', 'W',
            'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r',
            't', 'w', 'v', 'y'
        }
        self.assertEqual(self.b1.iupac_characters(), exp)
        self.assertEqual(DNASequence.iupac_characters(), exp)

    def test_complement(self):
        # use equals method to ensure that id, description, and quality are
        # correctly propagated to the resulting sequence
        self.assertTrue(self.b1.complement().equals(DNASequence("CTAATGT")))
        self.assertTrue(self.b2.complement().equals(
            DNASequence("TGGCCATGG", id="test-seq-2",
                        description="A test sequence", quality=range(9))))
        # b3 contains 'U', which has no DNA complement
        self.assertRaises(BiologicalSequenceError, self.b3.complement)
        self.assertTrue(self.b4.complement().equals(
            DNASequence("KYWSRMBDHVN", id="degen",
                        description="All of the degenerate bases")))
        self.assertTrue(self.b5.complement().equals(
            DNASequence(".C--TAATG-T...")))

    def test_reverse_complement(self):
        # use equals method to ensure that id, description, and (reversed)
        # quality scores are correctly propagated to the resulting sequence
        self.assertTrue(self.b1.reverse_complement().equals(
            DNASequence("TGTAATC")))
        self.assertTrue(self.b2.reverse_complement().equals(
            DNASequence("GGTACCGGT", id="test-seq-2",
                        description="A test sequence",
                        quality=range(9)[::-1])))
        self.assertRaises(BiologicalSequenceError, self.b3.reverse_complement)
        self.assertTrue(self.b4.reverse_complement().equals(
            DNASequence("NVHDBMRSWYK", id="degen",
                        description="All of the degenerate bases")))

    def test_unsupported_characters(self):
        self.assertEqual(self.b1.unsupported_characters(), set())
        self.assertEqual(self.b2.unsupported_characters(), set())
        self.assertEqual(self.b3.unsupported_characters(), set('U'))
        self.assertEqual(self.b4.unsupported_characters(), set())

    def test_has_unsupported_characters(self):
        self.assertFalse(self.b1.has_unsupported_characters())
        self.assertFalse(self.b2.has_unsupported_characters())
        self.assertTrue(self.b3.has_unsupported_characters())
        self.assertFalse(self.b4.has_unsupported_characters())

    def test_is_reverse_complement(self):
        self.assertFalse(self.b1.is_reverse_complement(self.b1))
        # id, description, and quality scores should be ignored (only sequence
        # data and type should be compared)
        self.assertTrue(self.b1.is_reverse_complement(
            DNASequence('TGTAATC', quality=range(7))))
        self.assertTrue(
            self.b4.is_reverse_complement(DNASequence('NVHDBMRSWYK')))

    def test_nondegenerates_invalid(self):
        with self.assertRaises(BiologicalSequenceError):
            list(DNASequence('AZA').nondegenerates())

    def test_nondegenerates_empty(self):
        self.assertEqual(list(self.empty.nondegenerates()), [self.empty])

    def test_nondegenerates_no_degens(self):
        self.assertEqual(list(self.b1.nondegenerates()), [self.b1])

    def test_nondegenerates_all_degens(self):
        # Same chars.
        exp = [DNASequence('CC'), DNASequence('CG'), DNASequence('GC'),
               DNASequence('GG')]
        # Sort based on sequence string, as order is not guaranteed.
        obs = sorted(DNASequence('SS').nondegenerates(), key=str)
        self.assertEqual(obs, exp)
        # Different chars.
        exp = [DNASequence('AC'), DNASequence('AG'), DNASequence('GC'),
               DNASequence('GG')]
        obs = sorted(DNASequence('RS').nondegenerates(), key=str)
        self.assertEqual(obs, exp)
        # Odd number of chars.
        # 'N' expands to four characters (A, C, G, T) for DNA, hence 4**3
        obs = list(DNASequence('NNN').nondegenerates())
        self.assertEqual(len(obs), 4**3)

    def test_nondegenerates_mixed_degens(self):
        exp = [DNASequence('AGC'), DNASequence('AGT'), DNASequence('GGC'),
               DNASequence('GGT')]
        obs = sorted(DNASequence('RGY').nondegenerates(), key=str)
        self.assertEqual(obs, exp)

    def test_nondegenerates_gap_mixed_case(self):
        exp = [DNASequence('-A.a'), DNASequence('-A.c'),
               DNASequence('-C.a'), DNASequence('-C.c')]
        obs = sorted(DNASequence('-M.m').nondegenerates(), key=str)
        self.assertEqual(obs, exp)
class RNASequenceTests(TestCase):
    """Unit tests for RNASequence: RNA alphabet, complements, degeneracies."""

    def setUp(self):
        self.empty = RNASequence('')
        self.b1 = RNASequence('GAUUACA')
        self.b2 = RNASequence('ACCGGUACC', id="test-seq-2",
                              description="A test sequence", quality=range(9))
        # b3 contains 'T', which is DNA-only and thus invalid for RNA
        self.b3 = RNASequence(
            'ACCGGTACC', id="bad-seq-1",
            description="Not a RNA sequence")
        self.b4 = RNASequence(
            'MRWSYKVHDBN', id="degen",
            description="All of the degenerate bases")
        self.b5 = RNASequence('.G--AUUAC-A...')

    def test_alphabet(self):
        exp = {
            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'U', 'W',
            'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r',
            'u', 'w', 'v', 'y'
        }
        self.assertEqual(self.b1.alphabet(), exp)
        self.assertEqual(RNASequence.alphabet(), exp)

    def test_gap_alphabet(self):
        self.assertEqual(self.b1.gap_alphabet(), set('-.'))

    def test_complement_map(self):
        exp = {
            '-': '-', '.': '.', 'A': 'U', 'C': 'G', 'B': 'V', 'D': 'H',
            'G': 'C', 'H': 'D', 'K': 'M', 'M': 'K', 'N': 'N', 'S': 'S',
            'R': 'Y', 'U': 'A', 'W': 'W', 'V': 'B', 'Y': 'R', 'a': 'u',
            'c': 'g', 'b': 'v', 'd': 'h', 'g': 'c', 'h': 'd', 'k': 'm',
            'm': 'k', 'n': 'n', 's': 's', 'r': 'y', 'u': 'a', 'w': 'w',
            'v': 'b', 'y': 'r'
        }
        self.assertEqual(self.b1.complement_map(), exp)
        self.assertEqual(RNASequence.complement_map(), exp)

    def test_iupac_standard_characters(self):
        exp = set("ACGUacgu")
        self.assertEqual(self.b1.iupac_standard_characters(), exp)
        self.assertEqual(RNASequence.iupac_standard_characters(), exp)

    def test_iupac_degeneracies(self):
        exp = {
            'B': set(['C', 'U', 'G']), 'D': set(['A', 'U', 'G']),
            'H': set(['A', 'C', 'U']), 'K': set(['U', 'G']),
            'M': set(['A', 'C']), 'N': set(['A', 'C', 'U', 'G']),
            'S': set(['C', 'G']), 'R': set(['A', 'G']), 'W': set(['A', 'U']),
            'V': set(['A', 'C', 'G']), 'Y': set(['C', 'U']),
            'b': set(['c', 'u', 'g']), 'd': set(['a', 'u', 'g']),
            'h': set(['a', 'c', 'u']), 'k': set(['u', 'g']),
            'm': set(['a', 'c']), 'n': set(['a', 'c', 'u', 'g']),
            's': set(['c', 'g']), 'r': set(['a', 'g']), 'w': set(['a', 'u']),
            'v': set(['a', 'c', 'g']), 'y': set(['c', 'u'])
        }
        self.assertEqual(self.b1.iupac_degeneracies(), exp)
        self.assertEqual(RNASequence.iupac_degeneracies(), exp)

    def test_iupac_degenerate_characters(self):
        exp = set(['B', 'D', 'H', 'K', 'M', 'N', 'S', 'R', 'W', 'V', 'Y',
                   'b', 'd', 'h', 'k', 'm', 'n', 's', 'r', 'w', 'v', 'y'])
        self.assertEqual(self.b1.iupac_degenerate_characters(), exp)
        self.assertEqual(RNASequence.iupac_degenerate_characters(), exp)

    def test_iupac_characters(self):
        exp = {
            'A', 'C', 'B', 'D', 'G', 'H', 'K', 'M', 'N', 'S', 'R', 'U', 'W',
            'V', 'Y', 'a', 'c', 'b', 'd', 'g', 'h', 'k', 'm', 'n', 's', 'r',
            'u', 'w', 'v', 'y'
        }
        self.assertEqual(self.b1.iupac_characters(), exp)
        self.assertEqual(RNASequence.iupac_characters(), exp)

    def test_complement(self):
        # use equals method to ensure that id, description, and quality are
        # correctly propagated to the resulting sequence
        self.assertTrue(self.b1.complement().equals(RNASequence("CUAAUGU")))
        self.assertTrue(self.b2.complement().equals(
            RNASequence("UGGCCAUGG", id="test-seq-2",
                        description="A test sequence", quality=range(9))))
        # b3 contains 'T', which has no RNA complement
        self.assertRaises(BiologicalSequenceError, self.b3.complement)
        self.assertTrue(self.b4.complement().equals(
            RNASequence("KYWSRMBDHVN", id="degen",
                        description="All of the degenerate bases")))
        self.assertTrue(self.b5.complement().equals(
            RNASequence(".C--UAAUG-U...")))

    def test_reverse_complement(self):
        # use equals method to ensure that id, description, and (reversed)
        # quality scores are correctly propagated to the resulting sequence
        self.assertTrue(self.b1.reverse_complement().equals(
            RNASequence("UGUAAUC")))
        self.assertTrue(self.b2.reverse_complement().equals(
            RNASequence("GGUACCGGU", id="test-seq-2",
                        description="A test sequence",
                        quality=range(9)[::-1])))
        self.assertRaises(BiologicalSequenceError, self.b3.reverse_complement)
        self.assertTrue(self.b4.reverse_complement().equals(
            RNASequence("NVHDBMRSWYK", id="degen",
                        description="All of the degenerate bases")))

    def test_unsupported_characters(self):
        self.assertEqual(self.b1.unsupported_characters(), set())
        self.assertEqual(self.b2.unsupported_characters(), set())
        self.assertEqual(self.b3.unsupported_characters(), set('T'))
        self.assertEqual(self.b4.unsupported_characters(), set())

    def test_has_unsupported_characters(self):
        self.assertFalse(self.b1.has_unsupported_characters())
        self.assertFalse(self.b2.has_unsupported_characters())
        self.assertTrue(self.b3.has_unsupported_characters())
        self.assertFalse(self.b4.has_unsupported_characters())

    def test_is_reverse_complement(self):
        self.assertFalse(self.b1.is_reverse_complement(self.b1))
        # id, description, and quality scores should be ignored (only sequence
        # data and type should be compared)
        self.assertTrue(self.b1.is_reverse_complement(
            RNASequence('UGUAAUC', quality=range(7))))
        self.assertTrue(
            self.b4.is_reverse_complement(RNASequence('NVHDBMRSWYK')))

    def test_nondegenerates_invalid(self):
        with self.assertRaises(BiologicalSequenceError):
            list(RNASequence('AZA').nondegenerates())

    def test_nondegenerates_empty(self):
        self.assertEqual(list(self.empty.nondegenerates()), [self.empty])

    def test_nondegenerates_no_degens(self):
        self.assertEqual(list(self.b1.nondegenerates()), [self.b1])

    def test_nondegenerates_all_degens(self):
        # Same chars.
        exp = [RNASequence('CC'), RNASequence('CG'), RNASequence('GC'),
               RNASequence('GG')]
        # Sort based on sequence string, as order is not guaranteed.
        obs = sorted(RNASequence('SS').nondegenerates(), key=str)
        self.assertEqual(obs, exp)
        # Different chars.
        exp = [RNASequence('AC'), RNASequence('AG'), RNASequence('GC'),
               RNASequence('GG')]
        obs = sorted(RNASequence('RS').nondegenerates(), key=str)
        self.assertEqual(obs, exp)
        # Odd number of chars.
        # 'N' expands to four characters (A, C, G, U) for RNA, hence 4**3
        obs = list(RNASequence('NNN').nondegenerates())
        self.assertEqual(len(obs), 4**3)

    def test_nondegenerates_mixed_degens(self):
        exp = [RNASequence('AGC'), RNASequence('AGU'), RNASequence('GGC'),
               RNASequence('GGU')]
        obs = sorted(RNASequence('RGY').nondegenerates(), key=str)
        self.assertEqual(obs, exp)

    def test_nondegenerates_gap_mixed_case(self):
        exp = [RNASequence('-A.a'), RNASequence('-A.c'),
               RNASequence('-C.a'), RNASequence('-C.c')]
        obs = sorted(RNASequence('-M.m').nondegenerates(), key=str)
        self.assertEqual(obs, exp)
class ProteinSequenceTests(TestCase):
    """Unit tests for ProteinSequence: amino-acid alphabet and degeneracies."""

    def setUp(self):
        self.empty = ProteinSequence('')
        self.p1 = ProteinSequence('GREG')
        self.p2 = ProteinSequence(
            'PRTEINSEQNCE', id="test-seq-2",
            description="A test sequence")
        # 'O' is not an IUPAC amino-acid code, making p3 invalid
        self.p3 = ProteinSequence(
            'PROTEIN', id="bad-seq-1",
            description="Not a protein sequence")

    def test_alphabet(self):
        exp = {
            'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N',
            'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c',
            'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'p', 'q', 'r',
            's', 't', 'v', 'w', 'x', 'y', 'z'
        }
        self.assertEqual(self.p1.alphabet(), exp)
        self.assertEqual(ProteinSequence.alphabet(), exp)

    def test_gap_alphabet(self):
        self.assertEqual(self.p1.gap_alphabet(), set('-.'))

    def test_iupac_standard_characters(self):
        exp = set("ACDEFGHIKLMNPQRSTVWYacdefghiklmnpqrstvwy")
        self.assertEqual(self.p1.iupac_standard_characters(), exp)
        self.assertEqual(ProteinSequence.iupac_standard_characters(), exp)

    def test_iupac_degeneracies(self):
        exp = {
            'B': set(['D', 'N']), 'Z': set(['E', 'Q']),
            'X': set(['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M',
                      'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']),
            'b': set(['d', 'n']), 'z': set(['e', 'q']),
            'x': set(['a', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm',
                      'n', 'p', 'q', 'r', 's', 't', 'v', 'w', 'y']),
        }
        self.assertEqual(self.p1.iupac_degeneracies(), exp)
        self.assertEqual(ProteinSequence.iupac_degeneracies(), exp)

    def test_iupac_degenerate_characters(self):
        exp = set(['B', 'X', 'Z', 'b', 'x', 'z'])
        self.assertEqual(self.p1.iupac_degenerate_characters(), exp)
        self.assertEqual(ProteinSequence.iupac_degenerate_characters(), exp)

    def test_iupac_characters(self):
        exp = {
            'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N',
            'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b',
            'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'p', 'q',
            'r', 's', 't', 'v', 'w', 'x', 'y', 'z'
        }
        self.assertEqual(self.p1.iupac_characters(), exp)
        self.assertEqual(ProteinSequence.iupac_characters(), exp)

    def test_nondegenerates(self):
        # 'B' expands to D or N (see test_iupac_degeneracies)
        exp = [ProteinSequence('AD'), ProteinSequence('AN')]
        # Sort based on sequence string, as order is not guaranteed.
        obs = sorted(ProteinSequence('AB').nondegenerates(), key=str)
        self.assertEqual(obs, exp)
if __name__ == "__main__":
    # run the module's test suite when executed directly
    main()
|
Kleptobismol/scikit-bio
|
skbio/sequence/tests/test_sequence.py
|
Python
|
bsd-3-clause
| 56,349
|
[
"scikit-bio"
] |
278d9eaf95c56c0c1c0b9b35cc369837e872090170b43bbf5636f735cf77a668
|
#-*- coding: utf8
# Author: David C. Lambert [dcl -at- panix -dot- com]
# Copyright(c) 2013
# License: Simple BSD
"""The :mod:`random_layer` module
implements Random Layer transformers.
Random layers are arrays of hidden unit activations that are
random functions of input activation values (dot products for simple
activation functions, distances from prototypes for radial basis
functions).
They are used in the implementation of Extreme Learning Machines (ELMs),
but can be used as a general input mapping.
"""
from abc import ABCMeta, abstractmethod
from math import sqrt
import numpy as np
import scipy.sparse as sp
from scipy.spatial.distance import cdist, pdist, squareform
from sklearn.metrics import pairwise_distances
from sklearn.utils import check_random_state, atleast2d_or_csr
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.base import BaseEstimator, TransformerMixin
# public names exported by this module
__all__ = ['RandomLayer',
           'MLPRandomLayer',
           'RBFRandomLayer',
           'GRBFRandomLayer',
           ]
class BaseRandomLayer(BaseEstimator, TransformerMixin):
    """Abstract Base Class for random layers.

    Subclasses must implement ``_generate_components`` (randomly generate
    the hidden-layer parameters from the training data) and
    ``_compute_input_activations`` (populate ``self.input_activations_``
    for a given input matrix).
    """

    # NOTE(review): py2-style metaclass declaration; on Python 3 this line
    # has no effect, so @abstractmethod is not actually enforced there.
    __metaclass__ = ABCMeta

    # registry of name -> activation callable; populated by subclasses
    _internal_activation_funcs = dict()

    @classmethod
    def activation_func_names(cls):
        """Get list of internal activation function names"""
        # returns a dict view on Python 3, a list on Python 2
        return cls._internal_activation_funcs.keys()

    # take n_hidden and random_state, init components_ and
    # input_activations_
    def __init__(self, n_hidden=20, random_state=0, activation_func=None,
                 activation_args=None):
        self.n_hidden = n_hidden
        self.random_state = random_state
        # either a callable, or the name of an internally defined function
        self.activation_func = activation_func
        # kwargs forwarded to a *callable* activation_func
        self.activation_args = activation_args
        self.components_ = dict()
        self.input_activations_ = None
        # keyword args for internally defined funcs
        self._extra_args = dict()

    @abstractmethod
    def _generate_components(self, X):
        """Generate components of hidden layer given X"""

    @abstractmethod
    def _compute_input_activations(self, X):
        """Compute input activations given X"""

    # compute input activations and pass them
    # through the hidden layer transfer functions
    # to compute the transform
    def _compute_hidden_activations(self, X):
        """Compute hidden activations given X"""
        self._compute_input_activations(X)
        acts = self.input_activations_
        if (callable(self.activation_func)):
            # user-supplied callable: forward optional activation_args
            args_dict = self.activation_args if (self.activation_args) else {}
            X_new = self.activation_func(acts, **args_dict)
        else:
            # name lookup into the internal registry; subclasses stash any
            # extra keyword args in self._extra_args
            func_name = self.activation_func
            func = self._internal_activation_funcs[func_name]
            X_new = func(acts, **self._extra_args)
        return X_new

    # perform fit by generating random components based
    # on the input array
    def fit(self, X, y=None):
        """Generate a random hidden layer.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training set: only the shape is used to generate random component
            values for hidden units

        y : is not used: placeholder to allow for usage in a Pipeline.

        Returns
        -------
        self
        """
        X = atleast2d_or_csr(X)
        self._generate_components(X)
        return self

    # perform transformation by calling compute_hidden_activations
    # (which will normally call compute_input_activations first)
    def transform(self, X, y=None):
        """Generate the random hidden layer's activations given X as input.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            Data to transform

        y : is not used: placeholder to allow for usage in a Pipeline.

        Returns
        -------
        X_new : numpy array of shape [n_samples, n_components]
        """
        X = atleast2d_or_csr(X)
        if (self.components_ is None):
            raise ValueError('No components initialized')
        return self._compute_hidden_activations(X)
class RandomLayer(BaseRandomLayer):
    """RandomLayer is a transformer that creates a feature mapping of the
    inputs that corresponds to a layer of hidden units with randomly
    generated components.

    The transformed values are a specified function of input activations
    that are a weighted combination of dot product (multilayer perceptron)
    and distance (rbf) activations:

        input_activation = alpha * mlp_activation + (1-alpha) * rbf_activation

        mlp_activation(x) = dot(x, weights) + bias
        rbf_activation(x) = rbf_width * ||x - center||/radius

    alpha and rbf_width are specified by the user; weights and biases are
    taken from a normal distribution of mean 0 and sd of 1; centers are
    taken uniformly from the bounding hyperrectangle of the inputs, and
    radii are max(||x-c||)/sqrt(n_centers*2).

    The input activation is transformed by a transfer function that defaults
    to numpy.tanh if not specified, but can be any callable that returns an
    array of the same shape as its argument (the input activation array, of
    shape [n_samples, n_hidden]).  Functions provided are 'sine', 'tanh',
    'tribas', 'inv_tribas', 'sigmoid', 'hardlim', 'softlim', 'gaussian',
    'multiquadric', or 'inv_multiquadric'.

    Parameters
    ----------
    `n_hidden` : int, optional (default=20)
        Number of units to generate

    `alpha` : float, optional (default=0.5)
        Mixing coefficient for distance and dot product input activations:
        activation = alpha*mlp_activation + (1-alpha)*rbf_width*rbf_activation

    `rbf_width` : float, optional (default=1.0)
        multiplier on rbf_activation

    `user_components`: dictionary, optional (default=None)
        dictionary containing values for components that would otherwise be
        randomly generated.  Valid key/value pairs are as follows:
           'radii'  : array-like of shape [n_hidden]
           'centers': array-like of shape [n_hidden, n_features]
           'biases' : array-like of shape [n_hidden]
           'weights': array-like of shape [n_features, n_hidden]

    `activation_func` : {callable, string} optional (default='tanh')
        Function used to transform input activation.  It must be one of
        'tanh', 'sine', 'tribas', 'inv_tribas', 'sigmoid', 'hardlim',
        'softlim', 'gaussian', 'multiquadric', 'inv_multiquadric' or a
        callable.  If a callable is given, it will be used to compute the
        activations.

    `activation_args` : dictionary, optional (default=None)
        Supplies keyword arguments for a callable activation_func

    `random_state` : int, RandomState instance or None (default=None)
        Control the pseudo random number generator used to generate the
        hidden unit weights at fit time.

    Attributes
    ----------
    `input_activations_` : numpy array of shape [n_samples, n_hidden]
        Array containing dot(x, hidden_weights) + bias for all samples

    `components_` : dictionary containing two keys:
        `bias_weights_`   : numpy array of shape [n_hidden]
        `hidden_weights_` : numpy array of shape [n_features, n_hidden]
    """

    # triangular activation function
    _tribas = (lambda x: np.clip(1.0 - np.fabs(x), 0.0, 1.0))

    # inverse triangular activation function
    _inv_tribas = (lambda x: np.clip(np.fabs(x), 0.0, 1.0))

    # sigmoid activation function
    _sigmoid = (lambda x: 1.0/(1.0 + np.exp(-x)))

    # hard limit activation function
    _hardlim = (lambda x: np.array(x > 0.0, dtype=float))

    _softlim = (lambda x: np.clip(x, 0.0, 1.0))

    # gaussian RBF
    _gaussian = (lambda x: np.exp(-pow(x, 2.0)))

    # multiquadric RBF
    _multiquadric = (lambda x: np.sqrt(1.0 + pow(x, 2.0)))

    # inverse multiquadric RBF
    _inv_multiquadric = (lambda x: 1.0/(np.sqrt(1.0 + pow(x, 2.0))))

    # internal activation function table
    _internal_activation_funcs = {'sine': np.sin,
                                  'tanh': np.tanh,
                                  'tribas': _tribas,
                                  'inv_tribas': _inv_tribas,
                                  'sigmoid': _sigmoid,
                                  'softlim': _softlim,
                                  'hardlim': _hardlim,
                                  'gaussian': _gaussian,
                                  'multiquadric': _multiquadric,
                                  'inv_multiquadric': _inv_multiquadric,
                                  }

    def __init__(self, n_hidden=20, alpha=0.5, random_state=None,
                 activation_func='tanh', activation_args=None,
                 user_components=None, rbf_width=1.0):

        super(RandomLayer, self).__init__(n_hidden=n_hidden,
                                          random_state=random_state,
                                          activation_func=activation_func,
                                          activation_args=activation_args)

        # Validate string-named transfer functions up front so a typo fails
        # at construction time rather than during transform().
        if isinstance(self.activation_func, str):
            func_names = self._internal_activation_funcs.keys()
            if self.activation_func not in func_names:
                msg = "unknown activation function '%s'" % self.activation_func
                raise ValueError(msg)

        self.alpha = alpha
        self.rbf_width = rbf_width
        self.user_components = user_components

        # alpha == 0.0 -> pure RBF layer; alpha == 1.0 -> pure MLP layer
        self._use_mlp_input = (self.alpha != 0.0)
        self._use_rbf_input = (self.alpha != 1.0)

    def _get_user_components(self, key):
        """Look for given user component; return None if absent."""
        try:
            return self.user_components[key]
        except (TypeError, KeyError):
            # TypeError: user_components is None; KeyError: key not supplied
            return None

    def _compute_radii(self):
        """Generate RBF radii"""
        # use supplied radii if present
        radii = self._get_user_components('radii')

        # compute radii as max pairwise center distance / sqrt(2*n_centers)
        if radii is None:
            centers = self.components_['centers']
            n_centers = centers.shape[0]
            max_dist = np.max(pairwise_distances(centers))
            radii = np.ones(n_centers) * max_dist/sqrt(2.0 * n_centers)

        self.components_['radii'] = radii

    def _compute_centers(self, X, sparse, rs):
        """Generate RBF centers"""
        # use supplied centers if present
        centers = self._get_user_components('centers')

        # use points taken uniformly from the bounding hyperrectangle
        if centers is None:
            n_features = X.shape[1]

            if sparse:
                # BUG FIX: the original used xrange and np.array(map(...)),
                # which break on Python 3 (xrange is gone; map() is lazy,
                # so np.array(<map object>) yields a useless 0-d object
                # array).  range + list comprehensions work on both 2 and 3.
                # NOTE(review): min/max are taken over .data, i.e. only the
                # explicitly stored entries of each sparse column.
                cols = [X.getcol(i) for i in range(n_features)]

                min_dtype = X.dtype.type(1.0e10)
                sp_min = lambda col: np.minimum(min_dtype, np.min(col.data))
                min_Xs = np.array([sp_min(col) for col in cols])

                max_dtype = X.dtype.type(-1.0e10)
                sp_max = lambda col: np.maximum(max_dtype, np.max(col.data))
                max_Xs = np.array([sp_max(col) for col in cols])
            else:
                min_Xs = X.min(axis=0)
                max_Xs = X.max(axis=0)

            spans = max_Xs - min_Xs
            ctrs_size = (self.n_hidden, n_features)
            centers = min_Xs + spans * rs.uniform(0.0, 1.0, ctrs_size)

        self.components_['centers'] = centers

    def _compute_biases(self, rs):
        """Generate MLP biases"""
        # use supplied biases if present
        biases = self._get_user_components('biases')
        if biases is None:
            b_size = self.n_hidden
            biases = rs.normal(size=b_size)

        self.components_['biases'] = biases

    def _compute_weights(self, X, rs):
        """Generate MLP weights"""
        # use supplied weights if present
        weights = self._get_user_components('weights')
        if weights is None:
            n_features = X.shape[1]
            hw_size = (n_features, self.n_hidden)
            weights = rs.normal(size=hw_size)

        self.components_['weights'] = weights

    def _generate_components(self, X):
        """Generate components of hidden layer given X"""
        rs = check_random_state(self.random_state)
        if self._use_mlp_input:
            self._compute_biases(rs)
            self._compute_weights(X, rs)

        if self._use_rbf_input:
            self._compute_centers(X, sp.issparse(X), rs)
            self._compute_radii()

    def _compute_input_activations(self, X):
        """Compute input activations given X"""
        n_samples = X.shape[0]

        mlp_acts = np.zeros((n_samples, self.n_hidden))
        if self._use_mlp_input:
            b = self.components_['biases']
            w = self.components_['weights']
            mlp_acts = self.alpha * (safe_sparse_dot(X, w) + b)

        rbf_acts = np.zeros((n_samples, self.n_hidden))
        if self._use_rbf_input:
            radii = self.components_['radii']
            centers = self.components_['centers']
            scale = self.rbf_width * (1.0 - self.alpha)
            rbf_acts = scale * cdist(X, centers)/radii

        self.input_activations_ = mlp_acts + rbf_acts
class MLPRandomLayer(RandomLayer):
    """RandomLayer restricted to MLP (dot-product) activations.

    The mixing coefficient alpha is fixed at 1.0 so the RBF term never
    contributes; only random weights and biases are generated.
    """

    def __init__(self, n_hidden=20, random_state=None,
                 activation_func='tanh', activation_args=None,
                 weights=None, biases=None):
        super(MLPRandomLayer, self).__init__(
            n_hidden=n_hidden,
            random_state=random_state,
            activation_func=activation_func,
            activation_args=activation_args,
            user_components={'weights': weights, 'biases': biases},
            alpha=1.0)
class RBFRandomLayer(RandomLayer):
    """RandomLayer restricted to RBF (distance-based) activations.

    The mixing coefficient alpha is fixed at 0.0 so the MLP term never
    contributes; only random centers and radii are generated.
    """

    def __init__(self, n_hidden=20, random_state=None,
                 activation_func='gaussian', activation_args=None,
                 centers=None, radii=None, rbf_width=1.0):
        super(RBFRandomLayer, self).__init__(
            n_hidden=n_hidden,
            random_state=random_state,
            activation_func=activation_func,
            activation_args=activation_args,
            user_components={'centers': centers, 'radii': radii},
            rbf_width=rbf_width,
            alpha=0.0)
class GRBFRandomLayer(RBFRandomLayer):
    """Random Generalized RBF Hidden Layer transformer

    Creates a layer of radial basis function units where:

       f(a), s.t. a = ||x-c||/r

    with c the unit center
    and f() is exp(-gamma * a^tau) where tau and r are computed
    based on [1]

    Parameters
    ----------
    `n_hidden` : int, optional (default=20)
        Number of units to generate, ignored if centers are provided

    `grbf_lambda` : float, optional (default=0.001)
        GRBF shape parameter

    `centers` : array of shape (n_hidden, n_features), optional (default=None)
        If provided, overrides internal computation of the centers

    `radii` : array of shape (n_hidden), optional (default=None)
        If provided, overrides internal computation of the radii

    `random_state` : int or RandomState instance, optional (default=None)
        Control the pseudo random number generator used to generate the
        centers at fit time, ignored if centers are provided

    Attributes
    ----------
    `components_` : dictionary containing two keys:
        `radii_`   : numpy array of shape [n_hidden]
        `centers_` : numpy array of shape [n_hidden, n_features]

    `input_activations_` : numpy array of shape [n_samples, n_hidden]
        Array containing ||x-c||/r for all samples

    See Also
    --------
    ELMRegressor, ELMClassifier, SimpleELMRegressor, SimpleELMClassifier,
    SimpleRandomLayer

    References
    ----------
    .. [1] Fernandez-Navarro, et al, "MELM-GRBF: a modified version of the
              extreme learning machine for generalized radial basis function
              neural networks", Neurocomputing 74 (2011), 2502-2510
    """

    # GRBF activation; note the double exponential, which matches the
    # commented-out reference implementation kept below.
    # def _grbf(acts, taus):
    #     """GRBF activation function"""
    #     return np.exp(np.exp(-pow(acts, taus)))
    _grbf = (lambda acts, taus: np.exp(np.exp(-pow(acts, taus))))

    # internal activation function table (only 'grbf' for this layer)
    _internal_activation_funcs = {'grbf': _grbf}

    def __init__(self, n_hidden=20, grbf_lambda=0.001,
                 centers=None, radii=None, random_state=None):
        super(GRBFRandomLayer, self).__init__(n_hidden=n_hidden,
                                              activation_func='grbf',
                                              centers=centers, radii=radii,
                                              random_state=random_state)

        self.grbf_lambda = grbf_lambda
        # Derived quantities, filled in by _compute_centers():
        self.dN_vals = None   # scaled distance to nearest neighboring center
        self.dF_vals = None   # distance to farthest center
        self.tau_vals = None  # per-center shape exponents (taus)

    # get centers from superclass, then calculate tau_vals
    # according to ref [1]
    def _compute_centers(self, X, sparse, rs):
        """Generate centers, then compute tau, dF and dN vals"""
        super(GRBFRandomLayer, self)._compute_centers(X, sparse, rs)

        centers = self.components_['centers']
        # Each row of sorted_distances holds one center's distances to all
        # centers in ascending order; column 0 is the zero self-distance.
        sorted_distances = np.sort(squareform(pdist(centers)))
        self.dF_vals = sorted_distances[:, -1]
        self.dN_vals = sorted_distances[:, 1]/100.0
        #self.dN_vals = 0.0002 * np.ones(self.dF_vals.shape)

        # Shape exponent tau per ref [1]; both logs are of values in (0,1),
        # so their ratio is positive and the outer log is well defined.
        tauNum = np.log(np.log(self.grbf_lambda) /
                        np.log(1.0 - self.grbf_lambda))
        tauDenom = np.log(self.dF_vals/self.dN_vals)
        self.tau_vals = tauNum/tauDenom

        # taus are forwarded to the 'grbf' activation via _extra_args
        self._extra_args['taus'] = self.tau_vals

    # get radii according to ref [1]
    def _compute_radii(self):
        """Generate radii"""
        denom = pow(-np.log(self.grbf_lambda), 1.0/self.tau_vals)
        self.components_['radii'] = self.dF_vals/denom
|
ashishbaghudana/mthesis-ashish
|
resources/tees/Utils/Libraries/PythonELM/random_layer.py
|
Python
|
mit
| 18,828
|
[
"Gaussian"
] |
26318986f16c89ee5ea3c6b356471999c3afbad256050cfec6b1b21ad545ed08
|
"""
Using otagrum
=============
"""
# %%
import openturns as ot
import pyAgrum as gum
from matplotlib import pylab as plt
import otagrum
# %%
def showDot(dotstring):
    """Render a graph described in DOT syntax.

    Inside a notebook, pyAgrum's notebook helper is used; outside one
    (where that import fails), the graph is rasterized with pydotplus and
    displayed through matplotlib instead.
    """
    try:
        # fails outside notebook
        import pyAgrum.lib.notebook as gnb
        gnb.showDot(dotstring)
    except ImportError:
        import pydotplus as dot
        from io import BytesIO

        graph = dot.graph_from_dot_data(dotstring)
        with BytesIO() as buffer:
            buffer.write(graph.create_png())
            buffer.seek(0)
            image = plt.imread(buffer)
        axes_image = plt.imshow(image)
        axes_image.axes.axis('off')
        plt.show()
# %%
# Creating the CBN structure
# We begin by creating the CBN that will be used throughout this example.
#
# To do so, we need a NamedDAG structure...

# %%
dag = gum.DAG()

# %%
# Map human-readable node names to the integer ids pyAgrum assigns.
mapping = {}
mapping['A'] = dag.addNode()  # Add node A
mapping['B'] = dag.addNode()  # Add node B
mapping['C'] = dag.addNode()  # Add node C
mapping['D'] = dag.addNode()  # Add node D

# %%
dag.addArc(mapping['A'], mapping['C'])  # Arc A -> C
dag.addArc(mapping['B'], mapping['C'])  # Arc B -> C
dag.addArc(mapping['C'], mapping['D'])  # Arc C -> D

# %%
dag

# %%
structure = otagrum.NamedDAG(dag, list(mapping.keys()))

# %%
showDot(structure.toDot())

# %%
# Parameters of the CBN ... and a collection of marginals and local conditional copulas.

# %%
m_list = [ot.Uniform(0.0, 1.0) for i in range(structure.getSize())]  # Local marginals
lcc_list = []  # Local Conditional Copulas
for i in range(structure.getSize()):
    # One copula per node, of dimension (number of parents + 1).
    dim_lcc = structure.getParents(i).getSize() + 1
    R = ot.CorrelationMatrix(dim_lcc)
    # Fill the lower triangle with a constant 0.6 correlation.
    for j in range(dim_lcc):
        for k in range(j):
            R[j, k] = 0.6
    lcc_list.append(ot.Normal([0.0] * dim_lcc, [1.0] * dim_lcc, R).getCopula())

# %%
# Now that we have a NamedDAG structure and a collection of local conditional copulas, we can construct a CBN.

# %%
cbn = otagrum.ContinuousBayesianNetwork(structure, m_list, lcc_list)

# %%
# Having a CBN, we can now sample from it.

# %%
ot.RandomGenerator.SetSeed(10)  # Set random seed
sample = cbn.getSample(1000)
# Hold out the last 100 points for evaluating the learned model.
train = sample[:-100]
test = sample[-100:]
# %%
# Learning the structure with continuous PC:
# Now that we have data, we can use it to learn the structure with the continuous PC algorithm.

# %%
learner = otagrum.ContinuousPC(sample, maxConditioningSetSize=5, alpha=0.1)

# %%
# We first learn the skeleton, that is the undirected structure.

# %%
skeleton = learner.learnSkeleton()

# %%
skeleton

# %%
# Then we look for the v-structures, leading to a Partially Directed Acyclic Graph (PDAG)

# %%
pdag = learner.learnPDAG()

# %%
pdag

# %%
# Finally, the remaining edges are oriented by propagating constraints

# %%
ndag = learner.learnDAG()

# %%
showDot(ndag.toDot())

# %%
# The true structure has been recovered.

# Learning with continuous MIIC
# Otagrum provides another learning algorithm to learn the structure: the continuous MIIC algorithm.

# %%
learner = otagrum.ContinuousMIIC(sample)

# %%
# This algorithm relies on the computing of mutual information which is done through the copula
# function. Hence, a copula model for the data is needed. The continuous MIIC algorithm can make
# use of Gaussian copulas (parametric) or Bernstein copulas (non-parametric) to compute mutual
# information. Moreover, due to finite sampling size, the mutual information estimators need to
# be corrected. Two kind of correction are provided: NoCorr (no correction) or Naive (a fixed
# correction is substracted from the raw mutual information estimators). Those behaviours can be
# changed as follows:

# %%
#learner.setCMode(otagrum.CorrectedMutualInformation.CModeTypes_Bernstein) # By default
learner.setCMode(otagrum.CorrectedMutualInformation.CModeTypes_Gaussian) # To use Gaussian copulas
learner.setKMode(otagrum.CorrectedMutualInformation.KModeTypes_Naive) # By default
#learner.setKMode(otagrum.CorrectedMutualInformation.KModeTypes_NoCorr) # To use the raw estimators
learner.setAlpha(0.01) # Set the correction value for the Naive behaviour, it is set to 0.01 by default

# %%
# As with PC algorithm we can learn the skeleton, PDAG and DAG using

# %%
skeleton = learner.learnSkeleton()

# %%
skeleton

# %%
pdag = learner.learnPDAG()

# %%
pdag

# %%
dag = learner.learnDAG()

# %%
showDot(dag.toDot())
# %%
# Learning parameters
# Bernstein copulas are used to learn the local conditional copulas associated to each node

# %%
m_list = []
lcc_list = []
for i in range(train.getDimension()):
    # Marginal of node i, fitted as a Uniform distribution.
    m_list.append(ot.UniformFactory().build(train.getMarginal(i)))
    # Node i together with its parents in the learned DAG.
    indices = [i] + [int(n) for n in ndag.getParents(i)]
    dim_lcc = len(indices)
    if dim_lcc == 1:
        # No parents: the "conditional copula" degenerates to independence.
        bernsteinCopula = ot.IndependentCopula(1)
    elif dim_lcc > 1:
        # Bin number K chosen by the continuous t-test heuristic.
        K = otagrum.ContinuousTTest.GetK(len(train), dim_lcc)
        bernsteinCopula = ot.EmpiricalBernsteinCopula(train.getMarginal(indices), K, False)
    lcc_list.append(bernsteinCopula)

# %%
# We can now create the learned CBN

# %%
lcbn = otagrum.ContinuousBayesianNetwork(ndag, m_list, lcc_list)  # Learned CBN

# %%
# And compare the mean loglikelihood between the true and the learned models
def compute_mean_LL(cbn, test):
    """Return the mean log-likelihood of the points in ``test`` under ``cbn``."""
    total = sum(cbn.computeLogPDF(point) for point in test)
    return total / len(test)
# %%
# Mean log-likelihood of the held-out data under the true model...
true_LL = compute_mean_LL(cbn, test)
print(true_LL)

# %%
# ...and under the learned model, for comparison.
exp_LL = compute_mean_LL(lcbn, test)
print(exp_LL)
|
openturns/otagrum
|
python/doc/examples/plot_using_otagrum.py
|
Python
|
lgpl-3.0
| 5,402
|
[
"Gaussian"
] |
60a5ce495dcb73bc9505e73fd28483fc85b93618c2c079ea962c5decca909be3
|
#!/usr/bin/env python
# Copyright (c) 2012 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
#
# This python code is used to migrate checkpoints that were created in one
# version of the simulator to newer version. As features are added or bugs are
# fixed some of the state that needs to be checkpointed can change. If you have
# many historic checkpoints that you use, manually editing them to fix them is
# both time consuming and error-prone.
# This script provides a way to migrate checkpoints to the newer repository in
# a programmatic way. It can be imported into another script or used on the
# command line. From the command line the script will either migrate every
# checkpoint it finds recursively (-r option) or a single checkpoint. When a
# change is made to the gem5 repository that breaks previous checkpoints a
# from_N() method should be implemented here and the gem5CheckpointVersion
# variable in src/sim/serialize.hh should be incremented. For each version
# between the checkpoints current version and the new version the from_N()
# method will be run, passing in a ConfigParser object which contains the open
# file. As these operations can be isa specific the method can verify the isa
# and use regexes to find the correct sections that need to be updated.
import ConfigParser
import sys, os
import os.path as osp
# An example of a translator
def from_0(cpt):
    """Migrate a version-0 checkpoint to version 1.

    For ARM checkpoints, rewrites the miscRegs entry of every execution
    context.  The actual register insertions are disabled (commented out),
    so the net effect today is whitespace normalization of the list.
    """
    if cpt.get('root', 'isa') != 'arm':
        return
    import re
    # Search for all the execution contexts
    xc_pat = re.compile('.*sys.*\.cpu.*\.x.\..*')
    for sec in cpt.sections():
        if not xc_pat.search(sec):
            continue
        # Update each one
        mr = cpt.get(sec, 'miscRegs').split()
        #mr.insert(21,0)
        #mr.insert(26,0)
        cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr))
# The backing store supporting the memories in the system has changed
# in that it is now stored globally per address range. As a result the
# actual storage is separate from the memory controllers themselves.
def from_1(cpt):
    """Migrate a version-1 checkpoint to version 2.

    Moves the physical memory's backing-store details (filename, size)
    from the memory controller section into a new per-range store
    section, and stamps the controller with its store count.

    Raises ValueError if any additional memory is present, since without
    start-address information only a single memory (starting at 0) can be
    migrated safely.
    """
    import re
    physmem_pat = re.compile('.*sys.*\.physmem$')
    othermem_pat = re.compile('.*sys.*\.\w*mem$')
    for sec in cpt.sections():
        if physmem_pat.search(sec):
            # Add the number of stores attribute to the global physmem
            cpt.set(sec, 'nbr_of_stores', '1')

            # The filename and size move to the specific backing store
            mem_filename = cpt.get(sec, 'filename')
            mem_size = cpt.get(sec, '_size')
            cpt.remove_option(sec, 'filename')
            cpt.remove_option(sec, '_size')

            # New section named after the enclosing system
            system_name = str(sec).split('.')[0]
            store_section = system_name + '.physmem.store0'
            cpt.add_section(store_section)
            cpt.set(store_section, 'store_id', '0')
            cpt.set(store_section, 'range_size', mem_size)
            cpt.set(store_section, 'filename', mem_filename)
        elif othermem_pat.search(sec):
            # Due to the lack of information about a start address,
            # this migration only works if there is a single memory in
            # the system, thus starting at 0
            raise ValueError("more than one memory detected (" + sec + ")")
def from_2(cpt):
    """Migrate a version-2 checkpoint to version 3.

    Every CPU section that carries an instCnt entry gains a _pid field
    defaulting to process id 0; sections without instCnt are skipped.
    """
    import re
    cpu_pat = re.compile('.*sys.*cpu')
    for sec in cpt.sections():
        # Search for all CPUs
        if not cpu_pat.search(sec):
            continue
        try:
            cpt.get(sec, 'instCnt')
        except ConfigParser.NoOptionError:
            continue
        cpt.set(sec, '_pid', '0')
# The ISA is now a separate SimObject, which means that we serialize
# it in a separate section instead of as a part of the ThreadContext.
def from_3(cpt):
    """Migrate a version-3 checkpoint to version 4.

    Moves ISA-specific state out of each thread-context (.xc.0) section
    into a dedicated .isa section next to the CPU.

    Fixes over the original implementation:
      * the 'arm' and 'x86' field lists are proper 1-tuples; they used to
        be bare strings ("miscRegs"), so `key in isa_fields` degenerated
        into a substring check;
      * each section's options are scanned exactly once; the old code
        looped once per field name and appended duplicate (key, value)
        pairs to the new ISA section.
    """
    isa = cpt.get('root', 'isa')
    isa_fields = {
        "alpha": ("fpcr", "uniq", "lock_flag", "lock_addr", "ipr"),
        "arm": ("miscRegs",),
        "sparc": ("asi", "tick", "fprs", "gsr", "softint", "tick_cmpr",
                  "stick", "stick_cmpr", "tpc", "tnpc", "tstate", "tt",
                  "tba", "pstate", "tl", "pil", "cwp", "gl", "hpstate",
                  "htstate", "hintp", "htba", "hstick_cmpr",
                  "strandStatusReg", "fsr", "priContext", "secContext",
                  "partId", "lsuCtrlReg", "scratchPad",
                  "cpu_mondo_head", "cpu_mondo_tail",
                  "dev_mondo_head", "dev_mondo_tail",
                  "res_error_head", "res_error_tail",
                  "nres_error_head", "nres_error_tail",
                  "tick_intr_sched",
                  "cpu", "tc_num", "tick_cmp", "stick_cmp", "hstick_cmp"),
        "x86": ("regVal",),
    }
    isa_fields = isa_fields.get(isa, ())

    import re
    isa_sections = []
    for sec in cpt.sections():
        # Search for all the execution contexts
        re_cpu_match = re.match('^(.*sys.*\.cpu[^.]*)\.xc\.(.+)$', sec)
        if not re_cpu_match:
            continue

        if re_cpu_match.group(2) != "0":
            # This shouldn't happen as we didn't support checkpointing
            # of in-order and O3 CPUs.
            raise ValueError("Don't know how to migrate multi-threaded CPUs "
                             "from version 1")

        # Collect the ISA-specific options of this context exactly once.
        isa_section = [(key, value)
                       for (key, value) in cpt.items(sec, raw=True)
                       if key in isa_fields]

        name = "%s.isa" % re_cpu_match.group(1)
        isa_sections.append((name, isa_section))

        for (key, value) in isa_section:
            cpt.remove_option(sec, key)

    for (sec, options) in isa_sections:
        # Some intermediate versions of gem5 have empty ISA sections
        # (after we made the ISA a SimObject, but before we started to
        # serialize into a separate ISA section).
        if not cpt.has_section(sec):
            cpt.add_section(sec)
        elif cpt.items(sec):
            raise ValueError("Unexpected populated ISA section in old "
                             "checkpoint")
        for (key, value) in options:
            cpt.set(sec, key, value)
# Version 5 of the checkpoint format removes the MISCREG_CPSR_MODE
# register from the ARM register file.
def from_4(cpt):
    """Migrate a version-4 checkpoint to version 5: drop the retired
    MISCREG_CPSR_MODE entry (index 137) from every ARM ISA section."""
    if cpt.get('root', 'isa') != 'arm':
        return
    import re
    isa_pat = re.compile('.*sys.*\.cpu.*\.isa')
    for sec in cpt.sections():
        # Search for all ISA sections
        if not isa_pat.search(sec):
            continue
        regs = cpt.get(sec, 'miscRegs').split()
        # Remove MISCREG_CPSR_MODE
        del regs[137]
        cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in regs))
# Version 6 of the checkpoint format adds tlb to x86 checkpoints
def from_5(cpt):
    """Migrate a version-5 checkpoint to version 6: add empty TLB state
    (_size, lruSeq) to every x86 instruction/data TLB section.

    Non-x86 checkpoints need no change; a note is printed instead.
    BUG FIX: the note previously used a Python-2 print statement; the
    parenthesized single-argument form below works on Python 2 and 3.
    """
    if cpt.get('root', 'isa') != 'x86':
        print("ISA is not x86")
        return
    import re
    for sec in cpt.sections():
        # Search for all ISA sections
        if re.search('.*sys.*\.cpu.*\.dtb$', sec) or \
           re.search('.*sys.*\.cpu.*\.itb$', sec):
            cpt.set(sec, '_size', '0')
            cpt.set(sec, 'lruSeq', '0')
# Ordered list of migration functions; migrations[N] upgrades a checkpoint
# from version N to version N+1.  The checkpoint's cpt_ver indexes into
# this list, so the order matters and entries must never be removed.
migrations = []
migrations.append(from_0)
migrations.append(from_1)
migrations.append(from_2)
migrations.append(from_3)
migrations.append(from_4)
migrations.append(from_5)

# Global verbosity flag, set from the -v command-line option below.
verbose_print = False
def verboseprint(*args):
if not verbose_print:
return
for arg in args:
print arg,
print
def process_file(path, **kwargs):
    """Upgrade a single checkpoint file in place.

    Reads the checkpoint at ``path``, determines its version from the
    ``root`` section, and applies every migration from that version up to
    the newest, rewriting the file when done.

    Keyword args:
        backup: when True (default), copy the file to ``path + '.bak'``
                before modifying it.

    Raises:
        IOError: if ``path`` does not name an existing file.
        LookupError: if the checkpoint carries no version information.
        ValueError: if the checkpoint is newer than this script knows about.
    """
    if not osp.isfile(path):
        import errno
        # BUG FIX: this previously read "ennro.ENOENT" — a NameError that
        # masked the intended IOError whenever the file was missing.
        raise IOError(errno.ENOENT, "No such file", path)

    verboseprint("Processing file %s...." % path)

    if kwargs.get('backup', True):
        import shutil
        shutil.copyfile(path, path + '.bak')

    cpt = ConfigParser.SafeConfigParser()

    # gem5 is case sensitive with parameters
    cpt.optionxform = str

    # Read the current data
    cpt_file = file(path, 'r')
    cpt.readfp(cpt_file)
    cpt_file.close()

    # Make sure we know what we're starting from
    if not cpt.has_option('root', 'cpt_ver'):
        raise LookupError("cannot determine version of checkpoint")
    cpt_ver = cpt.getint('root', 'cpt_ver')

    # If the checkpoint is newer than the migrations list, someone forgot
    # to add the matching from_N() function to this script.
    if cpt_ver > len(migrations):
        raise ValueError("upgrade script is too old and needs updating")

    verboseprint("\t...file is at version %#x" % cpt_ver)

    if cpt_ver == len(migrations):
        verboseprint("\t...nothing to do")
        return

    # Walk through every migration from the current version to the newest,
    # bumping the version stamp after each successful step.
    for v in xrange(cpt_ver, len(migrations)):
        verboseprint("\t...migrating to version %#x" % (v + 1))
        migrations[v](cpt)
        cpt.set('root', 'cpt_ver', str(v + 1))

    # Write the upgraded data back
    verboseprint("\t...completed")
    cpt.write(file(path, 'w'))
if __name__ == '__main__':
    from optparse import OptionParser

    # Command-line front end: upgrade one checkpoint file, or (with -r)
    # every m5.cpt found under a directory tree.
    parser = OptionParser("usage: %prog [options] <filename or directory>")
    parser.add_option("-r", "--recurse", action="store_true",
                      help="Recurse through all subdirectories modifying "\
                           "each checkpoint that is found")
    parser.add_option("-N", "--no-backup", action="store_false",
                      dest="backup", default=True,
                      help="Do no backup each checkpoint before modifying it")
    parser.add_option("-v", "--verbose", action="store_true",
                      help="Print out debugging information as")

    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("You must specify a checkpoint file to modify or a "\
                     "directory of checkpoints to recursively update")

    verbose_print = options.verbose

    # Deal with shell variables and ~
    path = osp.expandvars(osp.expanduser(args[0]))

    # Process a single file if we have it
    if osp.isfile(path):
        process_file(path, **vars(options))
    # Process an entire directory
    elif osp.isdir(path):
        cpt_file = osp.join(path, 'm5.cpt')
        if options.recurse:
            # Visit every file and see if it matches
            for root, dirs, files in os.walk(path):
                for name in files:
                    if name == 'm5.cpt':
                        process_file(osp.join(root, name), **vars(options))
                for dir in dirs:
                    pass
        # Maybe someone passed a cpt.XXXXXXX directory and not m5.cpt
        elif osp.isfile(cpt_file):
            process_file(cpt_file, **vars(options))
        else:
            print "Error: checkpoint file not found at in %s " % path,
            print "and recurse not specified"
            sys.exit(1)
    sys.exit(0)
|
prodromou87/gem5
|
util/cpt_upgrader.py
|
Python
|
bsd-3-clause
| 12,954
|
[
"VisIt"
] |
245c973228a71047459f52d51f6cc4bddd5b4bedf77b42b1130973b8ce3768e8
|
# -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher (clemens.prescher@gmail.com)
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
# Gaussian normalization constant sqrt(2*pi), precomputed once at import time.
s2pi = np.sqrt(2 * np.pi)
def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):
    """1 dimensional gaussian:
    gaussian(x, amplitude, center, sigma)

    The peak area equals ``amplitude``; the 1.0 * x factor promotes
    integer input to float before squaring.
    """
    height = amplitude / (np.sqrt(2 * np.pi) * sigma)
    deviation = 1.0 * x - center
    return height * np.exp(-deviation ** 2 / (2 * sigma ** 2))
|
Dioptas/Dioptas
|
dioptas/model/util/PeakShapes.py
|
Python
|
gpl-3.0
| 1,283
|
[
"Gaussian"
] |
4749e9516f934e0fc3b6b35d3cc8d20e11accc4a294915518cb36bbcd1e34360
|
import os
import mimetypes
import copy
import tempfile
import shutil
import logging
import json
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist, SuspiciousFileOperation
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.contrib.sites.models import Site
from rest_framework.pagination import PageNumberPagination
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import generics, status
from rest_framework.request import Request
from rest_framework.exceptions import ValidationError, NotAuthenticated, PermissionDenied, NotFound
from hs_core import hydroshare
from hs_core.models import AbstractResource
from hs_core.hydroshare.utils import get_resource_by_shortkey, get_resource_types
from hs_core.views import utils as view_utils
from hs_core.views.utils import ACTION_TO_AUTHORIZE
from hs_core.views import serializers
from hs_core.views import pagination
from hs_core.hydroshare.utils import get_file_storage, resource_modified
from hs_core.serialization import GenericResourceMeta, HsDeserializationDependencyException, \
HsDeserializationException
from hs_core.hydroshare.hs_bagit import create_bag_files
logger = logging.getLogger(__name__)
# Mixins
class ResourceToListItemMixin(object):
    """Mixin converting a resource object into a serializers.ResourceListItem
    suitable for REST list responses."""

    def resourceToResourceListItem(self, r):
        site_url = hydroshare.utils.current_site_url()
        item_kwargs = {
            'resource_type': r.resource_type,
            'resource_id': r.short_id,
            'resource_title': r.metadata.title.value,
            'creator': r.first_creator.name,
            # Access-control flags mirrored from the resource's raccess record.
            'public': r.raccess.public,
            'discoverable': r.raccess.discoverable,
            'shareable': r.raccess.shareable,
            'immutable': r.raccess.immutable,
            'published': r.raccess.published,
            'date_created': r.created,
            'date_last_updated': r.updated,
            # Absolute URLs, all prefixed with the current site URL.
            'bag_url': site_url + AbstractResource.bag_url(r.short_id),
            'science_metadata_url':
                site_url + reverse('get_update_science_metadata', args=[r.short_id]),
            'resource_map_url':
                site_url + reverse('get_resource_map', args=[r.short_id]),
            'resource_url': site_url + r.get_absolute_url(),
        }
        return serializers.ResourceListItem(**item_kwargs)
class ResourceFileToListItemMixin(object):
    """Mixin converting a resource file into a serializers.ResourceFileItem
    for REST responses."""

    def resourceFileToListItem(self, f):
        url = hydroshare.utils.current_site_url() + f.resource_file.url
        # Guess the MIME type from the URL; fall back to the string 'None'
        # when the type cannot be determined.
        guessed_type = mimetypes.guess_type(url)[0]
        content_type = guessed_type if guessed_type else repr(None)
        return serializers.ResourceFileItem(url=url,
                                            size=f.resource_file.size,
                                            content_type=content_type)
class ResourceTypes(generics.ListAPIView):
    """
    List the resource types known to the system.

    REST URL: hsapi/resourceTypes
    HTTP method: GET

    Returns a paginated JSON list in which each entry has the form:

        {"resource_type": "<ResourceTypeClassName>"}

    e.g. GenericResource, RasterResource, RefTimeSeries, TimeSeriesResource,
    NetcdfResource, ModelProgramResource, ModelInstanceResource, ToolResource,
    SWATModelInstanceResource. The full response fits in a single page.
    """
    pagination_class = pagination.SmallDatumPagination

    def get(self, request):
        return self.list(request)

    def get_queryset(self):
        # Wrap each registered resource type's class name in a ResourceType datum.
        registered_types = get_resource_types()
        return [serializers.ResourceType(resource_type=cls.__name__)
                for cls in registered_types]

    def get_serializer_class(self):
        return serializers.ResourceTypesSerializer
class ResourceList(ResourceToListItemMixin, generics.ListAPIView):
    """
    Get a list of resources based on the following filter query parameters.

    DEPRECATED: See GET /resource/ in CreateResource

    For an anonymous user, all public resources will be listed. For any
    authenticated user with no other query parameters provided in the request,
    all resources that are viewable by the user will be listed.

    REST URL: hsapi/resourceList/{query parameters}
    HTTP method: GET

    Supported query parameters (all are optional):
    :type owner: str
    :type types: list of resource type class names
    :type from_date: str (e.g., 2015-04-01)
    :type to_date: str (e.g., 2015-05-01)
    :type edit_permission: bool
    :param owner: (optional) - to get a list of resources owned by a specified username
    :param types: (optional) - to get a list of resources of the specified resource types
    :param from_date: (optional) - to get a list of resources created on or after this date
    :param to_date: (optional) - to get a list of resources created on or before this date
    :param edit_permission: (optional) - to get a list of resources for which the
        authorised user has edit permission
    :rtype: json string
    :return: a paginated list of resources; each item carries resource id, title,
        resource type, creator, the public/discoverable/shareable/immutable/published
        flags, date created, date last updated, and URLs for the bag, science
        metadata, and resource landing page, e.g.:

        { "count": n,
          "next": link to next page,
          "previous": link to previous page,
          "results": [
              {"resource_type": ..., "resource_title": ..., "resource_id": ...,
               "creator": ..., "date_created": ..., "date_last_updated": ...,
               "public": true or false, "discoverable": true or false,
               "shareable": true or false, "immutable": true or false,
               "published": true or false, "bag_url": ...,
               "science_metadata_url": ..., "resource_url": ...},
              ...
          ]
        }
    """
    pagination_class = PageNumberPagination

    def get(self, request):
        return self.list(request)

    # needed for list of resources
    def get_queryset(self):
        validator = serializers.ResourceListRequestValidator(
            data=self.request.query_params)
        if not validator.is_valid():
            raise ValidationError(detail=validator.errors)

        filters = validator.validated_data
        user = self.request.user
        filters['user'] = user if user.is_authenticated() else None
        # An empty 'type' filter means "all resource types".
        filters['type'] = list(filters['type']) if len(filters['type']) else None
        # Anonymous users may only see public resources.
        filters['public'] = not user.is_authenticated()

        return [self.resourceToResourceListItem(r)
                for r in hydroshare.get_resource_list(**filters)]

    def get_serializer_class(self):
        return serializers.ResourceListItemSerializer
class CheckTaskStatus(generics.RetrieveAPIView):
    """Redirect to the internal task-status endpoint for the given task id."""

    def get(self, request, task_id):
        target = reverse('rest_check_task_status', kwargs={'task_id': task_id})
        return HttpResponseRedirect(target)
class ResourceReadUpdateDelete(ResourceToListItemMixin, generics.RetrieveUpdateDestroyAPIView):
    """
    Read, update, or delete a resource.

    REST URL: hsapi/resource/{pk}
    HTTP method: GET
    :return: (on success): The resource in zipped BagIt format.

    REST URL: hsapi/resource/{pk}
    HTTP method: DELETE
    :return: (on success): JSON string of the format: {'resource_id':pk}

    REST URL: hsapi/resource/{pk}
    HTTP method: PUT
    :return: (on success): JSON string of the format: {'resource_id':pk}

    :type str
    :param pk: resource id
    :rtype: JSON string for http methods DELETE and PUT, and resource file data
        bytes for GET
    :raises:
    NotFound: return JSON format: {'detail': 'No resource was found for resource id':pk}
    PermissionDenied: return JSON format: {'detail': 'You do not have permission to perform
    this action.'}
    ValidationError: return JSON format: {parameter-1': ['error message-1'], 'parameter-2':
    ['error message-2'], .. }
    """
    pagination_class = PageNumberPagination

    allowed_methods = ('GET', 'PUT', 'DELETE')

    def get(self, request, pk):
        """Redirect to a download of the resource in zipped BagIt format."""
        res, _, _ = view_utils.authorize(request, pk,
                                         needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE)
        site_url = hydroshare.utils.current_site_url()
        if res.resource_type.lower() == "reftimeseriesresource":
            # RefTimeSeries resources have a dedicated bag-download endpoint.
            path = reverse('rest_download_refts_resource_bag',
                           kwargs={'shortkey': pk})
        else:
            path = reverse('rest_download',
                           kwargs={'path': 'bags/{}.zip'.format(pk)})
        return HttpResponseRedirect(site_url + path)

    def put(self, request, pk):
        # TODO: update resource - involves overwriting a resource from the provided bag file
        raise NotImplementedError()

    def delete(self, request, pk):
        """Delete the resource; only resource owners are allowed to delete."""
        view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.DELETE_RESOURCE)
        hydroshare.delete_resource(pk)
        # spec says we need return the id of the resource that got deleted - otherwise would
        # have used status code 204 and not 200
        return Response(data={'resource_id': pk}, status=status.HTTP_200_OK)

    def get_serializer_class(self):
        return serializers.ResourceListItemSerializer
class ResourceListCreate(ResourceToListItemMixin, generics.ListCreateAPIView):
    """
    Create a new resource (POST) or list existing resources (GET).

    REST URL: hsapi/resource/
    HTTP method: POST

    Request data payload parameters:
    :type resource_type: str
    :type title: str
    :type edit_users: str
    :type edit_groups: str
    :type view_users: str
    :type view_groups: str
    :param resource_type: resource type name
    :param title: (optional) title of the resource (default value: 'Untitled resource')
    :param edit_users: (optional) list of comma separated usernames that should have edit
    permission for the resource
    :param edit_groups: (optional) list of comma separated group names that should have edit
    permission for the resource
    :param view_users: (optional) list of comma separated usernames that should have view
    permission for the resource
    :param view_groups: (optional) list of comma separated group names that should have view
    permission for the resource
    :param metadata: (optional) data for any valid metadata element including resource specific
    metadata elements can be passed as json string:
    example (passing data for the 'Coverage' element):
    [{'coverage':{'type': 'period', 'start': '01/01/2000', 'end': '12/12/2010'}}, ...]
    Note: the parameter 'metadata' can't be used for passing data for the following core
    metadata elements:
    Title, Description (abstract), Subject (keyword), Date, Publisher, Type, Format
    :param extra_metadata: (optional) data for any user-defined key/value pair metadata
    elements of the resource can be passed as json string
    example :
    {'Outlet Point Latitude': '40', 'Outlet Point Longitude': '-110'}
    :return: id and type of the resource created
    :rtype: json string of the format: {'resource-id':id, 'resource_type': resource type}
    :raises:
    NotAuthenticated: return json format: {'detail': 'Authentication credentials were not
    provided.'}
    ValidationError: return json format: {parameter-1':['error message-1'], 'parameter-2':
    ['error message-2'], .. }

    REST URL: hsapi/resource/
    HTTP method: GET

    Supported query parameters (all optional): owner, types, from_date, to_date,
    edit_permission. Semantics and the paginated JSON response format are identical
    to those documented on ResourceList.
    """
    def initialize_request(self, request, *args, **kwargs):
        """
        Hack to work around the following issue in django-rest-framework:

        https://github.com/tomchristie/django-rest-framework/issues/3951

        Couch: This issue was recently closed (10/12/2016, 2 days before this writing)
        and is slated to be incorporated in the Django REST API 3.5.0 release.
        At that time, we should remove this hack.

        :param request: incoming Django HttpRequest (or already-wrapped DRF Request)
        :param args: positional args forwarded to the superclass
        :param kwargs: keyword args forwarded to the superclass
        :return: the DRF Request, with POST/FILES payload preserved across the wrap
        """
        if not isinstance(request, Request):
            # Don't deep copy the file data as it may contain an open file handle
            old_file_data = copy.copy(request.FILES)
            old_post_data = copy.deepcopy(request.POST)
            request = super(ResourceListCreate, self).initialize_request(request, *args, **kwargs)
            # Restore the multipart payload that DRF wrapping may have consumed.
            request.POST.update(old_post_data)
            request.FILES.update(old_file_data)
        return request

    # Couch: This is called explicitly in the overridden create() method and thus this
    # declaration does nothing. Thus, it can be changed to whatever is convenient.
    # Currently, it is convenient to use the listing serializer instead, so that
    # it will be the default output serializer.
    # def get_serializer_class(self):
    #     return serializers.ResourceCreateRequestValidator

    def post(self, request):
        # Delegate to the overridden create() below.
        return self.create(request)

    # Override the create() method from the CreateAPIView class
    def create(self, request, *args, **kwargs):
        # Resource creation requires an authenticated user.
        if not request.user.is_authenticated():
            raise NotAuthenticated()

        resource_create_request_validator = serializers.ResourceCreateRequestValidator(
            data=request.data)
        if not resource_create_request_validator.is_valid():
            raise ValidationError(detail=resource_create_request_validator.errors)

        validated_request_data = resource_create_request_validator.validated_data
        resource_type = validated_request_data['resource_type']

        res_title = validated_request_data.get('title', 'Untitled resource')
        keywords = validated_request_data.get('keywords', None)
        abstract = validated_request_data.get('abstract', None)
        metadata = validated_request_data.get('metadata', None)
        extra_metadata = validated_request_data.get('extra_metadata', None)

        num_files = len(request.FILES)
        # TODO: (Couch) reconsider whether multiple file upload should be
        # supported when multipart bug fixed.
        if num_files > 0:
            if num_files > 1:
                raise ValidationError(detail={'file': 'Multiple file upload is not allowed on '
                                                      'resource creation. Add additional files '
                                                      'after the resource is created.'})
            # Place files into format expected by hydroshare.utils.resource_pre_create_actions and
            # hydroshare.create_resource, i.e. a tuple of
            # django.core.files.uploadedfile.TemporaryUploadedFile objects.
            files = [request.FILES['file'], ]
        else:
            files = []

        if metadata is not None:
            metadata = json.loads(metadata)
            _validate_metadata(metadata)

        if extra_metadata is not None:
            extra_metadata = json.loads(extra_metadata)
            # TODO: validate extra metadata here

        try:
            # Pre-create hook may rewrite the title and metadata (e.g. extraction).
            _, res_title, metadata, _ = hydroshare.utils.resource_pre_create_actions(
                resource_type=resource_type, resource_title=res_title,
                page_redirect_url_key=None, files=files, metadata=metadata,
                **kwargs)
        except Exception as ex:
            error_msg = {'resource': "Resource creation failed. %s" % ex.message}
            raise ValidationError(detail=error_msg)

        try:
            resource = hydroshare.create_resource(
                resource_type=resource_type,
                owner=request.user,
                title=res_title,
                edit_users=validated_request_data.get('edit_users', None),
                view_users=validated_request_data.get('view_users', None),
                edit_groups=validated_request_data.get('edit_groups', None),
                view_groups=validated_request_data.get('view_groups', None),
                keywords=keywords,
                metadata=metadata,
                extra_metadata=extra_metadata,
                files=files
            )
            # Abstract is a core metadata element and must be created separately
            # from the 'metadata' payload (see class docstring).
            if abstract:
                resource.metadata.create_element('description', abstract=abstract)
        except Exception as ex:
            error_msg = {'resource': "Resource creation failed. %s" % ex.message}
            raise ValidationError(detail=error_msg)

        response_data = {'resource_type': resource_type, 'resource_id': resource.short_id}
        return Response(data=response_data,  status=status.HTTP_201_CREATED)

    pagination_class = PageNumberPagination

    def get(self, request):
        return self.list(request)

    # needed for list of resources
    # copied from ResourceList
    def get_queryset(self):
        resource_list_request_validator = serializers.ResourceListRequestValidator(
            data=self.request.query_params)
        if not resource_list_request_validator.is_valid():
            raise ValidationError(detail=resource_list_request_validator.errors)

        filter_parms = resource_list_request_validator.validated_data
        filter_parms['user'] = (self.request.user if self.request.user.is_authenticated() else None)
        # An empty 'type' filter means "all resource types".
        if len(filter_parms['type']) == 0:
            filter_parms['type'] = None
        else:
            filter_parms['type'] = list(filter_parms['type'])

        # Anonymous users may only see public resources.
        filter_parms['public'] = not self.request.user.is_authenticated()

        filtered_res_list = []

        for r in hydroshare.get_resource_list(**filter_parms):
            resource_list_item = self.resourceToResourceListItem(r)
            filtered_res_list.append(resource_list_item)

        return filtered_res_list

    # covers serialization of output from GET request
    def get_serializer_class(self):
        return serializers.ResourceListItemSerializer
class SystemMetadataRetrieve(ResourceToListItemMixin, APIView):
    """
    Retrieve resource system metadata.

    REST URL: hsapi/sysmeta/{pk}
    HTTP method: GET

    :type pk: str
    :param pk: id of the resource
    :return: system metadata as JSON string
    :rtype: str
    :raises:
    NotFound: return JSON format: {'detail': 'No resource was found for resource id:pk'}
    PermissionDenied: return JSON format: {'detail': 'You do not have permission to
    perform this action.'}

    The response carries: resource_type, resource_title, resource_id, creator,
    date_created, date_last_updated, the public/discoverable/shareable/immutable/
    published flags, bag_url, and science_metadata_url.
    """
    allowed_methods = ('GET',)

    def get(self, request, pk):
        """Return resource system metadata plus bag and science metadata URLs."""
        res, _, _ = view_utils.authorize(request, pk,
                                         needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)
        serializer_cls = self.get_serializer_class()
        serialized = serializer_cls(self.resourceToResourceListItem(res))
        return Response(data=serialized.data, status=status.HTTP_200_OK)

    def get_serializer_class(self):
        return serializers.ResourceListItemSerializer
class AccessRulesUpdate(APIView):
    """
    Set access rules for a resource.

    REST URL: hsapi/resource/{pk}/access
    DEPRECATED: hsapi/resource/accessRules/{pk}
    HTTP method: PUT

    :type pk: str
    :param pk: id of the resource
    :return: No content. Status code will 200 (OK)
    """
    # TODO: (Couch) Need GET as well.
    allowed_methods = ('PUT',)

    def put(self, request, pk):
        """Update the resource's public flag (owner-only operation)."""
        # only resource owners are allowed to change resource flags (e.g., public)
        view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.SET_RESOURCE_FLAG)

        validator = serializers.AccessRulesRequestValidator(data=request.data)
        if not validator.is_valid():
            raise ValidationError(detail=validator.errors)

        res = get_resource_by_shortkey(pk)
        res.raccess.public = validator.validated_data['public']
        res.raccess.save()
        return Response(data={'resource_id': pk}, status=status.HTTP_200_OK)
class ScienceMetadataRetrieveUpdate(APIView):
    """
    Retrieve (GET) or update (PUT) resource science metadata.

    REST URL: hsapi/scimeta/{pk}
    HTTP method: GET

    :type pk: str
    :param pk: id of the resource
    :return: science metadata as XML document
    :rtype: str
    :raises:
    NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}
    PermissionDenied: return json format: {'detail': 'You do not have permission to perform
    this action.'}

    REST URL: hsapi/scimeta/{pk}
    HTTP method: PUT

    :type pk: str
    :param pk: id of the resource
    :type metadata: json
    :param metadata: resource metadata
    :return: resource id
    :rtype: json of the format: {'resource_id':pk}
    :raises:
    NotFound: return json format: {'detail': 'No resource was found for resource id':pk}
    PermissionDenied: return json format: {'detail': 'You do not have permission to perform
    this action.'}
    ValidationError: return json format: {parameter-1': ['error message-1'],
    'parameter-2': ['error message-2'], .. }
    """
    # Content types accepted for the uploaded resourcemetadata.xml file.
    ACCEPT_FORMATS = ('application/xml', 'application/rdf+xml')

    allowed_methods = ('GET', 'PUT')

    def get(self, request, pk):
        # Redirect to the canonical science metadata XML URL for the resource.
        view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)
        scimeta_url = hydroshare.utils.current_site_url() + AbstractResource.scimeta_url(pk)
        return redirect(scimeta_url)

    def put(self, request, pk):
        # Update science metadata based on resourcemetadata.xml uploaded
        resource, authorized, user = view_utils.authorize(
            request, pk,
            needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
            raises_exception=False)
        if not authorized:
            raise PermissionDenied()

        # Exactly one file, named resourcemetadata.xml, must be uploaded.
        files = request.FILES.values()
        if len(files) == 0:
            error_msg = {'file': 'No resourcemetadata.xml file was found to update resource '
                                 'metadata.'}
            raise ValidationError(detail=error_msg)
        elif len(files) > 1:
            error_msg = {'file': ('More than one file was found. Only one file, named '
                                  'resourcemetadata.xml, '
                                  'can be used to update resource metadata.')}
            raise ValidationError(detail=error_msg)

        scimeta = files[0]
        if scimeta.content_type not in self.ACCEPT_FORMATS:
            error_msg = {'file': ("Uploaded file has content type {t}, "
                                  "but only these types are accepted: {e}.").format(
                t=scimeta.content_type, e=",".join(self.ACCEPT_FORMATS))}
            raise ValidationError(detail=error_msg)
        expect = 'resourcemetadata.xml'
        if scimeta.name != expect:
            error_msg = {'file': "Uploaded file has name {n}, but expected {e}.".format(
                n=scimeta.name, e=expect)}
            raise ValidationError(detail=error_msg)

        # Temp directory to store resourcemetadata.xml
        tmp_dir = tempfile.mkdtemp()
        try:
            # Fake the bag structure so that GenericResourceMeta.read_metadata_from_resource_bag
            # can read and validate the system and science metadata for us.
            bag_data_path = os.path.join(tmp_dir, 'data')
            os.mkdir(bag_data_path)
            # Copy new science metadata to bag data path
            scimeta_path = os.path.join(bag_data_path, 'resourcemetadata.xml')
            shutil.copy(scimeta.temporary_file_path(), scimeta_path)
            # Copy existing resource map to bag data path
            # (use a file-like object as the file may be in iRODS, so we can't
            # just copy it to a local path)
            resmeta_path = os.path.join(bag_data_path, 'resourcemap.xml')
            with open(resmeta_path, 'wb') as resmeta:
                storage = get_file_storage()
                resmeta_irods = storage.open(AbstractResource.sysmeta_path(pk))
                shutil.copyfileobj(resmeta_irods, resmeta)
                resmeta_irods.close()

            try:
                # Read resource system and science metadata
                domain = Site.objects.get_current().domain
                rm = GenericResourceMeta.read_metadata_from_resource_bag(tmp_dir,
                                                                         hydroshare_host=domain)
                # Update resource metadata
                rm.write_metadata_to_resource(resource, update_title=True, update_keywords=True)
                create_bag_files(resource)
            except HsDeserializationDependencyException as e:
                msg = ("HsDeserializationDependencyException encountered when updating "
                       "science metadata for resource {pk}; depedent resource was {dep}.")
                msg = msg.format(pk=pk, dep=e.dependency_resource_id)
                logger.error(msg)
                raise ValidationError(detail=msg)
            except HsDeserializationException as e:
                raise ValidationError(detail=e.message)

            # Mark the resource as modified; bag is regenerated lazily.
            resource_modified(resource, request.user, overwrite_bag=False)
            return Response(data={'resource_id': pk}, status=status.HTTP_202_ACCEPTED)
        finally:
            # Always clean up the temp bag directory, even on validation failure.
            shutil.rmtree(tmp_dir)
class ResourceMapRetrieve(APIView):
    """
    Retrieve the resource map XML document.

    REST URL: hsapi/resource/{pk}/map
    HTTP method: GET

    :type pk: str
    :param pk: id of the resource
    :return: redirect to the resource map as an XML document
    :rtype: str
    :raises:
    NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}
    PermissionDenied: return json format: {'detail': 'You do not have permission to perform
    this action.'}
    """
    # BUG FIX: ('GET') is just the string 'GET' (parentheses without a trailing
    # comma do not make a tuple); use a one-element tuple like the sibling views.
    allowed_methods = ('GET',)

    def get(self, request, pk):
        # Viewing the resource map requires VIEW_METADATA permission.
        view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)
        resmap_url = hydroshare.utils.current_site_url() + AbstractResource.resmap_url(pk)
        return redirect(resmap_url)
class ResourceFileCRUD(APIView):
    """
    Retrieve, add, update or delete a resource file.

    REST URL: hsapi/resource/{pk}/files/{pathname}
    HTTP method: GET
    :type pk: str
    :type pathname: str
    :param pk: resource id
    :param pathname: path of the file to retrieve/download
    :return: redirect to the resource file download

    REST URL: hsapi/resource/{pk}/files/{pathname}
    HTTP method: POST
    Request post data: file data (required)
    :param pk: resource id
    :param pathname: path to the containing folder in the folder hierarchy
    :return: id of the resource and name of the file added
    :rtype: json string of format: {'resource_id':pk, 'file_name': name of the file added}
    (POST without a pathname is handled by ResourceFileListCreate, which stores
    the file in the root directory instead.)

    REST URL: hsapi/resource/{pk}/files/{pathname}
    HTTP method: DELETE
    :param pk: resource id
    :param pathname: path of the file to delete
    :return: id of the resource and name of the file
    :rtype: json string of format: {'resource_id':pk, 'file_name': name of the file deleted}

    PUT is declared but not yet implemented.

    :raises:
    NotFound: return json format: {'detail': 'No resource was found for resource id':pk}
    PermissionDenied: return json format: {'detail': 'You do not have permission to perform
    this action.'}
    ValidationError: return json format: {'parameter-1':['error message-1'],
    'parameter-2': ['error message-2'], .. }
    """
    allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')

    def initialize_request(self, request, *args, **kwargs):
        """
        Hack to work around the following issue in django-rest-framework:

        https://github.com/tomchristie/django-rest-framework/issues/3951

        Couch: This issue was recently closed (10/12/2016, 2 days before this writing)
        and is slated to be incorporated in the Django REST API 3.5.0 release.
        At that time, we should remove this hack.

        :param request: incoming Django HttpRequest (or already-wrapped DRF Request)
        :param args: positional args forwarded to the superclass
        :param kwargs: keyword args forwarded to the superclass
        :return: the DRF Request, with POST/FILES payload preserved across the wrap
        """
        if not isinstance(request, Request):
            # Don't deep copy the file data as it may contain an open file handle
            old_file_data = copy.copy(request.FILES)
            old_post_data = copy.deepcopy(request.POST)
            request = super(ResourceFileCRUD, self).initialize_request(request, *args, **kwargs)
            request.POST.update(old_post_data)
            request.FILES.update(old_file_data)
        return request

    def get(self, request, pk, pathname):
        """Redirect to a download of file *pathname* within resource *pk*."""
        resource, _, _ = view_utils.authorize(
            request, pk,
            needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE)

        if not resource.supports_folders and '/' in pathname:
            return Response("Resource type does not support folders", status.HTTP_403_FORBIDDEN)

        try:
            view_utils.irods_path_is_allowed(pathname)
        except (ValidationError, SuspiciousFileOperation) as ex:
            # BUG FIX: rest_framework.response.Response takes 'status', not
            # 'status_code'; the old keyword raised a TypeError at runtime
            # instead of returning the intended 400 response (compare delete()).
            return Response(ex.message, status=status.HTTP_400_BAD_REQUEST)

        try:
            f = hydroshare.get_resource_file(pk, pathname)
        except ObjectDoesNotExist:
            err_msg = 'File with file name {file_name} does not exist for resource with ' \
                      'resource id {res_id}'.format(file_name=pathname, res_id=pk)
            raise NotFound(detail=err_msg)

        # redirects to django_irods/views.download function
        # use new internal url for rest call
        # TODO: (Couch) Migrate model (with a "data migration") so that this hack is not needed.
        redirect_url = f.url.replace('django_irods/download/', 'django_irods/rest_download/')
        return HttpResponseRedirect(redirect_url)

    def post(self, request, pk, pathname):
        """
        Add a file to a resource.

        :param request:
        :param pk: Primary key of the resource (i.e. resource short ID)
        :param pathname: the path to the containing folder in the folder hierarchy
        :return:

        Leaving out pathname in the URI calls a different class function in
        ResourceFileListCreate that stores in the root directory instead.
        """
        resource, _, _ = view_utils.authorize(request, pk,
                                              needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
        resource_files = request.FILES.values()
        if len(resource_files) == 0:
            error_msg = {'file': 'No file was found to add to the resource.'}
            raise ValidationError(detail=error_msg)
        elif len(resource_files) > 1:
            error_msg = {'file': 'More than one file was found. Only one file can be '
                                 'added at a time.'}
            raise ValidationError(detail=error_msg)

        # TODO: (Brian) I know there has been some discussion when to validate a file
        # I agree that we should not validate and extract metadata as part of the file add api
        # Once we have a decision, I will change this implementation accordingly. In that case
        # we have to implement additional rest endpoints for file validation and extraction.
        try:
            hydroshare.utils.resource_file_add_pre_process(resource=resource,
                                                           files=[resource_files[0]],
                                                           user=request.user,
                                                           extract_metadata=True)
        except (hydroshare.utils.ResourceFileSizeException,
                hydroshare.utils.ResourceFileValidationException, Exception) as ex:
            error_msg = {'file': 'Adding file to resource failed. %s' % ex.message}
            raise ValidationError(detail=error_msg)

        try:
            res_file_objects = hydroshare.utils.resource_file_add_process(
                resource=resource,
                files=[resource_files[0]],
                folder=pathname,
                user=request.user,
                extract_metadata=True)
        except (hydroshare.utils.ResourceFileValidationException, Exception) as ex:
            error_msg = {'file': 'Adding file to resource failed. %s' % ex.message}
            raise ValidationError(detail=error_msg)

        # prepare response data
        file_name = os.path.basename(res_file_objects[0].resource_file.name)
        response_data = {'resource_id': pk, 'file_name': file_name}
        resource_modified(resource, request.user, overwrite_bag=False)
        return Response(data=response_data, status=status.HTTP_201_CREATED)

    def delete(self, request, pk, pathname):
        """Delete file *pathname* from resource *pk* (requires edit permission)."""
        resource, _, user = view_utils.authorize(
            request, pk, needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)

        if not resource.supports_folders and '/' in pathname:
            return Response("Resource type does not support folders", status.HTTP_403_FORBIDDEN)

        try:
            view_utils.irods_path_is_allowed(pathname)  # check for hacking attempts
        except (ValidationError, SuspiciousFileOperation) as ex:
            return Response(ex.message, status=status.HTTP_400_BAD_REQUEST)

        try:
            hydroshare.delete_resource_file(pk, pathname, user)
        except ObjectDoesNotExist as ex:  # matching file not found
            raise NotFound(detail=ex.message)

        # prepare response data
        response_data = {'resource_id': pk, 'file_name': pathname}
        resource_modified(resource, request.user, overwrite_bag=False)
        return Response(data=response_data, status=status.HTTP_200_OK)

    def put(self, request, pk, pathname):
        # TODO: (Brian) Currently we do not have this action for the front end. Will implement
        # in the next iteration. Implement only after we have a decision on when to validate a file
        raise NotImplementedError()
class ResourceFileListCreate(ResourceFileToListItemMixin, generics.ListCreateAPIView):
"""
Create a resource file or retrieve a list of resource files
REST URL: hsapi/resource/{pk}/files/
DEPRECATED: hsapi/resource/{pk}/file_list/
HTTP method: GET
:type pk: str
:type filename: str
:param pk: resource id
:param filename: name of the file to retrieve/download
:return: JSON representation of list of files of the form:
REST URL: POST hsapi/resource/{pk}/files/
HTTP method: POST
Request post data: file data (required)
:type pk: str
:param pk: resource id
:return: id of the resource and name of the file added
:rtype: json string of format: {'resource_id':pk, 'file_name': name of the file added}
{
"count": 2,
"next": null,
"previous": null,
"results": [
{
"url": "http://mill24.cep.unc.edu/django_irods/
download/bd88d2a152894134928c587d38cf0272/data/contents/
mytest_resource/text_file.txt",
"size": 21,
"content_type": "text/plain"
},
{
"url": "http://mill24.cep.unc.edu/django_irods/download/
bd88d2a152894134928c587d38cf0272/data/contents/mytest_resource/a_directory/cea.tif",
"size": 270993,
"content_type": "image/tiff"
}
]
}
:raises:
NotFound: return json format: {'detail': 'No resource was found for resource id':pk}
PermissionDenied: return json format: {'detail': 'You do not have permission to perform
this action.'}
"""
allowed_methods = ('GET', 'POST')
    def initialize_request(self, request, *args, **kwargs):
        """
        Hack to work around the following issue in django-rest-framework:

        https://github.com/tomchristie/django-rest-framework/issues/3951

        Couch: This issue was recently closed (10/12/2016, 2 days before this writing)
        and is slated to be incorporated in the Django REST API 3.5.0 release.
        At that time, we should remove this hack.

        :param request: incoming Django HttpRequest (or already-wrapped DRF Request)
        :param args: positional args forwarded to the superclass
        :param kwargs: keyword args forwarded to the superclass
        :return: the DRF Request, with POST/FILES payload preserved across the wrap
        """
        if not isinstance(request, Request):
            # Don't deep copy the file data as it may contain an open file handle
            old_file_data = copy.copy(request.FILES)
            old_post_data = copy.deepcopy(request.POST)
            request = super(ResourceFileListCreate, self).initialize_request(
                request, *args, **kwargs)
            # Restore the multipart payload that DRF wrapping may have consumed.
            request.POST.update(old_post_data)
            request.FILES.update(old_file_data)
        return request
def get(self, request, pk):
"""
Get a listing of files within a resource.
:param request:
:param pk: Primary key of the resource (i.e. resource short ID)
:return:
"""
return self.list(request)
def get_queryset(self):
resource, _, _ = view_utils.authorize(self.request, self.kwargs['pk'],
needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE)
resource_file_info_list = []
for f in resource.files.all():
resource_file_info_list.append(self.resourceFileToListItem(f))
return resource_file_info_list
    def get_serializer_class(self):
        """Tell DRF which serializer renders the items from get_queryset()."""
        return serializers.ResourceFileSerializer
    def post(self, request, pk):
        """
        Add a file to a resource.

        :param request: must carry exactly one uploaded file in request.FILES
        :param pk: Primary key of the resource (i.e. resource short ID)
        :return: 201 response with {'resource_id': pk, 'file_name': <name>}
        :raises ValidationError: if zero or more than one file was posted, or
            if pre-processing or adding the file fails
        """
        resource, _, _ = view_utils.authorize(request, pk,
                                              needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
        # NOTE(review): resource_files[0] below relies on .values() returning
        # a list (Python 2 semantics); under Python 3 it is a view that does
        # not support indexing -- confirm before porting.
        resource_files = request.FILES.values()
        if len(resource_files) == 0:
            error_msg = {'file': 'No file was found to add to the resource.'}
            raise ValidationError(detail=error_msg)
        elif len(resource_files) > 1:
            error_msg = {'file': 'More than one file was found. Only one file can be '
                                 'added at a time.'}
            raise ValidationError(detail=error_msg)

        # TODO: (Brian) I know there has been some discussion when to validate a file
        # I agree that we should not validate and extract metadata as part of the file add api
        # Once we have a decision, I will change this implementation accordingly. In that case
        # we have to implement additional rest endpoints for file validation and extraction.
        try:
            hydroshare.utils.resource_file_add_pre_process(resource=resource,
                                                           files=[resource_files[0]],
                                                           user=request.user, extract_metadata=True)
        # NOTE(review): listing Exception in the tuple makes this a catch-all;
        # the specific exception types before it are redundant as written.
        # ex.message is also Python-2-only.
        except (hydroshare.utils.ResourceFileSizeException,
                hydroshare.utils.ResourceFileValidationException, Exception) as ex:
            error_msg = {'file': 'Adding file to resource failed. %s' % ex.message}
            raise ValidationError(detail=error_msg)

        try:
            res_file_objects = hydroshare.utils.resource_file_add_process(resource=resource,
                                                                          files=[resource_files[0]],
                                                                          user=request.user,
                                                                          extract_metadata=True)
        except (hydroshare.utils.ResourceFileValidationException, Exception) as ex:
            error_msg = {'file': 'Adding file to resource failed. %s' % ex.message}
            raise ValidationError(detail=error_msg)

        # prepare response data
        file_name = os.path.basename(res_file_objects[0].resource_file.name)
        response_data = {'resource_id': pk, 'file_name': file_name}
        # Record the modification; overwrite_bag=False presumably defers bag
        # regeneration -- confirm against resource_modified's contract.
        resource_modified(resource, request.user, overwrite_bag=False)

        return Response(data=response_data, status=status.HTTP_201_CREATED)
def _validate_metadata(metadata_list):
"""
Make sure the metadata_list does not have data for the following
core metadata elements. Exception is raised if any of the following elements is present
in metadata_list:
title - (endpoint has a title parameter which should be used for specifying resource title)
subject (keyword) - (endpoint has a keywords parameter which should be used for specifying
resource keywords)
description (abstract)- (endpoint has a abstract parameter which should be used for specifying
resource abstract)
publisher - this element is created upon resource publication
format - this element is created by the system based on the resource content files
date - this element is created by the system
type - this element is created by the system
:param metadata_list: list of dicts each representing data for a specific metadata element
:return:
"""
err_message = "Metadata validation failed. Metadata element '{}' was found in value passed " \
"for parameter 'metadata'. Though it's a valid element it can't be passed " \
"as part of 'metadata' parameter."
for element in metadata_list:
# here k is the name of the element
# v is a dict of all element attributes/field names and field values
k, v = element.items()[0]
if k.lower() in ('title', 'subject', 'description', 'publisher', 'format', 'date', 'type'):
err_message = err_message.format(k.lower())
raise ValidationError(detail=err_message)
|
FescueFungiShare/hydroshare
|
hs_core/views/resource_rest_api.py
|
Python
|
bsd-3-clause
| 48,034
|
[
"Brian"
] |
3ec6b78fa4bf94835401dd5b0c0856c894a347abc7066123a1c928f7c63d08e0
|
"""
Backports. Mostly from scikit-learn
"""
import numpy as np
from scipy import linalg
###############################################################################
# For scikit-learn < 0.14
def _pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
Real symmetric or complex hermetian matrix to be pseudo-inverted
cond, rcond : float or None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = _pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
# Prefer scikit-learn's implementation when available (added around 0.14);
# otherwise fall back to the local backport above.
try:
    from sklearn.utils.extmath import pinvh
except ImportError:
    pinvh = _pinvh
def _log_multivariate_normal_density_diag(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means=0.0, covars=1.0):
    """Compute Gaussian log-density at X for a spherical model.

    Each component has a single shared variance per feature row; the
    variances are broadcast to full (n_components, n_features) diagonal
    form and the diagonal implementation is reused.

    Accepts ``covars`` as either (n_components, 1) / (n_components,
    n_features) arrays or a 1-D (n_components,) array. The original code
    checked ``covars.shape[1]`` before promoting 1-D input, which raised
    IndexError for the 1-D case; the check now runs on the promoted copy
    (the 2-D path is unchanged).
    """
    cv = covars.copy()
    if cv.ndim == 1:
        # Promote (n_components,) to a column so the shape check below
        # works for 1-D and 2-D inputs alike.
        cv = cv[:, np.newaxis]
    if cv.shape[1] == 1:
        # One variance per component: replicate it across all features.
        cv = np.tile(cv, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
    """Compute Gaussian log-density at X for a tied model.

    All components share one (n_features, n_features) covariance matrix;
    a single pseudo-inverse therefore serves every component.
    """
    n_samples, n_dim = X.shape
    icv = pinvh(covars)
    # NOTE(review): the "+ 0.1" inside the log below guards log(det) against
    # singular covariances, but it biases the returned log-densities --
    # upstream scikit-learn uses plain log(det). Confirm it is intentional
    # before relying on absolute log-likelihood values from this function.
    lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
                  + np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
                  - 2 * np.dot(np.dot(X, icv), means.T)
                  + np.sum(means * np.dot(means, icv), 1))
    return lpr
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Compute the log probability under a multivariate Gaussian distribution.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds to a
        single data point.
    means : array_like, shape (n_components, n_features)
        List of n_features-dimensional mean vectors for n_components Gaussians.
        Each row corresponds to a single mean vector.
    covars : array_like
        List of n_components covariance parameters for each Gaussian. The shape
        depends on `covariance_type`:
            (n_components, n_features)             if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'
    covariance_type : string
        Type of the covariance parameters. Must be one of
        'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Array containing the log probabilities of each data point in
        X under each of the n_components multivariate Gaussian distributions.
    """
    # Dispatch to the specialized implementation; an unknown type raises
    # KeyError, matching the original behavior.
    density_funcs = {
        'spherical': _log_multivariate_normal_density_spherical,
        'tied': _log_multivariate_normal_density_tied,
        'diag': _log_multivariate_normal_density_diag,
        'full': _log_multivariate_normal_density_full,
    }
    return density_funcs[covariance_type](X, means, covars)
# Prefer scikit-learn's implementation when available; otherwise alias the
# local backport under the public name.
try:
    from sklearn.mixture import log_multivariate_normal_density
except ImportError:
    # New in 0.14
    log_multivariate_normal_density = _log_multivariate_normal_density
def _distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template
"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
# Alias the backport under the public name when scikit-learn does not provide
# one.  NOTE: the original imported the *private*
# ``_distribute_covar_matrix_to_match_covariance_type`` here, which silently
# shadowed the local backport and left the public alias undefined whenever
# the import succeeded; import the public name instead, consistent with the
# pinvh and log_multivariate_normal_density fallbacks above.
try:
    from sklearn.mixture import distribute_covar_matrix_to_match_covariance_type
except ImportError:
    # New in 0.14
    distribute_covar_matrix_to_match_covariance_type = \
        _distribute_covar_matrix_to_match_covariance_type
|
emmaggie/hmmlearn
|
hmmlearn/utils/fixes.py
|
Python
|
bsd-3-clause
| 6,917
|
[
"Gaussian"
] |
f90d527c8386cbb5458998411d0e2c1e84b5f17e13f57bdd12a66952441d7162
|
"""Mixture model for matrix completion"""
from typing import Tuple
import numpy as np
from scipy.special import logsumexp
from common import GaussianMixture
from scipy.special import logsumexp
from scipy.stats import multivariate_normal
def log_pdf_multivariate_gauss(x, mu, cov):
    """Log-density of an isotropic Gaussian N(mu, cov * I) evaluated at x.

    ``x`` and ``mu`` are 1-D arrays of equal length; ``cov`` is the scalar
    variance shared by every dimension (NOT a covariance matrix).
    """
    dim = len(mu)
    # log sqrt(det(cov * I)) = (d / 2) * log(cov)
    half_log_det = (1/2) * dim * np.log(cov)
    diff = x - mu
    # Normalizer of the multivariate normal in log space.
    normalizer = -(dim / 2) * np.log(2 * np.pi) - half_log_det
    # Quadratic form reduces to ||x - mu||^2 / cov for isotropic covariance.
    quadratic = (-1/2) * diff.dot(diff) / cov
    return normalizer + quadratic
def pdf_multivariate_gauss(x, mu, cov):
    """Density of a multivariate Gaussian N(mu, cov) at the point x.

    ``cov`` is a full (d, d) covariance matrix; ``x`` and ``mu`` are 1-D
    arrays of length d.  Returns a Python float.
    """
    assert(cov.shape[0] == cov.shape[1]), 'covariance matrix must be square'
    assert(mu.shape[0] == cov.shape[0]), 'cov_mat and mu_vec must have the same dimensions'
    assert(mu.shape[0] == x.shape[0]), 'mu and x must have the same dimensions'
    # 1 / ((2 pi)^(d/2) * sqrt(det(cov)))
    norm_const = 1 / (((2 * np.pi) ** (len(mu) / 2)) * (np.linalg.det(cov) ** (1 / 2)))
    diff = x - mu
    exponent = (-1 / 2) * (diff.T.dot(np.linalg.inv(cov))).dot(diff)
    return float(norm_const * np.exp(exponent))
def Gaussian(X, mu, var):
    # Thin wrapper kept for API compatibility; delegates to the exact pdf
    # above.  NOTE(review): despite the name, `var` must be a full covariance
    # matrix here (pdf_multivariate_gauss asserts it is square) -- the
    # commented-out scipy call below treated it as a scalar variance instead;
    # confirm which contract callers rely on.
    return pdf_multivariate_gauss(X,mu,var)
    #return multivariate_normal.pdf(X, mean=mu, cov=np.eye(mu.shape[0])*var)
# from multiprocessing.dummy import Pool as ThreadPool
# pool = ThreadPool(8)
def estep(X: np.ndarray, mixture: GaussianMixture) -> Tuple[np.ndarray, float]:
    """E-step: Softly assigns each datapoint to a gaussian component

    Only the observed (non-zero) entries of each row contribute to the
    component likelihoods; zeros encode missing values.

    Args:
        X: (n, d) array holding the data, with incomplete entries (set to 0)
        mixture: the current gaussian mixture

    Returns:
        np.ndarray: (n, K) array holding the soft counts
            for all components for all examples
        float: log-likelihood of the assignment
    """
    # softcounts[n, k] accumulates log p_k + log N(x_n | mu_k, var_k):
    # the unnormalized log posterior of component k for row n.
    softcounts = np.zeros((X.shape[0], mixture.mu.shape[0]))
    nz = X != 0  # mask of observed entries
    cus = {}
    for n in range(X.shape[0]):
        cus[n] = X[n, nz[n]]  # observed entries of row n, precomputed once
    nzcount = np.count_nonzero(X, axis=1)
    for k in range(mixture.mu.shape[0]):
        softcounts[:,k] += np.log(mixture.p[k] + 1e-16)  # 1e-16 guards log(0)
        for n in range(X.shape[0]):
            # Fully-missing rows keep only the prior term.
            if(nzcount[n] != 0):
                idx = nz[n]
                row = cus[n]
                rowmu = mixture.mu[k, idx]
                #rowvar = mixture.var[k]*np.eye(row.shape[0])
                rowvar = mixture.var[k]
                l = log_pdf_multivariate_gauss(row, rowmu, rowvar)
                softcounts[n,k] += l
    # Normalize in log space; the total log-likelihood is the sum of the
    # per-row log normalizers.
    denominator = logsumexp(softcounts, axis=1)
    log_likelihood = np.sum(denominator)
    denominator = denominator.repeat(mixture.mu.shape[0]).reshape((X.shape[0], mixture.mu.shape[0]))
    post = np.exp(softcounts-denominator)
    return (post, log_likelihood)
def mstep(X: np.ndarray, post: np.ndarray, mixture: GaussianMixture,
          min_variance: float = .25) -> GaussianMixture:
    """M-step: Updates the gaussian mixture by maximizing the log-likelihood
    of the weighted dataset

    Zeros in X encode missing entries; only observed entries contribute to
    the mean and variance updates.

    Args:
        X: (n, d) array holding the data, with incomplete entries (set to 0)
        post: (n, K) array holding the soft counts
            for all components for all examples
        mixture: the current gaussian mixture
        min_variance: the minimum variance for each gaussian

    Returns:
        GaussianMixture: the new gaussian mixture
    """
    new_mus = np.zeros((post.shape[1], X.shape[1]))
    # new_mus_d[k, l]: total responsibility mass of component k over the
    # users that observed column l (denominator of the mean update).
    new_mus_d = np.zeros((post.shape[1], X.shape[1]))
    new_vars = np.zeros((post.shape[1], ))
    new_ps = np.zeros((post.shape[1], ))
    zero_one_by_l = np.where(X != 0, 1, 0)  # 0/1 indicator of observed entries
    for k in range(post.shape[1]):
        new_vars[k] = 0
        post_all_k = post[:,k]
        for l in range(X.shape[1]):
            prob_cu = post_all_k * zero_one_by_l[:,l]
            new_mus_d[k,l] += np.sum(prob_cu)
            if (new_mus_d[k,l] >= 1):
                x_all_l = X[:,l]
                new_mus[k,l] = np.sum(prob_cu * x_all_l) / new_mus_d[k,l]
            else:
                # Too little support for a stable estimate: keep the old mean.
                new_mus[k,l] = mixture.mu[k,l]
        denominator = 0
        new_vars[k] = 0
        for u in range(X.shape[0]):
            # Each user contributes (number of observed entries) units of
            # weight to the shared per-component variance.
            denominator += np.count_nonzero(X[u,:]) * post[u,k]
            w = zero_one_by_l[u,:]
            new_vars[k] += np.sum(post[u,k] * w * (X[u,:] - new_mus[k,:])**2)
        new_vars[k] /= denominator
        # Floor the variance at the caller-supplied minimum.  (The original
        # hard-coded 0.25 here, silently ignoring the min_variance parameter;
        # 0.25 is its default, so default behavior is unchanged.)
        if(new_vars[k] < min_variance): new_vars[k] = min_variance
        new_ps[k] += np.sum(post_all_k) / X.shape[0]
    return GaussianMixture(new_mus, new_vars, new_ps)
def run(X: np.ndarray, mixture: GaussianMixture,
        post: np.ndarray) -> Tuple[GaussianMixture, np.ndarray, float]:
    """Runs the mixture model

    Alternates E- and M-steps until the log-likelihood improvement drops
    below 1e-6 of its magnitude, or 1000 iterations elapse.

    Args:
        X: (n, d) array holding the data
        post: (n, K) array holding the soft counts
            for all components for all examples

    Returns:
        GaussianMixture: the new gaussian mixture
        np.ndarray: (n, K) array holding the soft counts
            for all components for all examples
        float: log-likelihood of the current assignment
    """
    #print(mixture)
    old_log_likelihood = None
    max_steps = 1000  # hard cap on EM iterations
    converged = False
    while((not converged) and (max_steps > 0)):
        # 'e' / 'm' characters are progress markers for long runs.
        print('e', end='', flush=True)
        post, new_log_likelihood = estep(X, mixture)
        print('m', end='', flush=True)
        mixture = mstep(X, post, mixture)
        if(old_log_likelihood == None):
            # First iteration: nothing to compare against yet.
            old_log_likelihood = new_log_likelihood
        else:
            diff = new_log_likelihood - old_log_likelihood
            # Relative convergence threshold: improvement <= 1e-6 * |LL|.
            margin = 1e-6 * np.abs(new_log_likelihood)
            #print(diff, margin)
            if(diff <= margin):
                converged = True
            old_log_likelihood = new_log_likelihood
        max_steps -= 1
    return mixture, post, old_log_likelihood
def fill_matrix(X: np.ndarray, mixture: GaussianMixture) -> np.ndarray:
    """Fills an incomplete matrix according to a mixture model

    Args:
        X: (n, d) array of incomplete data (incomplete entries =0)
        mixture: a mixture of gaussians

    Returns
        np.ndarray: a (n, d) array with completed data
    """
    # --- Recompute the E-step posteriors. NOTE(review): this duplicates
    # estep() minus the log-likelihood bookkeeping; consider reusing it. ---
    softcounts = np.zeros((X.shape[0], mixture.mu.shape[0]))
    nz = X != 0  # observed-entry mask; 0 encodes "missing"
    cus = {}
    for n in range(X.shape[0]):
        cus[n] = X[n, nz[n]]
    nzcount = np.count_nonzero(X, axis=1)
    for k in range(mixture.mu.shape[0]):
        softcounts[:,k] += np.log(mixture.p[k] + 1e-16)  # 1e-16 guards log(0)
        for n in range(X.shape[0]):
            if(nzcount[n] != 0):
                idx = nz[n]
                row = cus[n]
                rowmu = mixture.mu[k, idx]
                #rowvar = mixture.var[k]*np.eye(row.shape[0])
                rowvar = mixture.var[k]
                l = log_pdf_multivariate_gauss(row, rowmu, rowvar)
                softcounts[n,k] += l
    # Normalize in log space, then exponentiate to get posteriors.
    denominator = logsumexp(softcounts, axis=1)
    denominator = denominator.repeat(mixture.mu.shape[0]).reshape((X.shape[0], mixture.mu.shape[0]))
    post = np.exp(softcounts-denominator)
    # Replace each missing entry with its posterior-weighted mixture mean;
    # observed entries are kept as-is.
    Xstar = np.copy(X)
    for u in range(X.shape[0]):
        for l in range(X.shape[1]):
            if(X[u,l] == 0):
                Xstar[u,l] = np.sum(post[u,:] * mixture.mu[:,l])
    return Xstar
|
xunilrj/sandbox
|
courses/MITx/MITx 6.86x Machine Learning with Python-From Linear Models to Deep Learning/project4/netflix/em.py
|
Python
|
apache-2.0
| 7,765
|
[
"Gaussian"
] |
2b8c5bfbf5b94357dfd1f1c4d2936500a44405c2933fb6293fd729be7415dbe1
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performs client tasks for testing IMAP OAuth2 authentication.
To use this script, you'll need to have registered with Google as an OAuth
application and obtained an OAuth client ID and client secret.
See http://code.google.com/apis/accounts/docs/OAuth2.html for instructions on
registering and for documentation of the APIs invoked by this code.
This script has 3 modes of operation.
1. The first mode is used to generate and authorize an OAuth2 token, the
first step in logging in via OAuth2.
oauth2 --user=xxx@gmail.com \
--client_id=1038[...].apps.googleusercontent.com \
--client_secret=VWFn8LIKAMC-MsjBMhJeOplZ \
--generate_oauth2_token
The script will converse with Google and generate an oauth request
token, then present you with a URL you should visit in your browser to
authorize the token. Once you get the verification code from the Google
website, enter it into the script to get your OAuth access token. The output
from this command will contain the access token, a refresh token, and some
metadata about the tokens. The access token can be used until it expires, and
the refresh token lasts indefinitely, so you should record these values for
reuse.
2. The script will generate new access tokens using a refresh token.
oauth2 --user=xxx@gmail.com \
--client_id=1038[...].apps.googleusercontent.com \
--client_secret=VWFn8LIKAMC-MsjBMhJeOplZ \
--refresh_token=1/Yzm6MRy4q1xi7Dx2DuWXNgT6s37OrP_DW_IoyTum4YA
3. The script will generate an OAuth2 string that can be fed
directly to IMAP or SMTP. This is triggered with the --generate_oauth2_string
option.
oauth2 --generate_oauth2_string --user=xxx@gmail.com \
--access_token=ya29.AGy[...]ezLg
The output of this mode will be a base64-encoded string. To use it, connect to a
IMAPFE and pass it as the second argument to the AUTHENTICATE command.
a AUTHENTICATE XOAUTH2 a9sha9sfs[...]9dfja929dk==
"""
import base64
import imaplib
import json
from optparse import OptionParser
import smtplib
import sys
import urllib
def SetupOptionParser():
  """Builds the OptionParser describing this script's command-line flags.

  Returns:
    An optparse.OptionParser whose usage text is the module docstring.
  """
  # Usage message is the module's docstring.
  parser = OptionParser(usage=__doc__)
  parser.add_option('--generate_oauth2_token',
                    action='store_true',
                    dest='generate_oauth2_token',
                    help='generates an OAuth2 token for testing')
  parser.add_option('--generate_oauth2_string',
                    action='store_true',
                    dest='generate_oauth2_string',
                    help='generates an initial client response string for '
                         'OAuth2')
  parser.add_option('--client_id',
                    default=None,
                    help='Client ID of the application that is authenticating. '
                         'See OAuth2 documentation for details.')
  parser.add_option('--client_secret',
                    default=None,
                    help='Client secret of the application that is '
                         'authenticating. See OAuth2 documentation for '
                         'details.')
  parser.add_option('--access_token',
                    default=None,
                    help='OAuth2 access token')
  parser.add_option('--refresh_token',
                    default=None,
                    help='OAuth2 refresh token')
  parser.add_option('--scope',
                    default='https://mail.google.com/',
                    help='scope for the access token. Multiple scopes can be '
                         'listed separated by spaces with the whole argument '
                         'quoted.')
  parser.add_option('--test_imap_authentication',
                    action='store_true',
                    dest='test_imap_authentication',
                    help='attempts to authenticate to IMAP')
  parser.add_option('--test_smtp_authentication',
                    action='store_true',
                    dest='test_smtp_authentication',
                    help='attempts to authenticate to SMTP')
  parser.add_option('--user',
                    default=None,
                    help='email address of user whose account is being '
                         'accessed')
  return parser
# The URL root for accessing Google Accounts.
GOOGLE_ACCOUNTS_BASE_URL = 'https://accounts.google.com'


# Hardcoded dummy redirect URI for non-web apps.
REDIRECT_URI = 'REDIRECTURI FOR YOUR APP'


def AccountsUrl(command):
  """Builds the full Google Accounts URL for a command path.

  Args:
    command: The command to execute.

  Returns:
    A URL for the given command.
  """
  return '{0}/{1}'.format(GOOGLE_ACCOUNTS_BASE_URL, command)
def UrlEscape(text):
  # See OAUTH 5.1 for a definition of which characters need to be escaped.
  # NOTE: urllib.quote is Python 2 only (urllib.parse.quote on Python 3).
  return urllib.quote(text, safe='~-._')
def UrlUnescape(text):
  # See OAUTH 5.1 for a definition of which characters need to be escaped.
  # NOTE: urllib.unquote is Python 2 only (urllib.parse.unquote on Python 3).
  return urllib.unquote(text)
def FormatUrlParams(params):
  """Formats parameters into a URL query string.

  Args:
    params: A key-value map.

  Returns:
    A URL query string version of the given parameters.
  """
  param_fragments = []
  # Sort by key so the resulting query string is deterministic.
  # NOTE: dict.iteritems() is Python 2 only (items() on Python 3).
  for param in sorted(params.iteritems(), key=lambda x: x[0]):
    param_fragments.append('%s=%s' % (param[0], UrlEscape(param[1])))
  return '&'.join(param_fragments)
def GeneratePermissionUrl(client_id,useremail, scope='https://mail.google.com/ https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email'):
  """Generates the URL for authorizing access.

  This uses the "OAuth2 for Installed Applications" flow described at
  https://developers.google.com/accounts/docs/OAuth2InstalledApp

  Args:
    client_id: Client ID obtained by registering your app.
    useremail: Email address of the user; sent as the OAuth 'state'
        parameter so it is returned with the redirect.
    scope: scope for access token, e.g. 'https://mail.google.com'

  Returns:
    A URL that the user should visit in their browser.
  """
  # NOTE(review): main() calls this as GeneratePermissionUrl(client_id,
  # scope), which lands the scope string in `useremail` and uses the default
  # scope -- confirm callers pass the email explicitly.
  params = {}
  params['client_id'] = client_id
  params['redirect_uri'] = REDIRECT_URI
  params['scope'] = scope
  params['state'] = useremail
  params['response_type'] = 'code'
  return '%s?%s' % (AccountsUrl('o/oauth2/auth'),
                    FormatUrlParams(params))
def AuthorizeTokens(client_id, client_secret, authorization_code):
  """Obtains OAuth access token and refresh token.

  This uses the application portion of the "OAuth2 for Installed Applications"
  flow at https://developers.google.com/accounts/docs/OAuth2InstalledApp#handlingtheresponse

  Args:
    client_id: Client ID obtained by registering your app.
    client_secret: Client secret obtained by registering your app.
    authorization_code: code generated by Google Accounts after user grants
        permission.

  Returns:
    The decoded response from the Google Accounts server, as a dict. Expected
    fields include 'access_token', 'expires_in', and 'refresh_token'.
  """
  params = {}
  params['client_id'] = client_id
  params['client_secret'] = client_secret
  params['code'] = authorization_code
  params['redirect_uri'] = REDIRECT_URI
  params['grant_type'] = 'authorization_code'
  request_url = AccountsUrl('o/oauth2/token')
  # NOTE: urllib.urlopen is Python 2 only and performs no explicit HTTP
  # error handling; error responses surface from json.loads or as error
  # fields in the decoded dict.
  response = urllib.urlopen(request_url, urllib.urlencode(params)).read()
  return json.loads(response)
def RefreshToken(client_id, client_secret, refresh_token):
  """Obtains a fresh access token from a long-lived refresh token.

  See https://developers.google.com/accounts/docs/OAuth2InstalledApp#refresh

  Args:
    client_id: Client ID obtained by registering your app.
    client_secret: Client secret obtained by registering your app.
    refresh_token: A previously-obtained refresh token.

  Returns:
    The decoded response from the Google Accounts server, as a dict. Expected
    fields include 'access_token', 'expires_in', and 'refresh_token'.
  """
  params = {
      'client_id': client_id,
      'client_secret': client_secret,
      'refresh_token': refresh_token,
      'grant_type': 'refresh_token',
  }
  request_url = AccountsUrl('o/oauth2/token')
  response = urllib.urlopen(request_url, urllib.urlencode(params)).read()
  return json.loads(response)
def GenerateOAuth2String(username, access_token, base64_encode=True):
  """Builds the SASL XOAUTH2 initial client response.

  See https://developers.google.com/google-apps/gmail/oauth2_overview

  Args:
    username: the username (email address) of the account to authenticate
    access_token: An OAuth2 access token.
    base64_encode: Whether to base64-encode the output.

  Returns:
    The SASL argument for the OAuth2 mechanism.
  """
  # \1 (Ctrl-A) separators are mandated by the XOAUTH2 wire format.
  auth_string = 'user={0}\1auth=Bearer {1}\1\1'.format(username, access_token)
  if base64_encode:
    auth_string = base64.b64encode(auth_string)
  return auth_string
def TestImapAuthentication(user, auth_string):
  """Authenticates to IMAP with the given auth_string.

  Prints a debug trace of the attempted IMAP connection.

  Args:
    user: The Gmail username (full email address). NOTE: unused here; the
        identity travels inside auth_string.
    auth_string: A valid OAuth2 string, as returned by GenerateOAuth2String.
        Must not be base64-encoded, since imaplib does its own base64-encoding.
  """
  print
  imap_conn = imaplib.IMAP4_SSL('imap.gmail.com')
  imap_conn.debug = 4  # most verbose imaplib protocol trace
  imap_conn.authenticate('XOAUTH2', lambda x: auth_string)
  imap_conn.select('INBOX')
def TestSmtpAuthentication(user, auth_string):
  """Authenticates to SMTP with the given auth_string.

  Args:
    user: The Gmail username (full email address). NOTE: unused here; the
        identity travels inside auth_string.
    auth_string: A valid OAuth2 string, not base64-encoded, as returned by
        GenerateOAuth2String.
  """
  print
  smtp_conn = smtplib.SMTP('smtp.gmail.com', 587)
  smtp_conn.set_debuglevel(True)
  smtp_conn.ehlo('test')
  smtp_conn.starttls()
  # AUTH XOAUTH2 expects the base64-encoded SASL argument on the wire.
  smtp_conn.docmd('AUTH', 'XOAUTH2 ' + base64.b64encode(auth_string))
def RequireOptions(options, *args):
  """Exits the process if any of the named options is unset (None)."""
  missing = [arg for arg in args if getattr(options, arg) is None]
  if missing:
    print 'Missing options: %s' % ' '.join(missing)
    sys.exit(-1)
def main(argv):
options_parser = SetupOptionParser()
(options, args) = options_parser.parse_args()
if options.refresh_token:
RequireOptions(options, 'client_id', 'client_secret')
response = RefreshToken(options.client_id, options.client_secret,
options.refresh_token)
print 'Access Token: %s' % response['access_token']
print 'Access Token Expiration Seconds: %s' % response['expires_in']
elif options.generate_oauth2_string:
RequireOptions(options, 'user', 'access_token')
print ('OAuth2 argument:\n' +
GenerateOAuth2String(options.user, options.access_token))
elif options.generate_oauth2_token:
RequireOptions(options, 'client_id', 'client_secret')
print 'To authorize token, visit this url and follow the directions:'
print ' %s' % GeneratePermissionUrl(options.client_id, options.scope)
authorization_code = raw_input('Enter verification code: ')
response = AuthorizeTokens(options.client_id, options.client_secret,
authorization_code)
print 'Refresh Token: %s' % response['refresh_token']
print 'Access Token: %s' % response['access_token']
print 'Access Token Expiration Seconds: %s' % response['expires_in']
elif options.test_imap_authentication:
RequireOptions(options, 'user', 'access_token')
TestImapAuthentication(options.user,
GenerateOAuth2String(options.user, options.access_token,
base64_encode=False))
elif options.test_smtp_authentication:
RequireOptions(options, 'user', 'access_token')
TestSmtpAuthentication(options.user,
GenerateOAuth2String(options.user, options.access_token,
base64_encode=False))
else:
options_parser.print_help()
print 'Nothing to do, exiting.'
return
|
codeanu/flask-login-oauth2
|
modules/oauth2.py
|
Python
|
bsd-3-clause
| 12,286
|
[
"VisIt"
] |
43abe0d819291ec4948b377aca4cdbc5ed67d6769de97640f6af86f2386afee7
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gromacs(CMakePackage):
    """GROMACS (GROningen MAchine for Chemical Simulations) is a molecular
    dynamics package primarily designed for simulations of proteins, lipids
    and nucleic acids. It was originally developed in the Biophysical
    Chemistry department of University of Groningen, and is now maintained
    by contributors in universities and research centers across the world.

    GROMACS is one of the fastest and most popular software packages
    available and can run on CPUs as well as GPUs. It is free, open source
    released under the GNU General Public License. Starting from version 4.6,
    GROMACS is released under the GNU Lesser General Public License.
    """

    homepage = 'http://www.gromacs.org'
    url = 'http://ftp.gromacs.org/gromacs/gromacs-5.1.2.tar.gz'

    # Released tarballs (md5 checksums) plus the upstream development branch.
    version('2016.4', '19c8b5c85f3ec62df79d2249a3c272f8')
    version('2016.3', 'e9e3a41bd123b52fbcc6b32d09f8202b')
    version('5.1.4', 'ba2e34d59b3982603b4935d650c08040')
    version('5.1.2', '614d0be372f1a6f1f36382b7a6fcab98')
    version('develop', git='https://github.com/gromacs/gromacs', branch='master')

    # User-facing build options (e.g. `spack install gromacs+mpi~shared`).
    variant('mpi', default=True, description='Activate MPI support')
    variant('shared', default=True,
            description='Enables the build of shared libraries')
    variant(
        'double', default=False,
        description='Produces a double precision version of the executables')
    variant('plumed', default=False, description='Enable PLUMED support')
    variant('cuda', default=False, description='Enable CUDA support')
    variant('build_type', default='RelWithDebInfo',
            description='The build type to build',
            values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel',
                    'Reference', 'RelWithAssert', 'Profile'))

    # PLUMED must match GROMACS's own MPI configuration.
    depends_on('mpi', when='+mpi')
    depends_on('plumed+mpi', when='+plumed+mpi')
    depends_on('plumed~mpi', when='+plumed~mpi')
    depends_on('fftw')
    depends_on('cmake@2.8.8:', type='build')
    depends_on('cuda', when='+cuda')

    def patch(self):
        # PLUMED distributes its own patch sets for supported GROMACS
        # releases; let the plumed package apply the one matching this spec.
        if '+plumed' in self.spec:
            self.spec['plumed'].package.apply_patch(self)

    def cmake_args(self):
        """Translate the active spec variants into GROMACS CMake options."""
        options = []

        if '+mpi' in self.spec:
            options.append('-DGMX_MPI:BOOL=ON')

        if '+double' in self.spec:
            options.append('-DGMX_DOUBLE:BOOL=ON')

        if '~shared' in self.spec:
            options.append('-DBUILD_SHARED_LIBS:BOOL=OFF')

        if '+cuda' in self.spec:
            options.append('-DGMX_GPU:BOOL=ON')
            # Point GROMACS at the CUDA toolkit spack resolved for this spec.
            options.append('-DCUDA_TOOLKIT_ROOT_DIR:STRING=' +
                           self.spec['cuda'].prefix)

        return options
|
skosukhin/spack
|
var/spack/repos/builtin/packages/gromacs/package.py
|
Python
|
lgpl-2.1
| 3,939
|
[
"Gromacs"
] |
f928db7daea4b4478b8089c14e17980fc1d38d8c1f207ac61757fcd5b271b103
|
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import numpy as np
import pandas as pd
from pymatgen import Element
from veidt.potential.abstract import Potential
from veidt.potential.processing import pool_from, convert_docs
from veidt.describer.atomic_describer import BispectrumCoefficients
from veidt.potential.lammps.calcs import EnergyForceStress
class SNAPotential(Potential):
    """
    This class implements Spectral Neighbor Analysis Potential (SNAP).

    It couples a supervised-learning model (whose describer is expected to
    be a BispectrumCoefficients instance -- see ``write_param``) with the
    LAMMPS ``pair_style snap`` file format: ``train``/``evaluate``/``predict``
    go through the model, while ``write_param`` serializes the fitted
    coefficients into the .snapcoeff/.snapparam files that LAMMPS reads.
    """
    # LAMMPS settings; ``pair_coeff`` is filled in by write_param().
    pair_style = 'pair_style snap'
    pair_coeff = 'pair_coeff * * {coeff_file} {elements} {param_file} {specie}'

    def __init__(self, model, name=None):
        """
        Initialize the SNAPotential Potential with atomic describer
        and model, which are used to generate the Bispectrum coefficients
        features for structures and to train the parameters.

        Args:
            model (Model): Model to perform supervised learning with
                atomic descriptos as features and properties as targets.
            name (str): Name of force field.
        """
        self.name = name if name else 'SNAPotential'
        self.model = model
        # Set by train(); required before write_param() can be called.
        self.specie = None

    def train(self, train_structures, energies, forces, stresses=None, **kwargs):
        """
        Fit the underlying model on the training data.

        Args:
            train_structures ([Structure]): The list of Pymatgen Structure
                objects.
            energies ([float]): List of total energies of each structure in
                structures list.
            forces ([np.array]): List of (m, 3) forces array of each structure
                with m atoms in structures list. m can be varied with each
                single structure case.
            stresses (list): List of (6, ) virial stresses of each
                structure in structures list.
        """
        train_pool = pool_from(train_structures, energies, forces, stresses)
        _, df = convert_docs(train_pool)
        # Targets are normalized by df['n'] -- presumably the per-entry atom
        # count produced by convert_docs; verify against that helper.
        ytrain = df['y_orig'] / df['n']
        self.model.fit(inputs=train_structures, outputs=ytrain, **kwargs)
        # NOTE(review): only the first element of the first structure is
        # recorded, so single-element systems appear to be assumed -- confirm.
        self.specie = Element(train_structures[0].symbol_set[0])

    def evaluate(self, test_structures, ref_energies, ref_forces, ref_stresses):
        """
        Evaluate energies, forces and stresses of structures with trained
        interatomic potential.

        Args:
            test_structures ([Structure]): List of Pymatgen Structure Objects.
            ref_energies ([float]): List of DFT-calculated total energies of
                each structure in structures list.
            ref_forces ([np.array]): List of DFT-calculated (m, 3) forces of
                each structure with m atoms in structures list. m can be varied
                with each single structure case.
            ref_stresses (list): List of DFT-calculated (6, ) viriral stresses
                of each structure in structures list.

        Returns:
            (df_orig, df_predict) dataframes with reference and predicted
            values respectively.
        """
        predict_pool = pool_from(test_structures, ref_energies,
                                 ref_forces, ref_stresses)
        _, df_orig = convert_docs(predict_pool)
        _, df_predict = convert_docs(pool_from(test_structures))
        outputs = self.model.predict(inputs=test_structures, override=True)
        # Undo the per-n normalization used during training (see train()).
        df_predict['y_orig'] = df_predict['n'] * outputs
        return df_orig, df_predict

    def predict(self, structure):
        """
        Predict energy, forces and stresses of the structure via a LAMMPS
        single-point calculation with the current parameters.

        Args:
            structure (Structure): Pymatgen Structure object.

        Returns:
            energy, forces, stress
        """
        # outputs = self.model.predict([structure])
        # energy = outputs[0]
        # forces = outputs[1:].reshape(len(structure), 3)
        calculator = EnergyForceStress(ff_settings=self)
        energy, forces, stress = calculator.calculate(structures=[structure])[0]
        return energy, forces, stress

    def write_param(self):
        """
        Write the .snapparam and .snapcoeff files needed to run a LAMMPS
        calculation, and return the matching pair_style/pair_coeff lines.

        Raises:
            ValueError: if the potential has not been trained yet
                (``self.specie`` unset).
        """
        if not self.specie:
            raise ValueError("No specie given!")
        param_file = '{}.snapparam'.format(self.name)
        coeff_file = '{}.snapcoeff'.format(self.name)
        model = self.model
        # ncoeff = len(model.coef)
        describer = self.model.describer
        profile = describer.element_profile
        # Elements sorted in pymatgen's canonical Element order.
        elements = [element.symbol for element
                    in sorted([Element(e) for e in profile.keys()])]
        ne = len(elements)
        nbc = len(describer.subscripts)
        if describer.quadratic:
            # Quadratic SNAP adds all unique pairwise products of the
            # linear bispectrum terms.
            nbc += int((1 + nbc) * nbc / 2)
        # tjm/diag are referenced only by the commented-out sanity check below.
        tjm = describer.twojmax
        diag = describer.diagonalstyle
        # assert ncoeff == ne * (nbc + 1),\
        #     '{} coefficients given. '.format(ncoeff) + \
        #     '{} ({} * ({} + 1)) '.format(ne * (nbc + 1), ne, nbc) + \
        #     'coefficients expected ' + \
        #     'for twojmax={} and diagonalstyle={}.'.format(tjm, diag)
        # Coefficient file: header line, then one (element r w) section
        # followed by that element's coefficients.
        coeff_lines = []
        coeff_lines.append('{} {}'.format(ne, nbc + 1))
        for element, coeff in zip(elements, np.split(model.coef, ne)):
            coeff_lines.append('{} {} {}'.format(element,
                                                 profile[element]['r'],
                                                 profile[element]['w']))
            coeff_lines.extend([str(c) for c in coeff])
        with open(coeff_file, 'w') as f:
            f.write('\n'.join(coeff_lines))
        # Parameter file: one "key value" line per describer hyperparameter.
        param_lines = []
        keys = ['rcutfac', 'twojmax', 'rfac0', 'rmin0', 'diagonalstyle']
        param_lines.extend(['{} {}'.format(k, getattr(describer, k))
                            for k in keys])
        param_lines.append('quadraticflag {}'.format(int(describer.quadratic)))
        param_lines.append('bzeroflag 0')
        with open(param_file, 'w') as f:
            f.write('\n'.join(param_lines))
        pair_coeff = self.pair_coeff.format(elements=' '.join(elements),
                                            specie=self.specie.name,
                                            coeff_file=coeff_file,
                                            param_file=param_file)
        ff_settings = [self.pair_style, pair_coeff]
        return ff_settings

    def save(self, filename):
        """
        Save parameters of the potential.

        Args:
            filename (str): The file to store parameters of potential.

        Returns:
            (str) the filename that was written.
        """
        self.model.save(filename=filename)
        return filename
|
czhengsci/veidt
|
veidt/potential/snap.py
|
Python
|
bsd-3-clause
| 6,745
|
[
"LAMMPS",
"pymatgen"
] |
7cc01bdcce2cbd34d6c7a28c05e2ea96940241cc42adaa356fae03be7caf0ffd
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
from roppy.sample import sample2D, sample2DU, sample2DV
from roppy.depth import sdepth
class Section(object):
    """Class for handling sections in a ROMS grid

    The section is defined by a sequence of nodes, supposedly quite close
    The endpoints of the section are nodes

    The grid information is defined by a grid object having attributes
    h, pm, pn, hc, Cs_r, Cs_w, Vtransform
    with the ROMS variables of the same netCDF names

    Defined by sequences of grid coordinates of section nodes
    """

    def __init__(self, grid, X, Y):
        self.grid = grid
        # Vertices, in subgrid coordinates
        self.X = X
        self.Y = Y
        # Section size
        self.L = len(self.X)  # Number of nodes
        self.N = len(self.grid.Cs_r)  # Number of vertical s-levels
        # Topography: depth sampled at the nodes; masked (land) points get
        # undef_value=1.0 -- presumably a nominal depth, confirm with sample2D.
        self.h = sample2D(
            self.grid.h, self.X, self.Y, mask=self.grid.mask_rho, undef_value=1.0
        )
        # Metric: pm, pn are the ROMS reciprocal metric factors (1/dx, 1/dy
        # per ROMS convention), averaged between consecutive nodes.
        pm = sample2D(self.grid.pm, self.X, self.Y)
        pn = sample2D(self.grid.pn, self.X, self.Y)
        dX = 2 * (X[1:] - X[:-1]) / (pm[:-1] + pm[1:])  # unit = meter
        dY = 2 * (Y[1:] - Y[:-1]) / (pn[:-1] + pn[1:])
        # Assume spacing is close enough to approximate distance
        self.dS = np.sqrt(dX * dX + dY * dY)
        # Cumulative distance
        self.S = np.concatenate(([0], np.add.accumulate(self.dS)))
        # Weights for trapez integration (linear interpolation)
        self.W = 0.5 * np.concatenate(
            ([self.dS[0]], self.dS[:-1] + self.dS[1:], [self.dS[-1]])
        )
        # nx, ny = dY, -dX
        # norm = np.sqrt(nx*nx + ny*ny)
        # self.nx, self.ny = nx/norm, ny/norm
        # Vertical structure: depths of rho- and w-points at each node.
        self.z_r = sdepth(
            self.h,
            self.grid.hc,
            self.grid.Cs_r,
            stagger="rho",
            Vtransform=self.grid.Vtransform,
        )
        self.z_w = sdepth(
            self.h,
            self.grid.hc,
            self.grid.Cs_w,
            stagger="w",
            Vtransform=self.grid.Vtransform,
        )
        # Layer thickness and per-cell cross-section area (thickness x width).
        self.dZ = self.z_w[1:, :] - self.z_w[:-1, :]
        self.Area = self.dZ * self.W

    def __len__(self):
        # Number of nodes along the section.
        return self.L

    def sample2D(self, F):
        """Sample a horizontal field at rho poins with shape (Mp, Lp)"""
        return sample2D(F, self.X, self.Y, mask=self.grid.mask_rho)

    def sample3D(self, F):
        """Sample a 3D field in rho-points with shape (N,Mp,Lp)"""
        # Not masked ??
        Fsec = np.zeros((self.N, self.L))
        for k in range(self.N):
            Fsec[k, :] = sample2D(F[k, :, :], self.X, self.Y, mask=self.grid.mask_rho)
        return Fsec
def linear_section(i0, i1, j0, j1, grd):
    """Make a straight section between the rho-points (i0, j0) and (i1, j1).

    Steps one grid cell at a time along the axis with the larger extent and
    interpolates the other coordinate linearly (similar to romstools'
    tools.transect).  Returns a Section object.
    """
    if abs(i1 - i0) >= abs(j0 - j1):
        # March along the X (i) axis.
        if i0 == i1:
            # Larger extent is zero, so both endpoints coincide.
            raise ValueError("Section reduced to a point")
        step = 1 if i0 < i1 else -1
        X = np.arange(i0, i1 + step, step)
        Y = j0 + float(j1 - j0) / (i1 - i0) * (X - i0)
    else:
        # March along the Y (j) axis; j0 != j1 is guaranteed here.
        step = 1 if j0 < j1 else -1
        Y = np.arange(j0, j1 + step, step)
        X = i0 + float(i1 - i0) / (j1 - j0) * (Y - j0)
    return Section(grd, X, Y)
|
bjornaa/roppy
|
roppy/section.py
|
Python
|
mit
| 3,550
|
[
"NetCDF"
] |
ecf80360922e32b32b41c6388c2b0eada571ec1885136faf56fca7fd158538c9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def main():
    # Color scheme for the scene.
    palette = vtk.vtkNamedColors()
    contour_color = palette.GetColor3d("peacock")
    surface_color = palette.GetColor3d("silver")
    background_color = palette.GetColor3d("wheat")

    # The model is a sphere; cut it with 10 parallel planes to get contours.
    sphere = vtk.vtkSphereSource()

    plane = vtk.vtkPlane()
    cutter = vtk.vtkCutter()
    cutter.SetInputConnection(sphere.GetOutputPort())
    cutter.SetCutFunction(plane)
    cutter.GenerateValues(10, -.5, .5)

    surface_mapper = vtk.vtkPolyDataMapper()
    surface_mapper.SetInputConnection(sphere.GetOutputPort())
    surface_actor = vtk.vtkActor()
    surface_actor.SetMapper(surface_mapper)
    surface_actor.GetProperty().SetDiffuseColor(surface_color)
    surface_actor.GetProperty().SetInterpolationToFlat()

    # Join the cutter's disjoint line segments into continuous polylines.
    stripper = vtk.vtkStripper()
    stripper.SetInputConnection(cutter.GetOutputPort())
    stripper.JoinContiguousSegmentsOn()

    contour_mapper = vtk.vtkPolyDataMapper()
    contour_mapper.SetInputConnection(stripper.GetOutputPort())
    contour_actor = vtk.vtkActor()
    contour_actor.SetMapper(contour_mapper)
    contour_actor.GetProperty().SetDiffuseColor(contour_color)
    contour_actor.GetProperty().SetLineWidth(3.)

    renderer = vtk.vtkRenderer()
    render_window = vtk.vtkRenderWindow()
    render_window.AddRenderer(renderer)
    render_window.SetSize(640, 480)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(render_window)

    renderer.AddActor(surface_actor)
    renderer.AddActor(contour_actor)
    renderer.SetBackground(background_color)

    # Rendering triggers the pipeline update as a side effect; Start() blocks
    # until the window is closed, after which the line counts are reported.
    render_window.Render()
    interactor.Start()

    print("-----------Lines without using vtkStripper")
    print("There are {0} lines in the polydata".format(
        cutter.GetOutput().GetNumberOfLines()))

    stripped = stripper.GetOutput()
    print("-----------Lines using vtkStripper")
    print("There are {0} lines in the polydata".format(
        stripped.GetNumberOfLines()))

    # Walk the polylines and dump the coordinates of every point.
    points = stripped.GetPoints()
    cells = stripped.GetLines()
    cells.InitTraversal()
    indices = vtk.vtkIdList()
    line_no = 0
    while cells.GetNextCell(indices):
        print("Line {0}:".format(line_no))
        for i in range(indices.GetNumberOfIds()):
            x, y, z = points.GetPoint(indices.GetId(i))
            print("\t({0:0.6f} ,{1:0.6f}, {2:0.6f})".format(x, y, z))
        line_no += 1


if __name__ == "__main__":
    main()
|
lorensen/VTKExamples
|
src/Python/PolyData/ExtractPolyLinesFromPolyData.py
|
Python
|
apache-2.0
| 2,569
|
[
"VTK"
] |
2f92fe1e6681bce8e89d6f6eaea68f786ad6be6c26bb2bf07a07151338c49fef
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
# Grid dimensions of each 2-D finite-difference block; the assembled matrix
# (see ConcatFD2D) is n0*n1 x 2*n0*n1.
n0 = 25
n1 = 25
# Sweep numLambdas regularization values uniformly over
# [startLambda, endLambda].
numLambdas = 3
startLambda = 0
endLambda = 1
# Toggle El.Display visualization of the operands and solutions.
display = False
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
def ConcatFD2D(N0,N1):
    """Build the N0*N1 x 2*N0*N1 distributed sparse matrix [FD | FD'].

    Two 2-D finite-difference stencils over an N0 x N1 grid are placed next
    to each other, and the last column is made dense.
    """
    A = El.DistSparseMatrix()
    height = N0*N1
    width = 2*N0*N1
    A.Resize(height,width)
    localHeight = A.LocalHeight()
    # Each row touches at most 11 entries: two 5-point stencils + the
    # dense final column.
    A.Reserve(11*localHeight)
    for sLoc in xrange(localHeight):
        s = A.GlobalRow(sLoc)
        # Grid coordinates of row s.  Explicit floor division keeps the
        # index arithmetic identical under Python 2 and correct under
        # Python 3 (where `/` would produce a float index offset).
        x0 = s % N0
        x1 = s // N0
        # Column of the matching entry in the right-hand block.
        sRel = s + N0*N1
        A.QueueLocalUpdate( sLoc, s, 11 )
        A.QueueLocalUpdate( sLoc, sRel, -20 )
        if x0 > 0:
            A.QueueLocalUpdate( sLoc, s-1, -1 )
            A.QueueLocalUpdate( sLoc, sRel-1, -17 )
        if x0+1 < N0:
            A.QueueLocalUpdate( sLoc, s+1, 2 )
            A.QueueLocalUpdate( sLoc, sRel+1, -20 )
        if x1 > 0:
            A.QueueLocalUpdate( sLoc, s-N0, -30 )
            A.QueueLocalUpdate( sLoc, sRel-N0, -3 )
        if x1+1 < N1:
            A.QueueLocalUpdate( sLoc, s+N0, 4 )
            A.QueueLocalUpdate( sLoc, sRel+N0, 3 )
        # The dense last column.
        # NOTE(review): under Python 2, -10/height floor-divides to -1 for
        # any height > 10; the fractional value -10.0/height may have been
        # intended -- confirm before changing it.
        A.QueueLocalUpdate( sLoc, width-1, -10/height )
    A.ProcessQueues()
    return A
# Assemble the sparse operator and a Gaussian right-hand side b.
A = ConcatFD2D(n0,n1)
b = El.DistMultiVec()
El.Gaussian( b, n0*n1, 1 )
if display:
    El.Display( A, "A" )
    El.Display( A[0:n0*n1,0:n0*n1], "AL" )
    El.Display( A[0:n0*n1,n0*n1:2*n0*n1], "AR" )
    El.Display( b, "b" )

# Enable timing/progress output from the interior-point (Mehrotra) solver.
ctrl = El.BPDNCtrl_d()
ctrl.ipmCtrl.mehrotraCtrl.time = True
ctrl.ipmCtrl.mehrotraCtrl.progress = True
ctrl.ipmCtrl.mehrotraCtrl.solveCtrl.progress = True

# Solve the basis-pursuit denoising problem for each lambda in the sweep,
# timing each solve on rank 0.
for j in xrange(0,numLambdas):
    lambd = startLambda + j*(endLambda-startLambda)/(numLambdas-1.)
    if worldRank == 0:
        print "lambda =", lambd
    startBPDN = El.mpi.Time()
    x = El.BPDN( A, b, lambd, ctrl )
    endBPDN = El.mpi.Time()
    if worldRank == 0:
        print "BPDN time:", endBPDN-startBPDN, "seconds"
    if display:
        El.Display( x, "x" )
    # Report || x ||_1 and the residual norm || A x - b ||_2.
    xOneNorm = El.EntrywiseNorm( x, 1 )
    e = El.DistMultiVec()
    El.Copy( b, e )
    El.Multiply( El.NORMAL, -1., A, x, 1., e )  # e := b - A x
    if display:
        El.Display( e, "e" )
    eTwoNorm = El.Nrm2( e )
    if worldRank == 0:
        print "|| x ||_1 =", xOneNorm
        print "|| A x - b ||_2 =", eTwoNorm

# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
    raw_input('Press Enter to exit')
|
mcopik/Elemental
|
examples/interface/BPDN.py
|
Python
|
bsd-3-clause
| 2,579
|
[
"Gaussian"
] |
b14bbe0ef1cf43282d256a5879f581554aa2a7fed7ba37cc6d01462b2472edac
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import random
from .node import Input, Output, Neuron
class Network(dict):
    '''
    A network composed of input, neuron, and output nodes. It can be modeled
    as a unidirectional directed graph with edge weights.

    Nodes are stored as dict entries (key -> node object); edge weights are
    kept separately in ``self.weights`` keyed by (source, target) pairs.
    '''

    def __init__(self):
        '''
        Create an empty network with no nodes and no connections.
        '''
        super(Network, self).__init__()
        self.weights = {}

    @property
    def inputs(self):
        '''
        Network input nodes (key -> node for every Input instance).
        '''
        return dict((k, self[k]) for k in self if isinstance(self[k], Input))

    @property
    def outputs(self):
        '''
        Network output nodes (key -> node for every Output instance).
        '''
        return dict((k, self[k]) for k in self if isinstance(self[k], Output))

    @property
    def neurons(self):
        '''
        Network neuron nodes (key -> node for every Neuron instance).
        '''
        return dict((k, self[k]) for k in self if isinstance(self[k], Neuron))

    def connect(self, source, target, weight=None):
        '''
        Connect two nodes with the given weight.

        When *weight* is None, a random weight in [-1.0, 1.0] is drawn.
        '''
        # BUGFIX: the previous `weight or random.uniform(...)` silently
        # replaced an explicit weight of 0.0 (falsy) with a random value;
        # test against None instead.
        if weight is None:
            weight = random.uniform(-1.0, 1.0)
        self.weights[(source, target)] = weight

    def get_inputs(self, node):
        '''
        Get input connections (edges ending at *node*), sorted by source key.

        Returns an empty list when *node* is not in the network.
        '''
        if node not in self:
            # XXX error?
            return []
        return sorted(
            [k for k in self.weights if k[1] == node],
            key=lambda k: k[0])

    def get_outputs(self, node):
        '''
        Get output connections (edges starting at *node*), sorted by target key.

        Returns an empty list when *node* is not in the network.
        '''
        if node not in self:
            # XXX error?
            return []
        return sorted(
            [k for k in self.weights if k[0] == node],
            key=lambda k: k[1])
|
CtrlC-Root/cse5526
|
spark/network.py
|
Python
|
mit
| 1,768
|
[
"NEURON"
] |
5224f60d62ce73de4fde9bf70486dfa7ac2df643d11f5b91627ce5621bd6d426
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
import sys
import keyword
import re
import types
import warnings
import inspect
from itertools import zip_longest
from collections import namedtuple
from scipy._lib import doccer
from scipy._lib._util import _lazywhere
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, entr, xlogy, ive)
# for root finding for continuous distribution ppf, and max likelihood
# estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
# for scipy.stats.entropy. Attempts to import just that function or file
# have cause import problems
from scipy import stats
from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
logical_and, log, sqrt, place, argmax, vectorize, asarray,
nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(k, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(k, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
moment(order, %(shapes)s, loc=0, scale=1)
Non-central moment of the specified order.
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data)
Parameter estimates for generic data.
See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the
keyword arguments.
"""
_doc_expect = """\
expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(confidence, %(shapes)s, loc=0, scale=1)
Confidence interval with equal areas around the median.
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``. Note that shifting the location of a distribution
does not make it a "noncentral" distribution; noncentral generalizations of
some distributions are available in separate classes.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
# Map from the placeholder names used in distribution docstring templates
# to the corresponding docstring fragments defined above.
docdict = {
    'rvs': _doc_rvs,
    'pdf': _doc_pdf,
    'logpdf': _doc_logpdf,
    'cdf': _doc_cdf,
    'logcdf': _doc_logcdf,
    'sf': _doc_sf,
    'logsf': _doc_logsf,
    'ppf': _doc_ppf,
    'isf': _doc_isf,
    'stats': _doc_stats,
    'entropy': _doc_entropy,
    'fit': _doc_fit,
    'moment': _doc_moment,
    'expect': _doc_expect,
    'interval': _doc_interval,
    'mean': _doc_mean,
    'std': _doc_std,
    'var': _doc_var,
    'median': _doc_median,
    'allmethods': _doc_allmethods,
    'longsummary': _doc_default_longsummary,
    'frozennote': _doc_default_frozen_note,
    'example': _doc_default_example,
    'default': _doc_default,
    'before_notes': _doc_default_before_notes,
    'after_notes': _doc_default_locscale
}

# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()

docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
                     'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
                     'mean', 'var', 'std', 'interval']
# Discrete distributions have no scale parameter; strip it from the
# method signatures shown in the docs.
for obj in _doc_disc_methods:
    docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')

# These methods take an integer argument `k` rather than `x` in the
# discrete case.
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
    docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')

# pdf/logpdf do not exist for discrete distributions (pmf/logpmf do).
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')

_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods

docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
    'rv_continuous', 'rv_discrete')

_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note

_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
...               %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
...           label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""


_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""

docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale

_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
                                     docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes

_doc_default_disc = ''.join([docdict_discrete['longsummary'],
                             docdict_discrete['allmethods'],
                             docdict_discrete['frozennote'],
                             docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc

# clean up all the separate docstring elements, we do not need them anymore
# (exec is used because the names are only known as strings from dir()).
for obj in [s for s in dir() if s.startswith('_doc_')]:
    exec('del ' + obj)
del obj
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""kurtosis is fourth central moment / variance**2 - 3."""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
def _fit_determine_optimizer(optimizer):
if not callable(optimizer) and isinstance(optimizer, str):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError as e:
raise ValueError("%s is not a valid optimizer" % optimizer) from e
return optimizer
# Frozen RV class
class rv_frozen:
    """A distribution with shape, location and scale parameters fixed.

    Wraps a fresh copy of *dist* and forwards every statistical method to it
    with the frozen ``args``/``kwds`` applied, so callers can use e.g.
    ``frozen.pdf(x)`` without repeating the parameters.
    """

    def __init__(self, dist, *args, **kwds):
        # Frozen shape/loc/scale arguments, applied on every delegated call.
        self.args = args
        self.kwds = kwds

        # create a new instance
        self.dist = dist.__class__(**dist._updated_ctor_param())

        shapes, _, _ = self.dist._parse_args(*args, **kwds)
        # Support endpoints for the frozen shape parameters.
        self.a, self.b = self.dist._get_support(*shapes)

    @property
    def random_state(self):
        # RNG state is delegated to the wrapped distribution instance.
        return self.dist._random_state

    @random_state.setter
    def random_state(self, seed):
        self.dist._random_state = check_random_state(seed)

    def pdf(self, x):   # raises AttributeError in frozen discrete distribution
        return self.dist.pdf(x, *self.args, **self.kwds)

    def logpdf(self, x):
        return self.dist.logpdf(x, *self.args, **self.kwds)

    def cdf(self, x):
        return self.dist.cdf(x, *self.args, **self.kwds)

    def logcdf(self, x):
        return self.dist.logcdf(x, *self.args, **self.kwds)

    def ppf(self, q):
        return self.dist.ppf(q, *self.args, **self.kwds)

    def isf(self, q):
        return self.dist.isf(q, *self.args, **self.kwds)

    def rvs(self, size=None, random_state=None):
        # Merge the call-time size/random_state into the frozen keywords.
        kwds = self.kwds.copy()
        kwds.update({'size': size, 'random_state': random_state})
        return self.dist.rvs(*self.args, **kwds)

    def sf(self, x):
        return self.dist.sf(x, *self.args, **self.kwds)

    def logsf(self, x):
        return self.dist.logsf(x, *self.args, **self.kwds)

    def stats(self, moments='mv'):
        kwds = self.kwds.copy()
        kwds.update({'moments': moments})
        return self.dist.stats(*self.args, **kwds)

    def median(self):
        return self.dist.median(*self.args, **self.kwds)

    def mean(self):
        return self.dist.mean(*self.args, **self.kwds)

    def var(self):
        return self.dist.var(*self.args, **self.kwds)

    def std(self):
        return self.dist.std(*self.args, **self.kwds)

    def moment(self, order=None, **kwds):
        return self.dist.moment(order, *self.args, **self.kwds, **kwds)

    def entropy(self):
        return self.dist.entropy(*self.args, **self.kwds)

    def pmf(self, k):
        return self.dist.pmf(k, *self.args, **self.kwds)

    def logpmf(self, k):
        return self.dist.logpmf(k, *self.args, **self.kwds)

    def interval(self, confidence=None, **kwds):
        return self.dist.interval(confidence, *self.args, **self.kwds, **kwds)

    def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
        # expect method only accepts shape parameters as positional args
        # hence convert self.args, self.kwds, also loc/scale
        # See the .expect method docstrings for the meaning of
        # other parameters.
        a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
        if isinstance(self.dist, rv_discrete):
            # discrete expect takes no scale argument
            return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
        else:
            return self.dist.expect(func, a, loc, scale, lb, ub,
                                    conditional, **kwds)

    def support(self):
        return self.dist.support(*self.args, **self.kwds)
def argsreduce(cond, *args):
    """Broadcast *args* against *cond* and flatten them for vectorized use.

    Every argument is made at least 1-D.  When *cond* is True everywhere,
    each argument is broadcast against *cond* and returned raveled.
    Otherwise each argument is reduced to the elements where *cond* holds,
    except size-1 arguments, which are returned unchanged.

    Returns a list of processed arguments.
    """
    # some distributions assume arguments are iterable.
    prepared = np.atleast_1d(*args)

    # np.atleast_1d returns a single array for one argument and a list of
    # arrays for several; normalize to a list either way.
    if not isinstance(prepared, list):
        prepared = [prepared, ]

    if np.all(cond):
        # Fast path: nothing is masked out; broadcast everything against
        # cond and flatten.
        *prepared, cond = np.broadcast_arrays(*prepared, cond)
        return [a.ravel() for a in prepared]

    shape = cond.shape
    # np.extract returns flattened arrays, which are only broadcastable
    # together when equal-sized or size 1; so size>1 arguments are first
    # broadcast to cond's shape to stay aligned.
    return [(a if np.size(a) == 1
             else np.extract(cond, np.broadcast_to(a, shape)))
            for a in prepared]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# The function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the
# factor of exp(-xs*ns) into the ive function to improve numerical
# stability at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
# Return res + np.log(corr) avoiding np.log(0)
return _lazywhere(
corr > 0,
(res, corr),
f=lambda r, c: r + np.log(c),
fillvalue=-np.inf)
def _ncx2_pdf(x, df, nc):
# Copy of _ncx2_log_pdf avoiding np.log(0) when corr = 0
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
return np.exp(res) * corr
def _ncx2_cdf(x, df, nc):
    # Thin wrapper: delegate to the compiled noncentral chi-squared cdf
    # (scipy.special.chndtr).
    return chndtr(x, df, nc)
class rv_generic:
    """Class which encapsulates common functionality between rv_discrete
    and rv_continuous.
    """
    def __init__(self, seed=None):
        super().__init__()

        # figure out if _stats signature has 'moments' keyword
        sig = _getfullargspec(self._stats)
        self._stats_has_moments = ((sig.varkw is not None) or
                                   ('moments' in sig.args) or
                                   ('moments' in sig.kwonlyargs))
        # `seed` may be None, an int, or a Generator/RandomState instance
        self._random_state = check_random_state(seed)

        # For historical reasons, `size` was made an attribute that was read
        # inside _rvs().  The code is being changed so that 'size'
        # is an argument
        # to self._rvs(). However some external (non-SciPy) distributions
        # have not
        # been updated.  Maintain backwards compatibility by checking if
        # the self._rvs() signature has the 'size' keyword, or a **kwarg,
        # and if not set self._size inside self.rvs()
        # before calling self._rvs().
        argspec = inspect.getfullargspec(self._rvs)
        self._rvs_uses_size_attribute = (argspec.varkw is None and
                                         'size' not in argspec.args and
                                         'size' not in argspec.kwonlyargs)
        # Warn on first use only
        self._rvs_size_warned = False
    @property
    def random_state(self):
        """Get or set the generator object for generating random variates.

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
        """
        # simple read access to the stored generator
        return self._random_state
    @random_state.setter
    def random_state(self, seed):
        # Normalize `seed` (None / int / Generator / RandomState) into an
        # actual generator instance.
        self._random_state = check_random_state(seed)
    def __setstate__(self, state):
        """Restore instance state when unpickling (supports both the current
        dict-based state and the legacy scipy<1.6 tuple format)."""
        try:
            self.__dict__.update(state)
            # attaches the dynamically created methods on each instance.
            # if a subclass overrides rv_generic.__setstate__, or implements
            # it's own _attach_methods, then it must make sure that
            # _attach_argparser_methods is called.
            self._attach_methods()
        except ValueError:
            # reconstitute an old pickle scipy<1.6, that contains
            # (_ctor_param, random_state) as state
            self._ctor_param = state[0]
            self._random_state = state[1]
            self.__init__()
    def _attach_methods(self):
        """Attaches dynamically created methods to the rv_* instance.

        This method must be overridden by subclasses, and must itself call
        _attach_argparser_methods. This method is called in __init__ in
        subclasses, and in __setstate__

        :raises NotImplementedError: always, in this base class.
        """
        raise NotImplementedError
    def _attach_argparser_methods(self):
        """
        Generates the argument-parsing functions dynamically and attaches
        them to the instance.

        Should be called from `_attach_methods`, typically in __init__ and
        during unpickling (__setstate__)
        """
        ns = {}
        # `_parse_arg_template` was rendered by _construct_argparser from the
        # module-level `parse_arg_template`; exec'ing it defines the three
        # parser functions inside `ns`.
        exec(self._parse_arg_template, ns)
        # NB: attach to the instance, not class
        for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
            setattr(self, name, types.MethodType(ns[name], self))
    def _construct_argparser(
            self, meths_to_inspect, locscale_in, locscale_out):
        """Construct the parser string for the shape arguments.

        This method should be called in __init__ of a class for each
        distribution. It creates the `_parse_arg_template` attribute that is
        then used by `_attach_argparser_methods` to dynamically create and
        attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs`
        methods to the instance.

        If self.shapes is a non-empty string, interprets it as a
        comma-separated list of shape parameters.

        Otherwise inspects the call signatures of `meths_to_inspect`
        and constructs the argument-parsing functions from these.
        In this case also sets `shapes` and `numargs`.
        """
        if self.shapes:
            # sanitize the user-supplied shapes
            if not isinstance(self.shapes, str):
                raise TypeError('shapes must be a string.')

            shapes = self.shapes.replace(',', ' ').split()

            for field in shapes:
                # reject names that could not appear in a `def` statement,
                # since they are substituted into exec'd source
                if keyword.iskeyword(field):
                    raise SyntaxError('keywords cannot be used as shapes.')
                if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
                    raise SyntaxError(
                        'shapes must be valid python identifiers')
        else:
            # find out the call signatures (_pdf, _cdf etc), deduce shape
            # arguments. Generic methods only have 'self, x', any further args
            # are shapes.
            shapes_list = []
            for meth in meths_to_inspect:
                shapes_args = _getfullargspec(meth)  # NB does not contain self
                args = shapes_args.args[1:]       # peel off 'x', too

                if args:
                    shapes_list.append(args)

                    # *args or **kwargs are not allowed w/automatic shapes
                    if shapes_args.varargs is not None:
                        raise TypeError(
                            '*args are not allowed w/out explicit shapes')
                    if shapes_args.varkw is not None:
                        raise TypeError(
                            '**kwds are not allowed w/out explicit shapes')
                    if shapes_args.kwonlyargs:
                        raise TypeError(
                            'kwonly args are not allowed w/out explicit shapes')
                    if shapes_args.defaults is not None:
                        raise TypeError('defaults are not allowed for shapes')

            if shapes_list:
                shapes = shapes_list[0]

                # make sure the signatures are consistent
                for item in shapes_list:
                    if item != shapes:
                        raise TypeError('Shape arguments are inconsistent.')
            else:
                shapes = []

        # have the arguments, construct the method from template
        shapes_str = ', '.join(shapes) + ', ' if shapes else ''  # NB: not None
        dct = dict(shape_arg_str=shapes_str,
                   locscale_in=locscale_in,
                   locscale_out=locscale_out,
                   )

        # this string is used by _attach_argparser_methods
        self._parse_arg_template = parse_arg_template % dct

        self.shapes = ', '.join(shapes) if shapes else None
        if not hasattr(self, 'numargs'):
            # allows more general subclassing with *args
            self.numargs = len(shapes)
    def _construct_doc(self, docdict, shapes_vals=None):
        """Construct the instance docstring with string substitutions."""
        tempdict = docdict.copy()
        tempdict['name'] = self.name or 'distname'
        tempdict['shapes'] = self.shapes or ''

        if shapes_vals is None:
            shapes_vals = ()
        vals = ', '.join('%.3g' % val for val in shapes_vals)
        tempdict['vals'] = vals

        tempdict['shapes_'] = self.shapes or ''
        if self.shapes and self.numargs == 1:
            # single shape: trailing comma keeps call examples valid
            tempdict['shapes_'] += ','

        if self.shapes:
            tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
        else:
            tempdict['set_vals_stmt'] = ''

        if self.shapes is None:
            # remove shapes from call parameters if there are none
            for item in ['default', 'before_notes']:
                tempdict[item] = tempdict[item].replace(
                    "\n%(shapes)s : array_like\n    shape parameters", "")
        # two substitution passes: the templates may themselves contain
        # %(...)s tokens that only resolve on the second pass
        for i in range(2):
            if self.shapes is None:
                # necessary because we use %(shapes)s in two forms (w w/o ", ")
                self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
            try:
                self.__doc__ = doccer.docformat(self.__doc__, tempdict)
            except TypeError as e:
                raise Exception("Unable to construct docstring for "
                                "distribution \"%s\": %s" %
                                (self.name, repr(e))) from e

        # correct for empty shapes
        self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
    def freeze(self, *args, **kwds):
        """Freeze the distribution for the given arguments.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution.  Should include all
            the non-optional arguments, may include ``loc`` and ``scale``.

        Returns
        -------
        rv_frozen : rv_frozen instance
            The frozen distribution.
        """
        return rv_frozen(self, *args, **kwds)

    def __call__(self, *args, **kwds):
        # calling a distribution instance is an alias for freeze()
        return self.freeze(*args, **kwds)
    # share the docstring so help(dist) shows the freeze() contract
    __call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Noncentral moments (also known as the moment about the origin).
# Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
# The primed mu is a widely used notation for the noncentral moment.
def _munp(self, n, *args):
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = self.generic_moment(n, *args)
return vals
    def _argcheck_rvs(self, *args, **kwargs):
        # Handle broadcasting and size validation of the rvs method.
        # Subclasses should not have to override this method.
        # The rule is that if `size` is not None, then `size` gives the
        # shape of the result (integer values of `size` are treated as
        # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
        #
        # `args` is expected to contain the shape parameters (if any), the
        # location and the scale in a flat tuple (e.g. if there are two
        # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
        # The only keyword argument expected is 'size'.
        size = kwargs.get('size', None)
        all_bcast = np.broadcast_arrays(*args)

        def squeeze_left(a):
            # strip length-1 *leading* dimensions only
            while a.ndim > 0 and a.shape[0] == 1:
                a = a[0]
            return a

        # Eliminate trivial leading dimensions.  In the convention
        # used by numpy's random variate generators, trivial leading
        # dimensions are effectively ignored.  In other words, when `size`
        # is given, trivial leading dimensions of the broadcast parameters
        # in excess of the number of dimensions in size are ignored, e.g.
        # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
        # array([ 1.00104267, 3.00422496, 4.99799278])
        # If `size` is not given, the exact broadcast shape is preserved:
        # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
        # array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
        #
        all_bcast = [squeeze_left(a) for a in all_bcast]
        bcast_shape = all_bcast[0].shape
        bcast_ndim = all_bcast[0].ndim

        if size is None:
            size_ = bcast_shape
        else:
            size_ = tuple(np.atleast_1d(size))

        # Check compatibility of size_ with the broadcast shape of all
        # the parameters.  This check is intended to be consistent with
        # how the numpy random variate generators (e.g. np.random.normal,
        # np.random.beta) handle their arguments.   The rule is that, if size
        # is given, it determines the shape of the output.  Broadcasting
        # can't change the output size.

        # This is the standard broadcasting convention of extending the
        # shape with fewer dimensions with enough dimensions of length 1
        # so that the two shapes have the same number of dimensions.
        ndiff = bcast_ndim - len(size_)
        if ndiff < 0:
            bcast_shape = (1,)*(-ndiff) + bcast_shape
        elif ndiff > 0:
            size_ = (1,)*ndiff + size_

        # This compatibility test is not standard.  In "regular" broadcasting,
        # two shapes are compatible if for each dimension, the lengths are the
        # same or one of the lengths is 1.  Here, the length of a dimension in
        # size_ must not be less than the corresponding length in bcast_shape.
        ok = all([bcdim == 1 or bcdim == szdim
                  for (bcdim, szdim) in zip(bcast_shape, size_)])
        if not ok:
            raise ValueError("size does not match the broadcast shape of "
                             "the parameters. %s, %s, %s" % (size, size_,
                                                             bcast_shape))

        # split the flat broadcast list back into (shapes, loc, scale)
        param_bcast = all_bcast[:-2]
        loc_bcast = all_bcast[-2]
        scale_bcast = all_bcast[-1]

        return param_bcast, loc_bcast, scale_bcast, size_
# These are the methods you must define (standard form functions)
# NB: generic _pdf, _logpdf, _cdf are different for
# rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
def _get_support(self, *args, **kwargs):
"""Return the support of the (unscaled, unshifted) distribution.
*Must* be overridden by distributions which have support dependent
upon the shape parameters of the distribution. Any such override
*must not* set or change any of the class members, as these members
are shared amongst all instances of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support for the specified
shape parameters.
"""
return self.a, self.b
def _support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a <= x) & (x <= b)
def _open_support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a < x) & (x < b)
def _rvs(self, *args, size=None, random_state=None):
# This method must handle size being a tuple, and it must
# properly broadcast *args and size. size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
# Use basic inverse cdf algorithm for RV generation as default.
U = random_state.uniform(size=size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._sf(x, *args))
    def _ppf(self, q, *args):
        # Generic fallback for the percent point function: delegate to
        # `self._ppfvec` (defined outside this chunk; presumably a
        # vectorized numerical inversion of the cdf — confirm upstream).
        return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
    def rvs(self, *args, **kwds):
        """Random variates of given type.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional
            Scale parameter (default=1).
        size : int or tuple of ints, optional
            Defining number of random variates (default is 1).
        random_state : {None, int, `numpy.random.Generator`,
                        `numpy.random.RandomState`}, optional
            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
            singleton is used.
            If `seed` is an int, a new ``RandomState`` instance is used,
            seeded with `seed`.
            If `seed` is already a ``Generator`` or ``RandomState`` instance
            then that instance is used.

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of given `size`.
        """
        # `discrete` is passed internally (by rv_discrete) to request ints
        discrete = kwds.pop('discrete', None)
        rndm = kwds.pop('random_state', None)
        args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
        cond = logical_and(self._argcheck(*args), (scale >= 0))
        if not np.all(cond):
            message = ("Domain error in arguments. The `scale` parameter must "
                       "be positive for all distributions; see the "
                       "distribution documentation for other restrictions.")
            raise ValueError(message)

        if np.all(scale == 0):
            # degenerate distribution: all mass at `loc`
            return loc*ones(size, 'd')

        # extra gymnastics needed for a custom random_state
        if rndm is not None:
            random_state_saved = self._random_state
            random_state = check_random_state(rndm)
        else:
            random_state = self._random_state

        # Maintain backwards compatibility by setting self._size
        # for distributions that still need it.
        if self._rvs_uses_size_attribute:
            if not self._rvs_size_warned:
                # NOTE(review): np.VisibleDeprecationWarning was removed in
                # NumPy 2.0 — confirm the minimum supported NumPy version.
                warnings.warn(
                    f'The signature of {self._rvs} does not contain '
                    f'a "size" keyword. Such signatures are deprecated.',
                    np.VisibleDeprecationWarning)
                self._rvs_size_warned = True
            self._size = size
            self._random_state = random_state
            vals = self._rvs(*args)
        else:
            vals = self._rvs(*args, size=size, random_state=random_state)

        # transform from the standardized variate to loc/scale family
        vals = vals * scale + loc

        # do not forget to restore the _random_state
        if rndm is not None:
            self._random_state = random_state_saved

        # Cast to int if discrete
        if discrete:
            if size == ():
                vals = int(vals)
            else:
                vals = vals.astype(int)

        return vals
    def stats(self, *args, **kwds):
        """Some statistics of the given RV.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional (continuous RVs only)
            scale parameter (default=1)
        moments : str, optional
            composed of letters ['mvsk'] defining which moments to compute:
            'm' = mean,
            'v' = variance,
            's' = (Fisher's) skew,
            'k' = (Fisher's) kurtosis.
            (default is 'mv')

        Returns
        -------
        stats : sequence
            of requested moments.
        """
        args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
        # scale = 1 by construction for discrete RVs
        loc, scale = map(asarray, (loc, scale))
        args = tuple(map(asarray, args))
        cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = []
        default = np.full(shape(cond), fill_value=self.badvalue)

        # Use only entries that are valid in calculation
        if np.any(cond):
            goodargs = argsreduce(cond, *(args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]

            if self._stats_has_moments:
                mu, mu2, g1, g2 = self._stats(*goodargs,
                                              **{'moments': moments})
            else:
                mu, mu2, g1, g2 = self._stats(*goodargs)

            if 'm' in moments:
                if mu is None:
                    # fall back to the first non-central moment
                    mu = self._munp(1, *goodargs)
                out0 = default.copy()
                place(out0, cond, mu * scale + loc)
                output.append(out0)

            if 'v' in moments:
                if mu2 is None:
                    mu2p = self._munp(2, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    # if mean is inf then var is also inf
                    with np.errstate(invalid='ignore'):
                        mu2 = np.where(~np.isinf(mu), mu2p - mu**2, np.inf)
                out0 = default.copy()
                place(out0, cond, mu2 * scale * scale)
                output.append(out0)

            if 's' in moments:
                if g1 is None:
                    # derive skewness from the first three non-central moments
                    mu3p = self._munp(3, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    with np.errstate(invalid='ignore'):
                        mu3 = (-mu*mu - 3*mu2)*mu + mu3p
                        g1 = mu3 / np.power(mu2, 1.5)
                out0 = default.copy()
                place(out0, cond, g1)
                output.append(out0)

            if 'k' in moments:
                if g2 is None:
                    # derive (excess) kurtosis from non-central moments
                    mu4p = self._munp(4, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    if g1 is None:
                        mu3 = None
                    else:
                        # (mu2**1.5) breaks down for nan and inf
                        mu3 = g1 * np.power(mu2, 1.5)
                    if mu3 is None:
                        mu3p = self._munp(3, *goodargs)
                        with np.errstate(invalid='ignore'):
                            mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
                    with np.errstate(invalid='ignore'):
                        mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
                        g2 = mu4 / mu2**2.0 - 3.0
                out0 = default.copy()
                place(out0, cond, g2)
                output.append(out0)
        else:  # no valid args
            output = [default.copy() for _ in moments]

        if len(output) == 1:
            return output[0]
        else:
            return tuple(output)
    def entropy(self, *args, **kwds):
        """Differential entropy of the RV.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional  (continuous distributions only).
            Scale parameter (default=1).

        Notes
        -----
        Entropy is defined base `e`:

        >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
        >>> np.allclose(drv.entropy(), np.log(2.0))
        True

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        # NB: for discrete distributions scale=1 by construction in _parse_args
        loc, scale = map(asarray, (loc, scale))
        args = tuple(map(asarray, args))
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = zeros(shape(cond0), 'd')
        place(output, (1-cond0), self.badvalue)
        goodargs = argsreduce(cond0, scale, *args)
        goodscale = goodargs[0]
        goodargs = goodargs[1:]
        # entropy of a loc-scale family: H(scale*X + loc) = H(X) + log(scale)
        place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
        return output
    def moment(self, order=None, *args, **kwds):
        """non-central moment of distribution of specified order.

        .. deprecated:: 1.9.0
           Parameter `n` is replaced by parameter `order` to avoid name
           collisions with the shape parameter `n` of several distributions.
           Parameter `n` will be removed in the second release after 1.9.0.

        Parameters
        ----------
        order : int, order >= 1
            Order of moment.
        arg1, arg2, arg3,... : float
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        """
        # This function was originally written with parameter `n`, but `n`
        # is also the name of many distribution shape parameters.
        # This block allows the function to accept both `n` and its
        # replacement `order` during a deprecation period; it can be removed
        # in the second release after 1.9.0.
        # The logic to provide a DeprecationWarning only when `n` is passed
        # as a keyword, accept the new keyword `order`, and otherwise be
        # backward-compatible deserves explanation. We need to look out for
        # the following:
        # * Does the distribution have a shape named `n`?
        # * Is `order` provided? It doesn't matter whether it is provided as a
        #   positional or keyword argument; it will be used as the order of the
        #   moment rather than a distribution shape parameter because:
        #   - The first positional argument of `moment` has always been the
        #     order of the moment.
        #   - The keyword `order` is new, so it's unambiguous that it refers to
        #     the order of the moment.
        # * Is `n` provided as a keyword argument? It _does_ matter whether it
        #   is provided as a positional or keyword argument.
        #   - The first positional argument of `moment` has always been the
        #     order of moment, but
        #   - if `n` is provided as a keyword argument, its meaning depends
        #     on whether the distribution accepts `n` as a shape parameter.
        has_shape_n = (self.shapes is not None
                       and "n" in (self.shapes.split(", ")))
        got_order = order is not None
        got_keyword_n = kwds.get("n", None) is not None

        # These lead to the following cases.
        # Case A: If the distribution _does_ accept `n` as a shape
        # 1. If both `order` and `n` are provided, this is now OK:
        #    it is unambiguous that `order` is the order of the moment and `n`
        #    is the shape parameter. Previously, this would have caused an
        #    error because `n` was provided both as a keyword argument and
        #    as the first positional argument. I don't think it is credible for
        #    users to rely on this error in their code, though, so I don't see
        #    this as a backward compatibility break.
        # 2. If only `n` is provided (as a keyword argument), this would have
        #    been an error in the past because `n` would have been treated as
        #    the order of the moment while the shape parameter would be
        #    missing. It is still the same type of error, but for a different
        #    reason: now, `n` is treated as the shape parameter while the
        #    order of the moment is missing.
        # 3. If only `order` is provided, no special treament is needed.
        #    Clearly this value is intended to be the order of the moment,
        #    and the rest of the function will determine whether `n` is
        #    available as a shape parameter in `args`.
        # 4. If neither `n` nor `order` is provided, this would have been an
        #    error (order of the moment is not provided) and it is still an
        #    error for the same reason.
        # Case B: the distribution does _not_ accept `n` as a shape
        # 1. If both `order` and `n` are provided, this was an error, and it
        #    still is an error: two values for same parameter.
        # 2. If only `n` is provided (as a keyword argument), this was OK and
        #    is still OK, but there shold now be a `DeprecationWarning`. The
        #    value of `n` should be removed from `kwds` and stored in `order`.
        # 3. If only `order` is provided, there was no problem before providing
        #    only the first argument of `moment`, and there is no problem with
        #    that now.
        # 4. If neither `n` nor `order` is provided, this would have been an
        #    error (order of the moment is not provided), and it is still an
        #    error for the same reason.
        if not got_order and ((not got_keyword_n)  # A4 and B4
                              or (got_keyword_n and has_shape_n)):  # A2
            message = ("moment() missing 1 required "
                       "positional argument: `order`")
            raise TypeError(message)

        if got_keyword_n and not has_shape_n:
            if got_order:  # B1
                # this will change to "moment got unexpected argument n"
                message = "moment() got multiple values for first argument"
                raise TypeError(message)
            else:  # B2
                message = ("Use of keyword argument `n` for method "
                           "`moment` is deprecated. Use first positional "
                           "argument or keyword argument `order` instead.")
                order = kwds.pop("n")
                warnings.warn(message, DeprecationWarning, stacklevel=2)
        n = order
        # No special treatment of A1, A3, or B3 is needed because the order
        # of the moment is now in variable `n` and the shape parameter, if
        # needed, will be fished out of `args` or `kwds` by _parse_args
        # A3 might still cause an error if the shape parameter called `n`
        # is not found in `args`.

        shapes, loc, scale = self._parse_args(*args, **kwds)
        args = np.broadcast_arrays(*(*shapes, loc, scale))
        *shapes, loc, scale = args

        i0 = np.logical_and(self._argcheck(*shapes), scale > 0)
        i1 = np.logical_and(i0, loc == 0)
        i2 = np.logical_and(i0, loc != 0)

        args = argsreduce(i0, *shapes, loc, scale)
        *shapes, loc, scale = args

        if (floor(n) != n):
            raise ValueError("Moment must be an integer.")
        if (n < 0):
            raise ValueError("Moment must be positive.")
        mu, mu2, g1, g2 = None, None, None, None
        if (n > 0) and (n < 5):
            if self._stats_has_moments:
                mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
            else:
                mdict = {}
            mu, mu2, g1, g2 = self._stats(*shapes, **mdict)
        val = np.empty(loc.shape)  # val needs to be indexed by loc
        val[...] = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, shapes)

        # Convert to transformed  X = L + S*Y
        # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
        result = zeros(i0.shape)
        place(result, ~i0, self.badvalue)

        if i1.any():
            # loc == 0: moment scales simply as scale**n
            res1 = scale[loc == 0]**n * val[loc == 0]
            place(result, i1, res1)

        if i2.any():
            mom = [mu, mu2, g1, g2]
            arrs = [i for i in mom if i is not None]
            idx = [i for i in range(4) if mom[i] is not None]
            # NOTE(review): `any(idx)` is False when idx == [0], i.e. when
            # only the mean is available — confirm whether this should be
            # `if idx:` so the reduction is applied in that case too.
            if any(idx):
                arrs = argsreduce(loc != 0, *arrs)
                j = 0
                for i in idx:
                    mom[i] = arrs[j]
                    j += 1
            mu, mu2, g1, g2 = mom
            args = argsreduce(loc != 0, *shapes, loc, scale, val)
            *shapes, loc, scale, val = args

            # binomial expansion of E[(L + S*Y)^n]
            res2 = zeros(loc.shape, dtype='d')
            fac = scale / loc
            for k in range(n):
                valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp,
                                          shapes)
                res2 += comb(n, k, exact=True)*fac**k * valk
            res2 += fac**n * val
            res2 *= loc**n
            place(result, i2, res2)

        if result.ndim == 0:
            return result.item()
        return result
def median(self, *args, **kwds):
"""Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
    def interval(self, confidence=None, *args, **kwds):
        """Confidence interval with equal areas around the median.

        .. deprecated:: 1.9.0
           Parameter `alpha` is replaced by parameter `confidence` to avoid
           name collisions with the shape parameter `alpha` of some
           distributions. Parameter `alpha` will be removed in the second
           release after 1.9.0.

        Parameters
        ----------
        confidence : array_like of float
            Probability that an rv will be drawn from the returned range.
            Each value should be in the range [0, 1].
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter, Default is 0.
        scale : array_like, optional
            scale parameter, Default is 1.

        Returns
        -------
        a, b : ndarray of float
            end-points of range that contain ``100 * alpha %`` of the rv's
            possible values.
        """
        # This function was originally written with parameter `alpha`, but
        # `alpha` is also the name of a shape parameter of two distributions.
        # This block allows the function to accept both `alpha` and its
        # replacement `confidence` during a deprecation period; it can be
        # removed in the second release after 1.9.0.
        # See description of logic in `moment` method.
        has_shape_alpha = (self.shapes is not None
                           and "alpha" in (self.shapes.split(", ")))
        got_confidence = confidence is not None
        got_keyword_alpha = kwds.get("alpha", None) is not None

        if not got_confidence and ((not got_keyword_alpha)
                                   or (got_keyword_alpha and has_shape_alpha)):
            message = ("interval() missing 1 required positional argument: "
                       "`confidence`")
            raise TypeError(message)

        if got_keyword_alpha and not has_shape_alpha:
            if got_confidence:
                # this will change to "interval got unexpected argument alpha"
                message = "interval() got multiple values for first argument"
                raise TypeError(message)
            else:
                message = ("Use of keyword argument `alpha` for method "
                           "`interval` is deprecated. Use first positional "
                           "argument or keyword argument `confidence` "
                           "instead.")
                confidence = kwds.pop("alpha")
                warnings.warn(message, DeprecationWarning, stacklevel=2)
        alpha = confidence

        alpha = asarray(alpha)
        if np.any((alpha > 1) | (alpha < 0)):
            raise ValueError("alpha must be between 0 and 1 inclusive")
        # equal tail areas: split the remaining 1-alpha probability evenly
        q1 = (1.0-alpha)/2
        q2 = (1.0+alpha)/2
        a = self.ppf(q1, *args, **kwds)
        b = self.ppf(q2, *args, **kwds)
        return a, b
    def support(self, *args, **kwargs):
        """Support of the distribution.

        Parameters
        ----------
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter, Default is 0.
        scale : array_like, optional
            scale parameter, Default is 1.

        Returns
        -------
        a, b : array_like
            end-points of the distribution's support.
        """
        args, loc, scale = self._parse_args(*args, **kwargs)
        arrs = np.broadcast_arrays(*args, loc, scale)
        args, loc, scale = arrs[:-2], arrs[-2], arrs[-1]
        cond = self._argcheck(*args) & (scale > 0)
        _a, _b = self._get_support(*args)
        if cond.all():
            # all parameter combinations valid: plain loc/scale transform
            return _a * scale + loc, _b * scale + loc
        elif cond.ndim == 0:
            # scalar invalid input
            return self.badvalue, self.badvalue
        # promote bounds to at least float to fill in the badvalue
        _a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d')
        out_a, out_b = _a * scale + loc, _b * scale + loc
        place(out_a, 1-cond, self.badvalue)
        place(out_b, 1-cond, self.badvalue)
        return out_a, out_b
    def nnlf(self, theta, x):
        """Negative loglikelihood function.
        Notes
        -----
        This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
        parameters (including loc and scale).
        """
        loc, scale, args = self._unpack_loc_scale(theta)
        # invalid parameters get infinite negative log-likelihood
        if not self._argcheck(*args) or scale <= 0:
            return inf
        # standardize the data to the unscaled/unshifted distribution
        x = asarray((x-loc) / scale)
        # Jacobian of the standardization: each point contributes log(scale)
        n_log_scale = len(x) * log(scale)
        if np.any(~self._support_mask(x, *args)):
            return inf
        return self._nnlf(x, *args) + n_log_scale
    def _nnlf(self, x, *args):
        # Sum of negative log-probabilities over the (already standardized)
        # sample; `_logpxf` is the log pdf/pmf (defined outside this chunk).
        return -np.sum(self._logpxf(x, *args), axis=0)
    def _nnlf_and_penalty(self, x, args):
        # Negative log-likelihood with a large but *finite* penalty for
        # observations that are outside the support or have non-finite
        # log-pdf. Keeping the objective finite lets generic optimizers
        # make progress instead of stalling on inf.
        cond0 = ~self._support_mask(x, *args)
        n_bad = np.count_nonzero(cond0, axis=0)
        if n_bad > 0:
            # Drop out-of-support points; they are accounted for solely
            # through the penalty below.
            x = argsreduce(~cond0, x)[0]
        logpxf = self._logpxf(x, *args)
        finite_logpxf = np.isfinite(logpxf)
        # Points with non-finite log-pdf are treated as bad as well.
        n_bad += np.sum(~finite_logpxf, axis=0)
        if n_bad > 0:
            # Penalty per bad point: 100 * log(_XMAX), i.e. huge but finite.
            penalty = n_bad * log(_XMAX) * 100
            return -np.sum(logpxf[finite_logpxf], axis=0) + penalty
        return -np.sum(logpxf, axis=0)
    def _penalized_nnlf(self, theta, x):
        """Penalized negative loglikelihood function.
        i.e., - sum (log pdf(x, theta), axis=0) + penalty
        where theta are the parameters (including loc and scale)
        """
        loc, scale, args = self._unpack_loc_scale(theta)
        # Invalid parameter vectors still yield inf; the penalty only
        # softens out-of-support *data*, not invalid parameters.
        if not self._argcheck(*args) or scale <= 0:
            return inf
        x = asarray((x-loc) / scale)
        # Jacobian term from standardizing the data by scale.
        n_log_scale = len(x) * log(scale)
        return self._nnlf_and_penalty(x, args) + n_log_scale
class _ShapeInfo:
def __init__(self, name, integrality=False, domain=(-np.inf, np.inf),
inclusive=(True, True)):
self.name = name
self.integrality = integrality
domain = list(domain)
if np.isfinite(domain[0]) and not inclusive[0]:
domain[0] = np.nextafter(domain[0], np.inf)
if np.isfinite(domain[1]) and not inclusive[1]:
domain[1] = np.nextafter(domain[1], -np.inf)
self.domain = domain
def _get_fixed_fit_value(kwds, names):
"""
Given names such as `['f0', 'fa', 'fix_a']`, check that there is
at most one non-None value in `kwds` associaed with those names.
Return that value, or None if none of the names occur in `kwds`.
As a side effect, all occurrences of those names in `kwds` are
removed.
"""
vals = [(name, kwds.pop(name)) for name in names if name in kwds]
if len(vals) > 1:
repeated = [name for name, val in vals]
raise ValueError("fit method got multiple keyword arguments to "
"specify the same fixed parameter: " +
', '.join(repeated))
return vals[0][1] if vals else None
# continuous random variables: implement maybe later
#
# hf --- Hazard Function (PDF / SF)
# chf --- Cumulative hazard function (-log(SF))
# psf --- Probability sparsity function (reciprocal of the pdf) in
# units of percent-point-function (as a function of q).
# Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
The value in a result arrays that indicates a value that for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
support
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of the distribution.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
For most of the scipy.stats distributions, the support interval doesn't
depend on the shape parameters. ``x`` being in the support interval is
equivalent to ``self.a <= x <= self.b``. If either of the endpoints of
the support do depend on the shape parameters, then
i) the distribution must implement the ``_get_support`` method; and
ii) those dependent endpoints must be omitted from the distribution's
call to the ``rv_continuous`` initializer.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
applied to a uniform random variate. In order to generate random variates
efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
if the inverse cdf can expressed in an explicit form) or a sampling
method needs to be implemented in a custom ``_rvs`` method.
If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
The main reason would be to improve numerical accuracy: for example,
the survival function ``_sf`` is computed as ``1 - _cdf`` which can
result in loss of precision if ``_cdf(x)`` is close to one.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
_get_support
    There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in
    all cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
    Normally, you must provide shape parameters (and, optionally, location
    and scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
`rv_frozen` object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super().__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self.extradoc = extradoc
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
self._attach_methods()
if longname is None:
if name[0] in ['aeiouAEIOU']:
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
# _random_state attribute is taken care of by rv_generic
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "vecentropy", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
    def _attach_methods(self):
        """
        Attaches dynamically created methods to the rv_continuous instance.
        """
        # _attach_methods is responsible for calling _attach_argparser_methods
        self._attach_argparser_methods()
        # nin correction
        # np.vectorize cannot infer the argument count through *args, so
        # nin is set explicitly: numargs shape parameters plus the main
        # argument (x, q or m).
        self._ppfvec = vectorize(self._ppf_single, otypes='d')
        self._ppfvec.nin = self.numargs + 1
        self.vecentropy = vectorize(self._entropy, otypes='d')
        self._cdfvec = vectorize(self._cdf_single, otypes='d')
        self._cdfvec.nin = self.numargs + 1
        # momtype 0 integrates x**m * pdf(x) over the support; momtype 1
        # integrates ppf(q)**m over q in [0, 1].
        if self.moment_type == 0:
            self.generic_moment = vectorize(self._mom0_sc, otypes='d')
        else:
            self.generic_moment = vectorize(self._mom1_sc, otypes='d')
        # Because of the *args argument of _mom0_sc, vectorize cannot count the
        # number of arguments correctly.
        self.generic_moment.nin = self.numargs + 1
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
    def _ppf_single(self, q, *args):
        """Scalar inverse CDF: solve cdf(x) - q = 0 with Brent's method,
        first expanding infinite support endpoints to a finite bracket."""
        factor = 10.
        left, right = self._get_support(*args)
        if np.isinf(left):
            # Grow the left bracket geometrically until cdf(left) <= q.
            left = min(-factor, right)
            while self._ppf_to_solve(left, q, *args) > 0.:
                left, right = left * factor, left
            # left is now such that cdf(left) <= q
            # if right has changed, then cdf(right) > q
        if np.isinf(right):
            # Grow the right bracket geometrically until cdf(right) >= q.
            right = max(factor, left)
            while self._ppf_to_solve(right, q, *args) < 0.:
                left, right = right, right * factor
            # right is now such that cdf(right) >= q
        return optimize.brentq(self._ppf_to_solve,
                               left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
    def _mom0_sc(self, m, *args):
        # m-th raw moment by numerical integration of x**m * pdf(x) over
        # the support (used when moment_type == 0).
        _a, _b = self._get_support(*args)
        return integrate.quad(self._mom_integ0, _a, _b,
                              args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
    def _mom1_sc(self, m, *args):
        # m-th raw moment by numerical integration of ppf(q)**m over
        # q in [0, 1] (used when moment_type == 1, the default).
        return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
    def _pdf(self, x, *args):
        # Default PDF: 5-point numerical derivative of the CDF; subclasses
        # normally override this for accuracy and speed.
        return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
# Could also define any of these
def _logpdf(self, x, *args):
p = self._pdf(x, *args)
with np.errstate(divide='ignore'):
return log(p)
def _logpxf(self, x, *args):
# continuous distributions have PDF, discrete have PMF, but sometimes
# the distinction doesn't matter. This lets us use `_logpxf` for both
# discrete and continuous distributions.
return self._logpdf(x, *args)
    def _cdf_single(self, x, *args):
        # Scalar CDF: numerical integration of the PDF from the lower
        # support endpoint up to x.
        _a, _b = self._get_support(*args)
        return integrate.quad(self._pdf, _a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
# generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
# in rv_generic
    def pdf(self, x, *args, **kwds):
        """Probability density function at x of the given RV.
        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at x
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # Work in at least float64 precision.
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        # Standardize: y = (x - loc) / scale.
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        # cond0: valid parameters; cond1: x inside the support.
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = self._support_mask(x, *args) & (scale > 0)
        cond = cond0 & cond1
        # Outside the support the density is 0 (the array default).
        output = zeros(shape(cond), dtyp)
        # Invalid parameters or nan input -> badvalue.
        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
        if np.any(cond):
            # Evaluate _pdf only on valid entries; divide by scale for the
            # change of variables.
            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
            scale, goodargs = goodargs[-1], goodargs[:-1]
            place(output, cond, self._pdf(*goodargs) / scale)
        if output.ndim == 0:
            # Scalar input -> scalar output.
            return output[()]
        return output
    def logpdf(self, x, *args, **kwds):
        """Log of the probability density function at x of the given RV.
        This uses a more numerically accurate calculation if available.
        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        Returns
        -------
        logpdf : array_like
            Log of the probability density function evaluated at x
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # Work in at least float64 precision.
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        # Standardize: y = (x - loc) / scale.
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = self._support_mask(x, *args) & (scale > 0)
        cond = cond0 & cond1
        # Default is log(0) = -inf outside the support.
        output = empty(shape(cond), dtyp)
        output.fill(NINF)
        # Invalid parameters or nan input -> badvalue.
        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
            scale, goodargs = goodargs[-1], goodargs[:-1]
            # log(pdf(y)/scale) == logpdf(y) - log(scale).
            place(output, cond, self._logpdf(*goodargs) - log(scale))
        if output.ndim == 0:
            return output[()]
        return output
    def cdf(self, x, *args, **kwds):
        """
        Cumulative distribution function of the given RV.
        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        Returns
        -------
        cdf : ndarray
            Cumulative distribution function evaluated at `x`
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # Work in at least float64 precision.
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        # Standardize: y = (x - loc) / scale.
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)
        # cond1: strictly inside the open support; cond2: at or above the
        # upper endpoint, where the CDF is exactly 1.
        cond1 = self._open_support_mask(x, *args) & (scale > 0)
        cond2 = (x >= np.asarray(_b)) & cond0
        cond = cond0 & cond1
        # Below the support the CDF is 0 (the array default).
        output = zeros(shape(cond), dtyp)
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        place(output, cond2, 1.0)
        if np.any(cond):  # call only if at least 1 entry
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._cdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def logcdf(self, x, *args, **kwds):
        """Log of the cumulative distribution function at x of the given RV.
        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        Returns
        -------
        logcdf : array_like
            Log of the cumulative distribution function evaluated at x
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # Work in at least float64 precision.
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        # Standardize: y = (x - loc) / scale.
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = self._open_support_mask(x, *args) & (scale > 0)
        # At or above the upper endpoint, log(cdf) = log(1) = 0.
        cond2 = (x >= _b) & cond0
        cond = cond0 & cond1
        # Default is log(0) = -inf below the support.
        output = empty(shape(cond), dtyp)
        output.fill(NINF)
        place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
        place(output, cond2, 0.0)
        if np.any(cond):  # call only if at least 1 entry
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._logcdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def sf(self, x, *args, **kwds):
        """Survival function (1 - `cdf`) at x of the given RV.
        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        Returns
        -------
        sf : array_like
            Survival function evaluated at x
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # Work in at least float64 precision.
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        # Standardize: y = (x - loc) / scale.
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = self._open_support_mask(x, *args) & (scale > 0)
        # At or below the lower endpoint the survival function is 1.
        cond2 = cond0 & (x <= _a)
        cond = cond0 & cond1
        # Above the support the survival function is 0 (the array default).
        output = zeros(shape(cond), dtyp)
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        place(output, cond2, 1.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._sf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def logsf(self, x, *args, **kwds):
        """Log of the survival function of the given RV.
        Returns the log of the "survival function," defined as (1 - `cdf`),
        evaluated at `x`.
        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        Returns
        -------
        logsf : ndarray
            Log of the survival function evaluated at `x`.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # Work in at least float64 precision.
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        # Standardize: y = (x - loc) / scale.
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = self._open_support_mask(x, *args) & (scale > 0)
        # At or below the lower endpoint, log(sf) = log(1) = 0.
        cond2 = cond0 & (x <= _a)
        cond = cond0 & cond1
        # Default is log(0) = -inf above the support.
        output = empty(shape(cond), dtyp)
        output.fill(NINF)
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        place(output, cond2, 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._logsf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def ppf(self, q, *args, **kwds):
        """Percent point function (inverse of `cdf`) at q of the given RV.
        Parameters
        ----------
        q : array_like
            lower tail probability
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        Returns
        -------
        x : array_like
            quantile corresponding to the lower tail probability q.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        q, loc, scale = map(asarray, (q, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # (loc == loc) filters out nan locations.
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        # Interior probabilities; the endpoints q == 0 and q == 1 map
        # exactly onto the support bounds below.
        cond1 = (0 < q) & (q < 1)
        cond2 = cond0 & (q == 0)
        cond3 = cond0 & (q == 1)
        cond = cond0 & cond1
        # Anything not covered by the conditions keeps badvalue.
        output = np.full(shape(cond), fill_value=self.badvalue)
        lower_bound = _a * scale + loc
        upper_bound = _b * scale + loc
        place(output, cond2, argsreduce(cond2, lower_bound)[0])
        place(output, cond3, argsreduce(cond3, upper_bound)[0])
        if np.any(cond):  # call only if at least 1 entry
            goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
            # Standardized quantile mapped back through loc/scale.
            place(output, cond, self._ppf(*goodargs) * scale + loc)
        if output.ndim == 0:
            return output[()]
        return output
    def isf(self, q, *args, **kwds):
        """Inverse survival function (inverse of `sf`) at q of the given RV.
        Parameters
        ----------
        q : array_like
            upper tail probability
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        Returns
        -------
        x : ndarray or scalar
            Quantile corresponding to the upper tail probability q.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        q, loc, scale = map(asarray, (q, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # (loc == loc) filters out nan locations.
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        cond1 = (0 < q) & (q < 1)
        # Note the mirror image of ppf: q == 1 maps to the lower bound and
        # q == 0 to the upper bound.
        cond2 = cond0 & (q == 1)
        cond3 = cond0 & (q == 0)
        cond = cond0 & cond1
        output = np.full(shape(cond), fill_value=self.badvalue)
        lower_bound = _a * scale + loc
        upper_bound = _b * scale + loc
        place(output, cond2, argsreduce(cond2, lower_bound)[0])
        place(output, cond3, argsreduce(cond3, upper_bound)[0])
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
            place(output, cond, self._isf(*goodargs) * scale + loc)
        if output.ndim == 0:
            return output[()]
        return output
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError as e:
raise ValueError("Not enough input arguments.") from e
return loc, scale, args
    def _fitstart(self, data, args=None):
        """Starting point for fit (shape arguments + loc + scale)."""
        if args is None:
            # Default every shape parameter to 1.0.
            args = (1.0,)*self.numargs
        # Estimate loc/scale consistent with the support for these shapes.
        loc, scale = self._fit_loc_scale_support(data, *args)
        return args + (loc, scale)
    def _reduce_func(self, args, kwds, data=None):
        """
        Return the (possibly reduced) function to optimize in order to find MLE
        estimates for the .fit method.
        """
        # Convert fixed shape parameters to the standard numeric form: e.g. for
        # stats.beta, shapes='a, b'. To fix `a`, the caller can give a value
        # for `f0`, `fa` or 'fix_a'. The following converts the latter two
        # into the first (numeric) form.
        shapes = []
        if self.shapes:
            shapes = self.shapes.replace(',', ' ').split()
            for j, s in enumerate(shapes):
                key = 'f' + str(j)
                names = [key, 'f' + s, 'fix_' + s]
                val = _get_fixed_fit_value(kwds, names)
                if val is not None:
                    kwds[key] = val
        args = list(args)
        Nargs = len(args)
        fixedn = []
        names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
        x0 = []
        # Partition the full parameter vector into fixed values (held
        # constant) and free values (the optimizer's start vector x0).
        for n, key in enumerate(names):
            if key in kwds:
                fixedn.append(n)
                args[n] = kwds.pop(key)
            else:
                x0.append(args[n])
        methods = {"mle", "mm"}
        method = kwds.pop('method', "mle").lower()
        if method == "mm":
            # Method of moments: match as many raw data moments as there
            # are free parameters.
            n_params = len(shapes) + 2 - len(fixedn)
            exponents = (np.arange(1, n_params+1))[:, np.newaxis]
            data_moments = np.sum(data[None, :]**exponents/len(data), axis=1)
            def objective(theta, x):
                return self._moment_error(theta, x, data_moments)
        elif method == "mle":
            objective = self._penalized_nnlf
        else:
            raise ValueError("Method '{0}' not available; must be one of {1}"
                             .format(method, methods))
        if len(fixedn) == 0:
            # Nothing fixed: optimize the objective directly.
            func = objective
            restore = None
        else:
            if len(fixedn) == Nargs:
                raise ValueError(
                    "All parameters fixed. There is nothing to optimize.")
            def restore(args, theta):
                # Replace with theta for all numbers not in fixedn
                # This allows the non-fixed values to vary, but
                # we still call self.nnlf with all parameters.
                i = 0
                for n in range(Nargs):
                    if n not in fixedn:
                        args[n] = theta[i]
                        i += 1
                return args
            def func(theta, x):
                # Rebuild the full parameter vector before evaluating.
                newtheta = restore(args[:], theta)
                return objective(newtheta, x)
        return x0, func, restore, args
    def _moment_error(self, theta, x, data_moments):
        # Objective for method-of-moments fitting: sum of squared relative
        # errors between data moments and distribution moments.
        loc, scale, args = self._unpack_loc_scale(theta)
        if not self._argcheck(*args) or scale <= 0:
            return inf
        dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale)
                                 for i in range(len(data_moments))])
        if np.any(np.isnan(dist_moments)):
            raise ValueError("Method of moments encountered a non-finite "
                             "distribution moment and cannot continue. "
                             "Consider trying method='MLE'.")
        # The max(|moment|, 1e-8) denominator guards against division by
        # zero when a data moment vanishes.
        return (((data_moments - dist_moments) /
                 np.maximum(np.abs(data_moments), 1e-8))**2).sum()
    def fit(self, data, *args, **kwds):
        """
        Return estimates of shape (if applicable), location, and scale
        parameters from data. The default estimation method is Maximum
        Likelihood Estimation (MLE), but Method of Moments (MM)
        is also available.
        Starting estimates for
        the fit are given by input arguments; for any arguments not provided
        with starting estimates, ``self._fitstart(data)`` is called to generate
        such.
        One can hold some parameters fixed to specific values by passing in
        keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
        and ``floc`` and ``fscale`` (for location and scale parameters,
        respectively).
        Parameters
        ----------
        data : array_like
            Data to use in estimating the distribution parameters.
        arg1, arg2, arg3,... : floats, optional
            Starting value(s) for any shape-characterizing arguments (those not
            provided will be determined by a call to ``_fitstart(data)``).
            No default value.
        **kwds : floats, optional
            - `loc`: initial guess of the distribution's location parameter.
            - `scale`: initial guess of the distribution's scale parameter.
            Special keyword arguments are recognized as holding certain
            parameters fixed:
            - f0...fn : hold respective shape parameters fixed.
              Alternatively, shape parameters to fix can be specified by name.
              For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
              are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
              equivalent to ``f1``.
            - floc : hold location parameter fixed to specified value.
            - fscale : hold scale parameter fixed to specified value.
            - optimizer : The optimizer to use.
              The optimizer must take ``func``,
              and starting position as the first two arguments,
              plus ``args`` (for extra arguments to pass to the
              function to be optimized) and ``disp=0`` to suppress
              output as keyword arguments.
            - method : The method to use. The default is "MLE" (Maximum
              Likelihood Estimate); "MM" (Method of Moments)
              is also available.
        Returns
        -------
        parameter_tuple : tuple of floats
            Estimates for any shape parameters (if applicable),
            followed by those for location and scale.
            For most random variables, shape statistics
            will be returned, but there are exceptions (e.g. ``norm``).
        Notes
        -----
        With ``method="MLE"`` (default), the fit is computed by minimizing
        the negative log-likelihood function. A large, finite penalty
        (rather than infinite negative log-likelihood) is applied for
        observations beyond the support of the distribution.
        With ``method="MM"``, the fit is computed by minimizing the L2 norm
        of the relative errors between the first *k* raw (about zero) data
        moments and the corresponding distribution moments, where *k* is the
        number of non-fixed parameters.
        More precisely, the objective function is::
            (((data_moments - dist_moments)
              / np.maximum(np.abs(data_moments), 1e-8))**2).sum()
        where the constant ``1e-8`` avoids division by zero in case of
        vanishing data moments. Typically, this error norm can be reduced to
        zero.
        Note that the standard method of moments can produce parameters for
        which some data are outside the support of the fitted distribution;
        this implementation does nothing to prevent this.
        For either method,
        the returned answer is not guaranteed to be globally optimal; it
        may only be locally optimal, or the optimization may fail altogether.
        If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``,
        the `fit` method will raise a ``RuntimeError``.
        Examples
        --------
        Generate some data to fit: draw random variates from the `beta`
        distribution
        >>> from scipy.stats import beta
        >>> a, b = 1., 2.
        >>> x = beta.rvs(a, b, size=1000)
        Now we can fit all four parameters (``a``, ``b``, ``loc``
        and ``scale``):
        >>> a1, b1, loc1, scale1 = beta.fit(x)
        We can also use some prior knowledge about the dataset: let's keep
        ``loc`` and ``scale`` fixed:
        >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
        >>> loc1, scale1
        (0, 1)
        We can also keep shape parameters fixed by using ``f``-keywords. To
        keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
        equivalently, ``fa=1``:
        >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
        >>> a1
        1
        Not all distributions return estimates for the shape parameters.
        ``norm`` for example just returns estimates for location and scale:
        >>> from scipy.stats import norm
        >>> x = norm.rvs(a, b, size=1000, random_state=123)
        >>> loc1, scale1 = norm.fit(x)
        >>> loc1, scale1
        (0.92087172783841631, 2.0015750750324668)
        """
        data = np.asarray(data)
        # Peek (don't pop) the method here; _reduce_func pops it for real.
        method = kwds.get('method', "mle").lower()
        # memory for method of moments
        Narg = len(args)
        if Narg > self.numargs:
            raise TypeError("Too many input arguments.")
        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")
        start = [None]*2
        if (Narg < self.numargs) or not ('loc' in kwds and
                                         'scale' in kwds):
            # get distribution specific starting locations
            start = self._fitstart(data)
            args += start[Narg:-2]
        loc = kwds.pop('loc', start[-2])
        scale = kwds.pop('scale', start[-1])
        args += (loc, scale)
        # Build the objective over the free parameters only; `restore`
        # re-inserts any fixed values afterwards.
        x0, func, restore, args = self._reduce_func(args, kwds, data=data)
        optimizer = kwds.pop('optimizer', optimize.fmin)
        # convert string to function in scipy.optimize
        optimizer = _fit_determine_optimizer(optimizer)
        # by now kwds must be empty, since everybody took what they needed
        if kwds:
            raise TypeError("Unknown arguments: %s." % kwds)
        # In some cases, method of moments can be done with fsolve/root
        # instead of an optimizer, but sometimes no solution exists,
        # especially when the user fixes parameters. Minimizing the sum
        # of squares of the error generalizes to these cases.
        vals = optimizer(func, x0, args=(ravel(data),), disp=0)
        obj = func(vals, data)
        if restore is not None:
            # Merge the optimized free values back with the fixed ones.
            vals = restore(args, vals)
        vals = tuple(vals)
        loc, scale, shapes = self._unpack_loc_scale(vals)
        if not (np.all(self._argcheck(*shapes)) and scale > 0):
            raise Exception("Optimization converged to parameters that are "
                            "outside the range allowed by the distribution.")
        if method == 'mm':
            if not np.isfinite(obj):
                raise Exception("Optimization failed: either a data moment "
                                "or fitted distribution moment is "
                                "non-finite.")
        return vals
def _fit_loc_scale_support(self, data, *args):
"""Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
_a, _b = self._get_support(*args)
a, b = _a, _b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale
# estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
_a, _b = self._get_support(*args)
with np.errstate(over='ignore'):
h = integrate.quad(integ, _a, _b)[0]
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(_b):
upper = upp
else:
upper = _b
if np.isinf(_a):
lower = low
else:
lower = _a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution by numerical integration.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
ub
E[f(x)] = Integral(f(x) * dist.pdf(x)),
lb
where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
distribution. If the bounds ``lb`` and ``ub`` correspond to the
support of the distribution, e.g. ``[-inf, inf]`` in the default
case, then the integral is the unrestricted expectation of ``f(x)``.
Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
outside a finite interval in which case the expectation is
calculated within the finite range ``[lb, ub]``.
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`scipy.integrate.quad`. Neither this function nor
`scipy.integrate.quad` can verify whether the integral exists or is
finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and
``cauchy(0).expect()`` returns ``0.0``.
The function is not vectorized.
Examples
--------
To understand the effect of the bounds of integration consider
>>> from scipy.stats import expon
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
0.6321205588285578
This is close to
>>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
0.6321205588285577
If ``conditional=True``
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
1.0000000000000002
The slight deviation from 1 is due to numerical integration.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
_a, _b = self._get_support(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + _a * scale
if ub is None:
ub = loc + _b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
return vals
def _param_info(self):
shape_info = self._shape_info()
loc_info = _ShapeInfo("loc", False, (-np.inf, np.inf), (False, False))
scale_info = _ShapeInfo("scale", False, (0, np.inf), (False, False))
param_info = shape_info + [loc_info, scale_info]
return param_info
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
def fun(x):
return np.power(x, n) * self._pmf(x, *args)
_a, _b = self._get_support(*args)
return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
_a, _b = self._get_support(*args)
b = _b
a = _a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= _b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= _a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
    """A generic discrete random variable class meant for subclassing.

    `rv_discrete` is a base class to construct specific distribution classes
    and instances for discrete random variables. It can also be used
    to construct an arbitrary distribution defined by a list of support
    points and corresponding probabilities.

    Parameters
    ----------
    a : float, optional
        Lower bound of the support of the distribution, default: 0
    b : float, optional
        Upper bound of the support of the distribution, default: plus infinity
    moment_tol : float, optional
        The tolerance for the generic calculation of moments.
    values : tuple of two array_like, optional
        ``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero
        probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``
        and ``pk`` must have the same shape.
    inc : integer, optional
        Increment for the support of the distribution.
        Default is 1. (other values have not been tested)
    badvalue : float, optional
        The value in a result arrays that indicates a value that for which
        some argument restriction is violated, default is np.nan.
    name : str, optional
        The name of the instance. This string is used to construct the default
        example for distributions.
    longname : str, optional
        This string is used as part of the first line of the docstring returned
        when a subclass has no docstring of its own. Note: `longname` exists
        for backwards compatibility, do not use for new subclasses.
    shapes : str, optional
        The shape of the distribution. For example "m, n" for a distribution
        that takes two integers as the two shape arguments for all its methods
        If not provided, shape parameters will be inferred from
        the signatures of the private methods, ``_pmf`` and ``_cdf`` of
        the instance.
    extradoc : str, optional
        This string is used as the last part of the docstring returned when a
        subclass has no docstring of its own. Note: `extradoc` exists for
        backwards compatibility, do not use for new subclasses.
    seed : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Methods
    -------
    rvs
    pmf
    logpmf
    cdf
    logcdf
    sf
    logsf
    ppf
    isf
    moment
    stats
    entropy
    expect
    median
    mean
    std
    var
    interval
    __call__
    support

    Notes
    -----
    This class is similar to `rv_continuous`. Whether a shape parameter is
    valid is decided by an ``_argcheck`` method (which defaults to checking
    that its arguments are strictly positive.)
    The main differences are:

    - the support of the distribution is a set of integers
    - instead of the probability density function, ``pdf`` (and the
      corresponding private ``_pdf``), this class defines the
      *probability mass function*, `pmf` (and the corresponding
      private ``_pmf``.)
    - scale parameter is not defined.

    To create a new discrete distribution, we would do the following:

    >>> from scipy.stats import rv_discrete
    >>> class poisson_gen(rv_discrete):
    ...     "Poisson distribution"
    ...     def _pmf(self, k, mu):
    ...         return exp(-mu) * mu**k / factorial(k)

    and create an instance::

    >>> poisson = poisson_gen(name="poisson")

    Note that above we defined the Poisson distribution in the standard form.
    Shifting the distribution can be done by providing the ``loc`` parameter
    to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
    delegates the work to ``poisson._pmf(x-loc, mu)``.

    **Discrete distributions from a list of probabilities**

    Alternatively, you can construct an arbitrary discrete rv defined
    on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
    ``values`` keyword argument to the `rv_discrete` constructor.

    Examples
    --------
    Custom made discrete distribution:

    >>> from scipy import stats
    >>> xk = np.arange(7)
    >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
    >>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
    >>>
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots(1, 1)
    >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
    >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
    >>> plt.show()

    Random number generation:

    >>> R = custm.rvs(size=100)
    """
    def __new__(cls, a=0, b=inf, name=None, badvalue=None,
                moment_tol=1e-8, values=None, inc=1, longname=None,
                shapes=None, extradoc=None, seed=None):
        # When explicit (xk, pk) values are given, instances are actually
        # of the rv_sample subclass, which implements the finite-support
        # machinery.
        if values is not None:
            # dispatch to a subclass
            return super(rv_discrete, cls).__new__(rv_sample)
        else:
            # business as usual
            return super(rv_discrete, cls).__new__(cls)
    def __init__(self, a=0, b=inf, name=None, badvalue=None,
                 moment_tol=1e-8, values=None, inc=1, longname=None,
                 shapes=None, extradoc=None, seed=None):
        super().__init__(seed)
        # cf generic freeze: keep the constructor arguments so frozen
        # copies can be rebuilt via _updated_ctor_param.
        self._ctor_param = dict(
            a=a, b=b, name=name, badvalue=badvalue,
            moment_tol=moment_tol, values=values, inc=inc,
            longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
        if badvalue is None:
            badvalue = nan
        self.badvalue = badvalue
        self.a = a
        self.b = b
        self.moment_tol = moment_tol
        self.inc = inc
        self.shapes = shapes
        # values-based construction is handled by rv_sample (see __new__);
        # reaching here with values set means dispatch failed.
        if values is not None:
            raise ValueError("rv_discrete.__init__(..., values != None, ...)")
        self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
                                  locscale_in='loc=0',
                                  # scale=1 for discrete RVs
                                  locscale_out='loc, 1')
        self._attach_methods()
        self._construct_docstrings(name, longname, extradoc)
    def __getstate__(self):
        # Pickling support: drop the dynamically created callables.
        dct = self.__dict__.copy()
        # these methods will be remade in __setstate__
        attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
                 "_cdfvec", "_ppfvec", "generic_moment"]
        [dct.pop(attr, None) for attr in attrs]
        return dct
    def _attach_methods(self):
        """Attaches dynamically created methods to the rv_discrete instance."""
        self._cdfvec = vectorize(self._cdf_single, otypes='d')
        self.vecentropy = vectorize(self._entropy)
        # _attach_methods is responsible for calling _attach_argparser_methods
        self._attach_argparser_methods()
        # nin correction needs to be after we know numargs
        # correct nin for generic moment vectorization
        _vec_generic_moment = vectorize(_drv2_moment, otypes='d')
        _vec_generic_moment.nin = self.numargs + 2
        self.generic_moment = types.MethodType(_vec_generic_moment, self)
        # correct nin for ppf vectorization
        _vppf = vectorize(_drv2_ppfsingle, otypes='d')
        _vppf.nin = self.numargs + 2
        self._ppfvec = types.MethodType(_vppf, self)
        # now that self.numargs is defined, we can adjust nin
        self._cdfvec.nin = self.numargs + 1
    def _construct_docstrings(self, name, longname, extradoc):
        if name is None:
            name = 'Distribution'
        self.name = name
        self.extradoc = extradoc
        # generate docstring for subclass instances
        if longname is None:
            # NOTE(review): this membership test checks name[0] against a
            # one-element list containing the string 'aeiouAEIOU', so "An "
            # is effectively never selected; presumably a vowel set was
            # intended -- confirm before changing.
            if name[0] in ['aeiouAEIOU']:
                hstr = "An "
            else:
                hstr = "A "
            longname = hstr + name
        if sys.flags.optimize < 2:
            # Skip adding docstrings if interpreter is run with -OO
            if self.__doc__ is None:
                self._construct_default_doc(longname=longname,
                                            extradoc=extradoc,
                                            docdict=docdict_discrete,
                                            discrete='discrete')
            else:
                dct = dict(distdiscrete)
                self._construct_doc(docdict_discrete, dct.get(self.name))
            # discrete RV do not have the scale parameter, remove it
            self.__doc__ = self.__doc__.replace(
                '\n    scale : array_like, '
                'optional\n        scale parameter (default=1)', '')
    def _updated_ctor_param(self):
        """Return the current version of _ctor_param, possibly updated by user.

        Used by freezing.
        Keep this in sync with the signature of __init__.
        """
        dct = self._ctor_param.copy()
        dct['a'] = self.a
        dct['b'] = self.b
        dct['badvalue'] = self.badvalue
        dct['moment_tol'] = self.moment_tol
        dct['inc'] = self.inc
        dct['name'] = self.name
        dct['shapes'] = self.shapes
        dct['extradoc'] = self.extradoc
        return dct
    def _nonzero(self, k, *args):
        # Default support predicate: the pmf lives on the integers.
        return floor(k) == k
    def _pmf(self, k, *args):
        # Default pmf as a difference of consecutive cdf values.
        return self._cdf(k, *args) - self._cdf(k-1, *args)
    def _logpmf(self, k, *args):
        return log(self._pmf(k, *args))
    def _logpxf(self, k, *args):
        # continuous distributions have PDF, discrete have PMF, but sometimes
        # the distinction doesn't matter. This lets us use `_logpxf` for both
        # discrete and continuous distributions.
        return self._logpmf(k, *args)
    def _unpack_loc_scale(self, theta):
        # Split an optimizer parameter vector into (loc, scale, shapes);
        # discrete distributions have no scale, so it is fixed at 1.
        try:
            loc = theta[-1]
            scale = 1
            args = tuple(theta[:-1])
        except IndexError as e:
            raise ValueError("Not enough input arguments.") from e
        return loc, scale, args
    def _cdf_single(self, k, *args):
        # Brute-force cdf: sum the pmf from the lower support bound up to k.
        _a, _b = self._get_support(*args)
        m = arange(int(_a), k+1)
        return np.sum(self._pmf(m, *args), axis=0)
    def _cdf(self, x, *args):
        # Vectorized cdf evaluated at floor(x).
        k = floor(x)
        return self._cdfvec(k, *args)
    # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
    def rvs(self, *args, **kwargs):
        """Random variates of given type.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        size : int or tuple of ints, optional
            Defining number of random variates (Default is 1). Note that `size`
            has to be given as keyword, not as positional argument.
        random_state : {None, int, `numpy.random.Generator`,
                        `numpy.random.RandomState`}, optional
            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
            singleton is used.
            If `seed` is an int, a new ``RandomState`` instance is used,
            seeded with `seed`.
            If `seed` is already a ``Generator`` or ``RandomState`` instance
            then that instance is used.

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of given `size`.
        """
        # Flag the generic sampler so results are rounded to the support.
        kwargs['discrete'] = True
        return super().rvs(*args, **kwargs)
    def pmf(self, k, *args, **kwds):
        """Probability mass function at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        pmf : array_like
            Probability mass function evaluated at k
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # Shift to the standardized distribution.
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        # Invalid shape parameters or nan inputs map to badvalue.
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output
    def logpmf(self, k, *args, **kwds):
        """Log of the probability mass function at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter. Default is 0.

        Returns
        -------
        logpmf : array_like
            Log of the probability mass function evaluated at k.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
        cond = cond0 & cond1
        # Outside the support the log-pmf is -inf.
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logpmf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def cdf(self, k, *args, **kwds):
        """Cumulative distribution function of the given RV.

        Parameters
        ----------
        k : array_like, int
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        cdf : ndarray
            Cumulative distribution function evaluated at `k`.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k >= _b)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        # At or above the upper support bound the cdf is exactly 1.
        place(output, cond2*(cond0 == cond0), 1.0)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output
    def logcdf(self, k, *args, **kwds):
        """Log of the cumulative distribution function at k of the given RV.

        Parameters
        ----------
        k : array_like, int
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        logcdf : array_like
            Log of the cumulative distribution function evaluated at k.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k >= _b)
        cond = cond0 & cond1
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # At or above the upper support bound: log(1) = 0.
        place(output, cond2*(cond0 == cond0), 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logcdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def sf(self, k, *args, **kwds):
        """Survival function (1 - `cdf`) at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        sf : array_like
            Survival function evaluated at k.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray(k-loc)
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k < _a) & cond0
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # Below the lower support bound the survival function is 1.
        place(output, cond2, 1.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output
    def logsf(self, k, *args, **kwds):
        """Log of the survival function of the given RV.

        Returns the log of the "survival function," defined as 1 - `cdf`,
        evaluated at `k`.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        logsf : ndarray
            Log of the survival function evaluated at `k`.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray(k-loc)
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k < _a) & cond0
        cond = cond0 & cond1
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # Below the lower support bound: log(1) = 0.
        place(output, cond2, 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logsf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def ppf(self, q, *args, **kwds):
        """Percent point function (inverse of `cdf`) at q of the given RV.

        Parameters
        ----------
        q : array_like
            Lower tail probability.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        k : array_like
            Quantile corresponding to the lower tail probability, q.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        q, loc = map(asarray, (q, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        cond0 = self._argcheck(*args) & (loc == loc)
        cond1 = (q > 0) & (q < 1)
        cond2 = (q == 1) & cond0
        cond = cond0 & cond1
        output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output type 'd' to handle nin and inf
        # q == 0 maps to the point just below the support; q == 1 to its top.
        place(output, (q == 0)*(cond == cond), _a-1 + loc)
        place(output, cond2, _b + loc)
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
            loc, goodargs = goodargs[-1], goodargs[:-1]
            place(output, cond, self._ppf(*goodargs) + loc)
        if output.ndim == 0:
            return output[()]
        return output
    def isf(self, q, *args, **kwds):
        """Inverse survival function (inverse of `sf`) at q of the given RV.

        Parameters
        ----------
        q : array_like
            Upper tail probability.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        k : ndarray or scalar
            Quantile corresponding to the upper tail probability, q.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        q, loc = map(asarray, (q, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        cond0 = self._argcheck(*args) & (loc == loc)
        cond1 = (q > 0) & (q < 1)
        cond2 = (q == 1) & cond0
        cond3 = (q == 0) & cond0
        cond = cond0 & cond1
        # same problem as with ppf; copied from ppf and changed
        output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output type 'd' to handle nin and inf
        lower_bound = _a - 1 + loc
        upper_bound = _b + loc
        place(output, cond2*(cond == cond), lower_bound)
        place(output, cond3*(cond == cond), upper_bound)
        # call place only if at least 1 valid argument
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
            loc, goodargs = goodargs[-1], goodargs[:-1]
            # PB same as ticket 766
            place(output, cond, self._isf(*goodargs) + loc)
        if output.ndim == 0:
            return output[()]
        return output
    def _entropy(self, *args):
        # rv_sample instances carry explicit probabilities; use them directly.
        if hasattr(self, 'pk'):
            return stats.entropy(self.pk)
        else:
            # Otherwise sum entr(pmf) over the support, starting at the median.
            _a, _b = self._get_support(*args)
            return _expect(lambda x: entr(self.pmf(x, *args)),
                           _a, _b, self.ppf(0.5, *args), self.inc)
    def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
               conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
        """
        Calculate expected value of a function with respect to the distribution
        for discrete distribution by numerical summation.

        Parameters
        ----------
        func : callable, optional
            Function for which the expectation value is calculated.
            Takes only one argument.
            The default is the identity mapping f(k) = k.
        args : tuple, optional
            Shape parameters of the distribution.
        loc : float, optional
            Location parameter.
            Default is 0.
        lb, ub : int, optional
            Lower and upper bound for the summation, default is set to the
            support of the distribution, inclusive (``lb <= k <= ub``).
        conditional : bool, optional
            If true then the expectation is corrected by the conditional
            probability of the summation interval. The return value is the
            expectation of the function, `func`, conditional on being in
            the given interval (k such that ``lb <= k <= ub``).
            Default is False.
        maxcount : int, optional
            Maximal number of terms to evaluate (to avoid an endless loop for
            an infinite sum). Default is 1000.
        tolerance : float, optional
            Absolute tolerance for the summation. Default is 1e-10.
        chunksize : int, optional
            Iterate over the support of a distributions in chunks of this size.
            Default is 32.

        Returns
        -------
        expect : float
            Expected value.

        Notes
        -----
        For heavy-tailed distributions, the expected value may or
        may not exist,
        depending on the function, `func`. If it does exist, but the
        sum converges
        slowly, the accuracy of the result may be rather low. For instance, for
        ``zipf(4)``, accuracy for mean, variance in example is only 1e-5.
        increasing `maxcount` and/or `chunksize` may improve the result,
        but may also make zipf very slow.

        The function is not vectorized.
        """
        if func is None:
            def fun(x):
                # loc and args from outer scope
                return (x+loc)*self._pmf(x, *args)
        else:
            def fun(x):
                # loc and args from outer scope
                return func(x+loc)*self._pmf(x, *args)
        # used pmf because _pmf does not check support in randint and there
        # might be problems(?) with correct self.a, self.b at this stage maybe
        # not anymore, seems to work now with _pmf
        _a, _b = self._get_support(*args)
        if lb is None:
            lb = _a
        else:
            lb = lb - loc   # convert bound for standardized distribution
        if ub is None:
            ub = _b
        else:
            ub = ub - loc   # convert bound for standardized distribution
        if conditional:
            invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
        else:
            invfac = 1.0
        if isinstance(self, rv_sample):
            # Finite explicit support: brute-force sum.
            res = self._expect(fun, lb, ub)
            return res / invfac
        # iterate over the support, starting from the median
        x0 = self.ppf(0.5, *args)
        res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
        return res / invfac
    def _param_info(self):
        # Discrete distributions expose only shapes and loc (no scale).
        shape_info = self._shape_info()
        loc_info = _ShapeInfo("loc", True, (-np.inf, np.inf), (False, False))
        param_info = shape_info + [loc_info]
        return param_info
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is
infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
    """A 'sample' discrete distribution defined by the support and values.

    The ctor ignores most of the arguments, only needs the `values` argument.
    """
    def __init__(self, a=0, b=inf, name=None, badvalue=None,
                 moment_tol=1e-8, values=None, inc=1, longname=None,
                 shapes=None, extradoc=None, seed=None):
        # Skip rv_discrete.__init__ (which rejects values != None) and go
        # straight to rv_generic.
        super(rv_discrete, self).__init__(seed)
        if values is None:
            raise ValueError("rv_sample.__init__(..., values=None,...)")
        # cf generic freeze
        self._ctor_param = dict(
            a=a, b=b, name=name, badvalue=badvalue,
            moment_tol=moment_tol, values=values, inc=inc,
            longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
        if badvalue is None:
            badvalue = nan
        self.badvalue = badvalue
        self.moment_tol = moment_tol
        self.inc = inc
        self.shapes = shapes
        self.vecentropy = self._entropy
        # Validate the user-supplied support/probability pairs.
        xk, pk = values
        if np.shape(xk) != np.shape(pk):
            raise ValueError("xk and pk must have the same shape.")
        if np.less(pk, 0.0).any():
            raise ValueError("All elements of pk must be non-negative.")
        if not np.allclose(np.sum(pk), 1):
            raise ValueError("The sum of provided pk is not 1.")
        # Store the support sorted ascending; pk follows the same ordering.
        indx = np.argsort(np.ravel(xk))
        self.xk = np.take(np.ravel(xk), indx, 0)
        self.pk = np.take(np.ravel(pk), indx, 0)
        self.a = self.xk[0]
        self.b = self.xk[-1]
        # Cumulative probabilities, used by _cdf and _ppf.
        self.qvals = np.cumsum(self.pk, axis=0)
        self.shapes = ' '   # bypass inspection
        self._construct_argparser(meths_to_inspect=[self._pmf],
                                  locscale_in='loc=0',
                                  # scale=1 for discrete RVs
                                  locscale_out='loc, 1')
        self._attach_methods()
        self._construct_docstrings(name, longname, extradoc)
    def __getstate__(self):
        dct = self.__dict__.copy()
        # these methods will be remade in rv_generic.__setstate__,
        # which calls rv_generic._attach_methods
        attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"]
        [dct.pop(attr, None) for attr in attrs]
        return dct
    def _attach_methods(self):
        """Attaches dynamically created argparser methods."""
        self._attach_argparser_methods()
    def _get_support(self, *args):
        """Return the support of the (unscaled, unshifted) distribution.

        Parameters
        ----------
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).

        Returns
        -------
        a, b : numeric (float, or int or +/-np.inf)
            end-points of the distribution's support.
        """
        return self.a, self.b
    def _pmf(self, x):
        # pk broadcast against x where x matches a support point, else 0.
        return np.select([x == k for k in self.xk],
                         [np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
    def _cdf(self, x):
        # Index of the largest support point <= x; qvals holds the
        # cumulative probabilities.
        xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
        indx = np.argmax(xxk > xx, axis=-1) - 1
        return self.qvals[indx]
    def _ppf(self, q):
        # Smallest support point whose cumulative probability reaches q.
        qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
        indx = argmax(sqq >= qq, axis=-1)
        return self.xk[indx]
    def _rvs(self, size=None, random_state=None):
        # Need to define it explicitly, otherwise .rvs() with size=None
        # fails due to explicit broadcasting in _ppf
        U = random_state.uniform(size=size)
        if size is None:
            U = np.array(U, ndmin=1)
            Y = self._ppf(U)[0]
        else:
            Y = self._ppf(U)
        return Y
    def _entropy(self):
        # Shannon entropy of the explicit probability vector.
        return stats.entropy(self.pk)
    def generic_moment(self, n):
        # n-th non-central moment as an exact weighted sum over the support.
        n = asarray(n)
        return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
    def _expect(self, fun, lb, ub, *args, **kwds):
        # ignore all args, just do a brute force summation
        supp = self.xk[(lb <= self.xk) & (self.xk <= ub)]
        vals = fun(supp)
        return np.sum(vals)
def _check_shape(argshape, size):
"""
This is a utility function used by `_rvs()` in the class geninvgauss_gen.
It compares the tuple argshape to the tuple size.
Parameters
----------
argshape : tuple of integers
Shape of the arguments.
size : tuple of integers or integer
Size argument of rvs().
Returns
-------
The function returns two tuples, scalar_shape and bc.
scalar_shape : tuple
Shape to which the 1-d array of random variates returned by
_rvs_scalar() is converted when it is copied into the
output array of _rvs().
bc : tuple of booleans
bc is an tuple the same length as size. bc[j] is True if the data
associated with that index is generated in one call of _rvs_scalar().
"""
scalar_shape = []
bc = []
for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
fillvalue=1):
if sizedim > argdim or (argdim == sizedim == 1):
scalar_shape.append(sizedim)
bc.append(True)
else:
bc.append(False)
return tuple(scalar_shape[::-1]), tuple(bc[::-1])
def get_distribution_names(namespace_pairs, rv_base_class):
    """Collect names of statistical distributions and their generators.

    Parameters
    ----------
    namespace_pairs : sequence
        A snapshot of (name, value) pairs in the namespace of a module.
    rv_base_class : class
        The base class of random variable generator classes in a module.

    Returns
    -------
    distn_names : list of strings
        Names of the statistical distributions.
    distn_gen_names : list of strings
        Names of the generators of the statistical distributions.
        Note that these are not simply the names of the statistical
        distributions, with a _gen suffix added.
    """
    distribution_names = []
    generator_names = []
    for name, obj in namespace_pairs:
        # Private names are never part of the public API.
        if name.startswith('_'):
            continue
        if name.endswith('_gen') and issubclass(obj, rv_base_class):
            generator_names.append(name)
        if isinstance(obj, rv_base_class):
            distribution_names.append(name)
    return distribution_names, generator_names
|
mdhaber/scipy
|
scipy/stats/_distn_infrastructure.py
|
Python
|
bsd-3-clause
| 146,733
|
[
"Gaussian"
] |
441bf37fd74362c06e7e59ec233ca7cfa89a609867004d68855df6dafe1a87c7
|
import gpflow
import numpy as np
import tensorflow_probability as tfp
from gpflow.utilities import set_trainable, to_default_float
from . import BranchingTree as bt
from . import VBHelperFunctions, assigngp_dense, assigngp_denseSparse
from . import branch_kernParamGPflow as bk
def FitModel(
    bConsider,
    GPt,
    GPy,
    globalBranching,
    priorConfidence=0.80,
    M=10,
    likvar=1.0,
    kerlen=2.0,
    kervar=5.0,
    fDebug=False,
    maxiter=100,
    fPredict=True,
    fixHyperparameters=False,
):
    """
    Fit BGP model
    :param bConsider: list of candidate branching points
    :param GPt: pseudotime
    :param GPy: gene expression. Should be 0 mean for best performance.
    :param globalBranching: cell labels
    :param priorConfidence: prior confidence on cell labels
    :param M: number of inducing points
    :param likvar: initial value for Gaussian noise variance
    :param kerlen: initial value for kernel length scale
    :param kervar: initial value for kernel variance
    :param fDebug: Print debugging information
    :param maxiter: maximum number of iterations for optimisation
    :param fPredict: compute predictive mean and variance
    :param fixHyperparameters: should kernel hyperparameters be kept fixed or optimised?
    :return: dictionary of log likelihood, GPflow model, Phi matrix, predictive set of points,
    mean and variance, hyperparameter values, posterior on branching time
    """
    assert isinstance(bConsider, list), "Candidate B must be list"
    assert GPt.ndim == 1
    assert GPy.ndim == 2
    assert (
        GPt.size == GPy.size
    ), "pseudotime and gene expression data must be the same size"
    assert (
        globalBranching.size == GPy.size
    ), "state space must be same size as number of cells"
    assert M >= 0, "at least 0 or more inducing points should be given"
    phiInitial, phiPrior = GetInitialConditionsAndPrior(
        globalBranching, priorConfidence, infPriorPhi=True
    )
    XExpanded, indices, _ = VBHelperFunctions.GetFunctionIndexListGeneral(GPt)
    # Earliest pseudotime at which either branch label (2 or 3) is observed:
    # used as the initial branching point.
    ptb = np.min([np.min(GPt[globalBranching == 2]), np.min(GPt[globalBranching == 3])])
    tree = bt.BinaryBranchingTree(0, 1, fDebug=False)
    tree.add(None, 1, np.ones((1, 1)) * ptb)  # B can be anything here
    (fm, _) = tree.GetFunctionBranchTensor()
    kb = bk.BranchKernelParam(
        gpflow.kernels.Matern32(1), fm, b=np.zeros((1, 1))
    ) + gpflow.kernels.White(1)
    kb.kernels[1].variance.assign(
        1e-6
    )  # controls the discontinuity magnitude, the gap at the branching point
    set_trainable(kb.kernels[1].variance, False)  # jitter for numerics
    if M == 0:
        # Exact (dense) assignment model.
        m = assigngp_dense.AssignGP(
            GPt,
            XExpanded,
            GPy,
            kb,
            indices,
            np.ones((1, 1)) * ptb,
            phiInitial=phiInitial,
            phiPrior=phiPrior,
        )
    else:
        # Sparse model: M inducing points spread over pseudotime, cycling
        # through the three function indices (trunk + two branches).
        ZExpanded = np.ones((M, 2))
        ZExpanded[:, 0] = np.linspace(0, 1, M, endpoint=False)
        ZExpanded[:, 1] = np.array([i for j in range(M) for i in range(1, 4)])[:M]
        m = assigngp_denseSparse.AssignGPSparse(
            GPt,
            XExpanded,
            GPy,
            kb,
            indices,
            np.ones((1, 1)) * ptb,
            ZExpanded,
            phiInitial=phiInitial,
            phiPrior=phiPrior,
        )
    # Initialise hyperparameters
    m.likelihood.variance.assign(likvar)
    m.kernel.kernels[0].kern.lengthscales.assign(kerlen)
    m.kernel.kernels[0].kern.variance.assign(kervar)
    if fixHyperparameters:
        print("Fixing hyperparameters")
        set_trainable(m.kernel.kernels[0].kern.lengthscales, False)
        set_trainable(m.likelihood.variance, False)
        set_trainable(m.kernel.kernels[0].kern.variance, False)
    else:
        if fDebug:
            print("Adding prior logistic on length scale to avoid numerical problems")
        m.kernel.kernels[0].kern.lengthscales.prior = tfp.distributions.Normal(
            to_default_float(2.0), to_default_float(1.0)
        )
        m.kernel.kernels[0].kern.variance.prior = tfp.distributions.Normal(
            to_default_float(3.0), to_default_float(1.0)
        )
        m.likelihood.variance.prior = tfp.distributions.Normal(
            to_default_float(0.1), to_default_float(0.1)
        )
    # optimization: refit the model for every candidate branching point
    ll = np.zeros(len(bConsider))
    Phi_l = list()
    ttestl_l, mul_l, varl_l = list(), list(), list()
    hyps = list()
    for ib, b in enumerate(bConsider):
        m.UpdateBranchingPoint(np.ones((1, 1)) * b, phiInitial)
        try:
            opt = gpflow.optimizers.Scipy()
            opt.minimize(
                m.training_loss,
                variables=m.trainable_variables,
                options=dict(disp=True, maxiter=maxiter),
            )
            # remember winning hyperparameter
            hyps.append(
                {
                    "likvar": m.likelihood.variance.numpy(),
                    "kerlen": m.kernel.kernels[0].kern.lengthscales.numpy(),
                    "kervar": m.kernel.kernels[0].kern.variance.numpy(),
                }
            )
            ll[ib] = m.log_posterior_density()
        except Exception as ex:
            print(f"Unexpected error: {ex} {'-' * 60}\nCaused by model: {m} {'-' * 60}")
            # BUGFIX: mark the *current* candidate as failed (was ll[0]).
            ll[ib] = np.nan
            # return model so can inspect model
            return {
                "loglik": ll,
                "model": m,
                "Phi": np.nan,
                "prediction": {"xtest": np.nan, "mu": np.nan, "var": np.nan},
                "hyperparameters": np.nan,
                "posteriorB": np.nan,
            }
        # prediction
        Phi = m.GetPhi()
        Phi_l.append(Phi)
        if fPredict:
            ttestl, mul, varl = VBHelperFunctions.predictBranchingModel(m)
            ttestl_l.append(ttestl), mul_l.append(mul), varl_l.append(varl)
        else:
            ttestl_l.append([]), mul_l.append([]), varl_l.append([])
    iw = np.argmax(ll)
    postB = GetPosteriorB(ll, bConsider)
    if fDebug:
        print(
            "BGP Maximum at b=%.2f" % bConsider[iw],
            "CI= [%.2f, %.2f]" % (postB["B_CI"][0], postB["B_CI"][1]),
        )
    # BUGFIX: the original assertion message called str() with two arguments,
    # which would raise TypeError instead of printing the intended message.
    assert np.allclose(bConsider[iw], postB["Bmode"]), "%s-%s" % (
        str(postB["B_CI"]),
        str(bConsider[iw]),
    )
    return {
        "loglik": ll,
        "Phi": Phi_l[iw],  # 'model': m,
        "prediction": {"xtest": ttestl_l[iw], "mu": mul_l[iw], "var": varl_l[iw]},
        "hyperparameters": hyps[iw],
        "posteriorB": postB,
    }
def GetPosteriorB(objUnsorted, BgridSearch, ciLimits=(0.01, 0.99)):
    """
    Return posterior on B for each experiment, confidence interval index, map index.

    :param objUnsorted: objective (log-likelihood) value per grid point, in
        the same order as ``BgridSearch``.
    :param BgridSearch: candidate branching points (not necessarily sorted).
    :param ciLimits: lower/upper cumulative-probability cutoffs for the
        credible interval. (Changed from a mutable list default to a tuple;
        callers passing a list are unaffected.)
    :return: dict with mode, credible interval, their indices, the sorted grid
        and the sort permutation.
    """
    # for each trueB calculate posterior over grid
    # ... in a numerically stable way
    assert objUnsorted.size == len(BgridSearch), "size do not match %g-%g" % (
        objUnsorted.size,
        len(BgridSearch),
    )
    gr = np.array(BgridSearch)
    isort = np.argsort(gr)
    gr = gr[isort]
    o = objUnsorted[isort].copy()  # sorted objective function
    imode = np.argmax(o)
    # Softmax with the maximum subtracted for numerical stability.
    pn = np.exp(o - np.max(o))
    p = pn / pn.sum()
    # BUGFIX: the original used np.any(~np.isnan(p)), which only verifies that
    # at least one entry is finite; the intent is that *no* entry is NaN/inf.
    assert np.all(~np.isnan(p)), "Nans in p! %s" % str(p)
    assert np.all(~np.isinf(p)), "Infinities in p! %s" % str(p)
    pb_cdf = np.cumsum(p)
    confInt = np.zeros(len(ciLimits), dtype=int)
    for pb_i, pb_c in enumerate(ciLimits):
        # Largest grid index whose cumulative probability stays below the cutoff.
        pb_idx = np.flatnonzero(pb_cdf <= pb_c)
        if pb_idx.size == 0:
            confInt[pb_i] = 0
        else:
            confInt[pb_i] = np.max(pb_idx)
    # NOTE: for modes at the end points of the grid the usual sanity checks
    # (lower CI < mode < upper CI) do not hold, so they are not asserted here.
    assert np.all(confInt < o.size), confInt
    B_CI = gr[confInt]
    Bmode = gr[imode]
    # return confidence interval as well as mode, and indexes for each
    return {
        "B_CI": B_CI,
        "Bmode": Bmode,
        "idx_confInt": confInt,
        "idx_mode": imode,
        "BgridSearch_sort": gr,
        "isort": isort,
    }
def GetInitialConditionsAndPrior(globalBranching, v, infPriorPhi):
    """Build the initial and prior Phi (branch-assignment) matrices.

    :param globalBranching: per-cell labels; 1 = trunk, 2/3 = the two branches.
    :param v: prior confidence placed on the labelled branch (float).
    :param infPriorPhi: if True, use an informative prior for branch cells.
    :return: (phiInitial, phiPrior), both (N, 2) row-stochastic arrays.
    """
    # Setting initial phi
    np.random.seed(42)  # UNDONE remove TODO
    assert isinstance(v, float), "v should be scalar is %s" % str(type(v))
    N = globalBranching.size
    # Start undecided, then randomise the initial assignment per cell.
    phiInitial = np.ones((N, 2)) * 0.5  # don't know anything
    phiInitial[:, 0] = np.random.rand(N)
    phiInitial[:, 1] = 1 - phiInitial[:, 0]
    phiPrior = np.ones_like(phiInitial) * 0.5  # don't know anything
    for cell in range(N):
        branch = int(globalBranching[cell]) - 2  # labels 1,2,3 -> -1,0,1
        if branch == -1:
            # trunk cell: keep the uninformative prior
            phiPrior[cell, :] = 0.5
            continue
        if infPriorPhi:
            phiPrior[cell, :] = 1 - v
            phiPrior[cell, branch] = v
        # Bias the initial assignment towards the labelled branch.
        phiInitial[cell, branch] = 0.5 + (np.random.random() / 2.0)  # in [0.5, 1]
        phiInitial[cell, 1 - branch] = 1 - phiInitial[cell, branch]
    assert np.allclose(phiPrior.sum(1), 1), (
        "Phi Prior should be close to 1 but got %s" % str(phiPrior))
    assert np.allclose(phiInitial.sum(1), 1), (
        "Phi Initial should be close to 1 but got %s" % str(phiInitial))
    assert np.all(~np.isnan(phiInitial)), "No nans please!"
    assert np.all(~np.isnan(phiPrior)), "No nans please!"
    return phiInitial, phiPrior
|
ManchesterBioinference/BranchedGP
|
BranchedGP/FitBranchingModel.py
|
Python
|
apache-2.0
| 9,710
|
[
"Gaussian"
] |
ce02b829a0e68780b30a4e129d33c7068511cc7de1d957c84dc0892f70c10078
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Rendering infrastructure: one renderer in one render window, driven by an
# interactor.
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# read data: PLOT3D combustor dataset, scalar function 110 selected
reader = vtk.vtkMultiBlockPLOT3DReader()
reader.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin")
reader.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin")
reader.SetScalarFunctionNumber(110)
reader.Update()
output = reader.GetOutput().GetBlock(0)
# create outline of the structured grid
outlineF = vtk.vtkStructuredGridOutlineFilter()
outlineF.SetInputData(output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outlineF.GetOutputPort())
outline = vtk.vtkActor()
outline.SetMapper(outlineMapper)
outline.GetProperty().SetColor(0,0,0)
# create 3D cursor with axes, outline and shadow planes at the data center
cursor = vtk.vtkCursor3D()
cursor.SetModelBounds(output.GetBounds())
cursor.SetFocalPoint(output.GetCenter())
cursor.AllOff()
cursor.AxesOn()
cursor.OutlineOn()
cursor.XShadowsOn()
cursor.YShadowsOn()
cursor.ZShadowsOn()
cursorMapper = vtk.vtkPolyDataMapper()
cursorMapper.SetInputConnection(cursor.GetOutputPort())
cursorActor = vtk.vtkActor()
cursorActor.SetMapper(cursorMapper)
cursorActor.GetProperty().SetColor(1,0,0)
# create probe: sample the dataset at the cursor's focal point
probe = vtk.vtkProbeFilter()
probe.SetInputData(cursor.GetFocus())
probe.SetSourceData(output)
# create a cone geometry for glyph
cone = vtk.vtkConeSource()
cone.SetResolution(16)
cone.SetRadius(0.25)
# create glyph: orient the cone by the probed vector, scale by the scalar
glyph = vtk.vtkGlyph3D()
glyph.SetInputConnection(probe.GetOutputPort())
glyph.SetSourceConnection(cone.GetOutputPort())
glyph.SetVectorModeToUseVector()
glyph.SetScaleModeToScaleByScalar()
glyph.SetScaleFactor(.0002)
glyphMapper = vtk.vtkPolyDataMapper()
glyphMapper.SetInputConnection(glyph.GetOutputPort())
glyphActor = vtk.vtkActor()
glyphActor.SetMapper(glyphMapper)
# assemble the scene and render once before handing over to the interactor
ren1.AddActor(outline)
ren1.AddActor(cursorActor)
ren1.AddActor(glyphActor)
ren1.SetBackground(1.0,1.0,1.0)
renWin.SetSize(200,200)
ren1.ResetCamera()
ren1.GetActiveCamera().Elevation(60)
ren1.ResetCameraClippingRange()
renWin.Render()
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/General/Testing/Python/cursor3D.py
|
Python
|
bsd-3-clause
| 2,339
|
[
"VTK"
] |
dd68995bea2e405e6288569e81e3aea556b5077f23587457aa9916798ca2deb3
|
#coding: utf8
import numpy as N
from traits.api import Int, Float, Tuple, Range
from traitsui.api import View, VGroup, Item
from enable.api import ColorTrait
from DisplayPlugin import DisplayPlugin
class BeamProfiler(DisplayPlugin):
    """Moment-based 2-D Gaussian beam fit with centroid/ellipse overlays.

    The fit subtracts a percentile-based background, optionally crops
    iteratively around the beam, and derives the ellipse axes and rotation
    from the second moments of the intensity distribution.
    """
    # These traits control the calculation of the Gaussian fit
    background_percentile = Range(0.0, 100.0, 15.0)
    num_crops = Range(0, 5, 1)
    crop_radius = Range(1.0, 4.0, 1.5)  # in beam diameters
    # These are the results of the calculation
    _centroid = Tuple(Float(), Float())
    _minor_axis = Float()
    _major_axis = Float()
    _angle = Float()
    _ellipticity = Float()
    _baseline = Float()
    _include_radius = Float()
    # These control the visualization
    num_points = Int(40)
    color = ColorTrait('white')
    view = View(
        VGroup(
            Item('active'),
            Item('background_percentile'),
            Item('num_crops', label='Crop # times'),
            Item('crop_radius'),
            label='Beam Profiler',
            show_border=True))

    def __init__(self, **traits):
        super(BeamProfiler, self).__init__(**traits)
        # Plot data channels for the centroid marker and the fitted ellipse.
        self.screen.data_store['centroid_x'] = N.array([])
        self.screen.data_store['centroid_y'] = N.array([])
        self.screen.data_store['ellipse_x'] = N.array([])
        self.screen.data_store['ellipse_y'] = N.array([])
        renderers = self.screen.plot.plot(('centroid_x', 'centroid_y'),
                                          type='scatter',
                                          marker_size=2.0,
                                          color=self.color,
                                          marker='circle')
        self._centroid_patch = renderers[0]
        self._centroid_patch.visible = self.active
        renderers = self.screen.plot.plot(('ellipse_x', 'ellipse_y'),
                                          type='line',
                                          color=self.color)
        self._ellipse_patch = renderers[0]
        self._ellipse_patch.visible = self.active
        # Connect handlers
        self.on_trait_change(self._move_centroid, '_centroid', dispatch='ui')
        # BUGFIX: the listeners referred to nonexistent traits '_width' and
        # '_height'; the result traits are '_major_axis' and '_minor_axis',
        # so the overlays/HUD never refreshed when the axes changed.
        self.on_trait_change(self._redraw_ellipse,
                             '_centroid,_major_axis,_minor_axis,_angle',
                             dispatch='ui')
        self.on_trait_change(self._update_hud,
                             '_centroid,_major_axis,_minor_axis,_angle,'
                             '_ellipticity,_baseline,_include_radius',
                             dispatch='ui')

    def _move_centroid(self):
        # Push the new centroid coordinates into the plot's data store.
        self.screen.data_store['centroid_x'] = N.array([self._centroid[0]])
        self.screen.data_store['centroid_y'] = N.array([self._centroid[1]])

    def _redraw_ellipse(self):
        # Draw an N-point ellipse at the 1/e radius of the Gaussian fit
        # Using a parametric equation in t
        t = N.linspace(0, 2 * N.pi, self.num_points)
        angle = N.radians(self._angle)
        x0, y0 = self._centroid
        sin_t, cos_t = N.sin(t), N.cos(t)
        sin_angle, cos_angle = N.sin(angle), N.cos(angle)
        r_a = self._major_axis / 2.0
        r_b = self._minor_axis / 2.0
        x = x0 + r_a * cos_t * cos_angle - r_b * sin_t * sin_angle
        y = y0 + r_a * cos_t * sin_angle + r_b * sin_t * cos_angle
        self.screen.data_store['ellipse_x'] = x
        self.screen.data_store['ellipse_y'] = y

    def _update_hud(self):
        # Format the current fit results into the on-screen HUD.
        self.screen.hud('profiler',
            'Centroid: {0._centroid[0]:.1f}, {0._centroid[1]:.1f}\n'
            'Major axis: {0._major_axis:.1f}\n'
            'Minor axis: {0._minor_axis:.1f}\n'
            u'Rotation: {0._angle:.1f}°\n'
            'Ellipticity: {0._ellipticity:.3f}\n'
            'Baseline: {0._baseline:.1f}\n'
            'Inclusion radius: {0._include_radius:.1f}'.format(self))

    def _process(self, frame):
        """Compute the Gaussian-fit parameters from one camera frame."""
        bw = (len(frame.shape) == 2)
        if not bw:
            # Use standard NTSC conversion formula
            frame = N.array(
                0.2989 * frame[..., 0]
                + 0.5870 * frame[..., 1]
                + 0.1140 * frame[..., 2])
        # Calibrate the background
        background = N.percentile(frame, self.background_percentile)
        frame -= background
        #N.clip(frame, 0.0, frame.max(), out=frame)
        m00, m10, m01, m20, m02, m11 = _calculate_moments(frame)
        bc, lc = 0, 0
        # BUGFIX: include_radius was referenced after the loop but only
        # assigned inside it, raising NameError when num_crops == 0.
        include_radius = 0.0
        for count in range(self.num_crops):
            include_radius, dlc, dbc, drc, dtc, frame = _crop(frame,
                self.crop_radius, m00, m10, m01, m20, m02, m11)
            lc += dlc
            bc += dbc
            # Recalibrate the background and recalculate the moments
            new_bkg = N.percentile(frame, self.background_percentile)
            frame -= new_bkg
            background += new_bkg
            #N.clip(frame, 0.0, frame.max(), out=frame)
            m00, m10, m01, m20, m02, m11 = _calculate_moments(frame)
        # Offset the centroid back into full-frame coordinates.
        m10 += lc
        m01 += bc
        # Calculate Gaussian boundary from the second moments.
        q = N.sqrt((m20 - m02) ** 2 + 4 * m11 ** 2)
        self._major_axis = 2 ** 1.5 * N.sqrt(m20 + m02 + q)
        self._minor_axis = 2 ** 1.5 * N.sqrt(m20 + m02 - q)
        self._angle = N.degrees(0.5 * N.arctan2(2 * m11, m20 - m02))
        self._ellipticity = self._minor_axis / self._major_axis
        self._centroid = (m10, m01)
        self._baseline = background
        self._include_radius = include_radius

    def activate(self):
        # Show the overlays when the plugin is switched on.
        self._centroid_patch.visible = self._ellipse_patch.visible = True

    def deactivate(self):
        # Clear the HUD and hide the overlays.
        self.screen.hud('profiler', None)
        self._centroid_patch.visible = self._ellipse_patch.visible = False
def _calculate_moments(frame):
"""Calculate the moments"""
# From Bullseye
y, x = N.mgrid[:frame.shape[0], :frame.shape[1]]
m00 = frame.sum() or 1.0
m10 = (frame * x).sum() / m00
m01 = (frame * y).sum() / m00
dx, dy = x - m10, y - m01
m20 = (frame * dx ** 2).sum() / m00
m02 = (frame * dy ** 2).sum() / m00
m11 = (frame * dx * dy).sum() / m00
return m00, m10, m01, m20, m02, m11
def _crop(frame, crop_radius, m00, m10, m01, m20, m02, m11):
"""crop based on 3 sigma region"""
w20 = crop_radius * 4 * N.sqrt(m20)
w02 = crop_radius * 4 * N.sqrt(m02)
include_radius = N.sqrt((w20 ** 2 + w02 ** 2) / 2)
w02 = max(w02, 4)
w20 = max(w20, 4)
lc = int(max(0, m10 - w20))
bc = int(max(0, m01 - w02))
tc = int(min(frame.shape[0], m01 + w02))
rc = int(min(frame.shape[1], m10 + w20))
frame = frame[bc:tc, lc:rc]
return include_radius, lc, bc, rc, tc, frame
|
ptomato/Beams
|
beams/BeamProfiler.py
|
Python
|
mit
| 6,347
|
[
"Gaussian"
] |
247cc1dafb7167565dcdfcd6047b102a3c86a7ef6153a15f2c7020c40043e78b
|
#!/usr/bin/env python
#
# displace.py
#
# Simple script to generate input files of given displacement patterns.
# Currently, VASP, Quantum-ESPRESSO, and xTAPP are supported.
#
# Copyright (c) 2014 Terumasa Tadano
#
# This file is distributed under the terms of the MIT license.
# Please see the file 'LICENCE.txt' in the root directory
# or http://opensource.org/licenses/mit-license.php for information.
#
"""
Input file generator for displaced configurations.
"""
from __future__ import print_function
import optparse
import numpy as np
import interface.VASP as vasp
import interface.QE as qe
import interface.xTAPP as xtapp
import interface.OpenMX as openmx
import interface.LAMMPS as lammps
# Command-line interface: displacement magnitude, output-file prefix, and
# exactly one target-code option pointing at the reference structure file.
usage = "usage: %prog [options] file.pattern_HARMONIC file.pattern_ANHARM3 ... \n \
file.pattern_* can be generated by 'alm' with MODE = suggest."
parser = optparse.OptionParser(usage=usage)
parser.add_option('--mag',
                  help="Magnitude of displacement in units of \
Angstrom (default: 0.02)")
parser.add_option('--prefix',
                  help="Prefix of the files to be created. ")
parser.add_option('--QE',
                  metavar='orig.pw.in',
                  help="Quantum-ESPRESSO input file with equilibrium atomic positions (default: None)")
parser.add_option('--VASP',
                  metavar='orig.POSCAR',
                  help="VASP POSCAR file with equilibrium atomic \
positions (default: None)")
parser.add_option('--xTAPP',
                  metavar='orig.cg',
                  help="xTAPP CG file with equilibrium atomic \
positions (default: None)")
parser.add_option('--LAMMPS',
                  metavar='orig.lammps',
                  help="LAMMPS structure file with equilibrium atomic positions (default: None)")
parser.add_option('--OpenMX',
                  metavar='orig.dat',
                  help="dat file with equilibrium atomic \
positions (default: None)")
def parse_displacement_patterns(files_in):
    """Read ALM pattern files and return the unique displacement patterns.

    Parameters
    ----------
    files_in : list of str
        Paths to *.pattern_* files produced by 'alm' with MODE = suggest.

    Returns
    -------
    list
        Unique patterns; each pattern is a list of [atom_index, dx, dy, dz]
        entries (atom_index is 1-based, Cartesian displacement directions).
    """
    pattern = []
    for file in files_in:
        pattern_tmp = []
        # Use a context manager so the file is closed even on early exit.
        with open(file, 'r') as f:
            # First line is "<label>:<basis>"; only Cartesian ('C') is supported.
            tmp, basis = f.readline().rstrip().split(':')
            if basis == 'F':
                print("Warning: DBASIS must be 'C'")
                exit(1)
            while True:
                line = f.readline()
                if not line:
                    break
                line_split_by_colon = line.rstrip().split(':')
                # An entry header has the form "<label>: <natom_move>".
                is_entry = len(line_split_by_colon) == 2
                if is_entry:
                    pattern_set = []
                    natom_move = int(line_split_by_colon[1])
                    # Each moved atom contributes "<index> <dx> <dy> <dz>".
                    for i in range(natom_move):
                        disp = []
                        line = f.readline()
                        line_split = line.rstrip().split()
                        disp.append(int(line_split[0]))
                        for j in range(3):
                            disp.append(float(line_split[j + 1]))
                        pattern_set.append(disp)
                    pattern_tmp.append(pattern_set)
        # BUGFIX: typo "containts" -> "contains" in the status message.
        print("File %s contains %i displacement patterns"
              % (file, len(pattern_tmp)))
        # Deduplicate patterns across the input files, preserving order.
        for entry in pattern_tmp:
            if entry not in pattern:
                pattern.append(entry)
    print("")
    print("Number of unique displacement patterns = %d" % len(pattern))
    return pattern
def char_xyz(entry):
    """Map a Cartesian component index (mod 3) to its axis letter."""
    return ('x', 'y', 'z')[entry % 3]
def gen_displacement(counter_in, pattern, disp_mag, nat, invlavec):
    """Build the header string and (nat, 3) displacement array for one pattern.

    Parameters
    ----------
    counter_in : int
        1-based pattern number, written into the header.
    pattern : list
        Entries of the form [atom_index(1-based), dx, dy, dz].
    disp_mag : float
        Displacement magnitude in Angstrom.
    nat : int
        Total number of atoms.
    invlavec : ndarray or None
        Inverse lattice vectors; if given, displacements are converted from
        Cartesian to fractional coordinates.
    """
    header = "Disp. Num. %i" % counter_in
    header += " ( %f Angstrom" % disp_mag
    disp = np.zeros((nat, 3))
    for move in pattern:
        atom_index = move[0] - 1
        header += ", %i : " % move[0]
        direction = ""
        for axis in range(3):
            component = move[axis + 1]
            # Only components above numerical noise contribute.
            if abs(component) > 1.0e-10:
                direction += ("+" if component > 0.0 else "-") + "xyz"[axis]
                disp[atom_index][axis] += component * disp_mag
        header += direction
    header += ")"
    if invlavec is not None:
        # Convert Cartesian displacements to fractional coordinates.
        for i in range(nat):
            disp[i] = np.dot(disp[i], invlavec.T)
    return header, disp
def get_number_of_zerofill(npattern):
    """Return the number of decimal digits in ``npattern`` (minimum 1),
    used as the zero-padding width for output file names."""
    digits = 1
    while npattern >= 10:
        npattern //= 10
        digits += 1
    return digits
if __name__ == '__main__':
    options, args = parser.parse_args()
    file_pattern = args[0:]
    print("*****************************************************************")
    print(" displace.py -- Input file generator ")
    print("*****************************************************************")
    print("")
    if len(file_pattern) == 0:
        print("Usage: displace.py [options] file1.pattern_HARMONIC\
 file2.pattern_ANHARM3 ...")
        print("file.pattern_* can be generated by 'alm' with MODE = suggest.")
        print("")
        print("For details of available options, \
please type\n$ python displace.py -h")
        exit(1)
    # Exactly one code option (--VASP/--QE/--xTAPP/--LAMMPS/--OpenMX)
    # must be supplied; conditions[i] is True when option i is absent.
    conditions = [options.VASP is None,
                  options.QE is None,
                  options.xTAPP is None,
                  options.LAMMPS is None,
                  options.OpenMX is None]
    if conditions.count(True) == len(conditions):
        print("Error : Either --VASP, --QE, --xTAPP, --LAMMPS, --OpenMX option must be given.")
        exit(1)
    elif len(conditions) - conditions.count(True) > 1:
        print("Error : --VASP, --QE, --xTAPP, --LAMMPS, and --OpenMX cannot be given simultaneously.")
        exit(1)
    elif options.VASP:
        code = "VASP"
        print("--VASP option is given: Generate POSCAR files for VASP")
        print("")
    elif options.QE:
        code = "QE"
        print("--QE option is given: Generate input files for Quantum-ESPRESSO.")
        print("")
    elif options.xTAPP:
        code = "xTAPP"
        print("--xTAPP option is given: Generate input files for xTAPP.")
        print("")
    elif options.LAMMPS:
        code = "LAMMPS"
        print("--LAMMPS option is given: Generate input files for LAMMPS.")
        print("")
    elif options.OpenMX:
        code = "OpenMX"
        print("--OpenMX option is given: Generate dat files for OpenMX")
        print("")
    # Assign the magnitude of displacements
    if options.mag is None:
        options.mag = "0.02"
        disp_length = 0.02
        print("--mag option not given. Substituted by the default (0.02 Angstrom)")
        print("")
    else:
        disp_length = float(options.mag)
    if options.prefix is None:
        prefix = "disp"
        print("--prefix option not given. Substituted by the default (\"disp\"). ")
        print("")
    else:
        prefix = options.prefix
    print("-----------------------------------------------------------------")
    print("")
    # Per-code output naming scheme and reference structure file.
    if code == "VASP":
        str_outfiles = "%s{counter}.POSCAR" % prefix
        file_original = options.VASP
    elif code == "QE":
        str_outfiles = "%s{counter}.pw.in" % prefix
        file_original = options.QE
        suffix = "pw.in"
    elif code == "xTAPP":
        str_outfiles = "%s{counter}.cg" % prefix
        file_original = options.xTAPP
    elif code == "LAMMPS":
        str_outfiles = "%s{counter}.lammps" % prefix
        file_original = options.LAMMPS
    elif code == "OpenMX":
        str_outfiles = "%s{counter}.dat" % prefix
        file_original = options.OpenMX
    # Read the original file
    if code == "VASP":
        aa, aa_inv, elems, nats, x_frac = vasp.read_POSCAR(file_original)
        nat = np.sum(nats)
    elif code == "QE":
        list_namelist, list_ATOMIC_SPECIES, \
            list_K_POINTS, list_CELL_PARAMETERS, list_OCCUPATIONS, \
            nat, lavec, kd_symbol, x_frac, aa_inv = qe.read_original_QE(
                file_original)
    elif code == "xTAPP":
        str_header, nat, nkd, aa, aa_inv, x_frac, kd = xtapp.read_CG(file_original)
        suffix = "cg"
    elif code == "LAMMPS":
        # LAMMPS works with Cartesian coordinates, so no inverse lattice here.
        common_settings, nat, x_cart, kd = lammps.read_lammps_structure(file_original)
        aa_inv = None
    elif code == "OpenMX":
        aa, aa_inv, nat, x_frac = openmx.read_OpenMX_input(file_original)
    print("Original file : %s" % file_original)
    print("Output file format : %s" % str_outfiles)
    print("Magnitude of displacements : %s Angstrom" % disp_length)
    print("Number of atoms : %i" % nat)
    print("")
    disp_pattern = parse_displacement_patterns(args[:])
    nzerofills = get_number_of_zerofill(len(disp_pattern))
    counter = 0
    # Write one displaced structure file per unique pattern.
    for pattern in disp_pattern:
        counter += 1
        header, disp = gen_displacement(counter, pattern, disp_length,
                                        nat, aa_inv)
        if code == "VASP":
            vasp.write_POSCAR(prefix, counter, header, nzerofills,
                              aa, elems, nats, disp, x_frac)
        elif code == "QE":
            qe.generate_QE_input(prefix, suffix, counter, nzerofills, list_namelist,
                                 list_ATOMIC_SPECIES, list_K_POINTS,
                                 list_CELL_PARAMETERS, list_OCCUPATIONS,
                                 nat, kd_symbol, x_frac, disp)
        elif code == "xTAPP":
            # xTAPP needs an explicit symmetry block; supply identity only.
            nsym = 1
            symop = []
            symop.append([1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0])
            denom_tran = 1
            has_inv = 0
            xtapp.gen_CG(prefix, suffix, counter, nzerofills, str_header, nat, kd,
                         x_frac, disp, nsym, symop, denom_tran, has_inv)
        elif code == "LAMMPS":
            lammps.write_lammps_structure(prefix, counter, header, nzerofills,
                                          common_settings, nat, kd, x_cart, disp)
        elif code == "OpenMX":
            openmx.write_OpenMX_input(prefix, counter, nzerofills, disp, aa, file_original)
    print("")
    print("All input files are created.")
|
ttadano/ALM
|
tools/displace.py
|
Python
|
mit
| 10,240
|
[
"ESPResSo",
"LAMMPS",
"OpenMX",
"VASP"
] |
5a806ff06a41649c1928e09330e54a76740718577823fde4b393ebd7a2d05a7a
|
#!/usr/bin/env python
import numpy
import itertools
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from crystal import fillcell, tikz_atoms
def SiMn_Isite():
    """Build a cubic diamond-Si cell with one interstitial Mn atom.

    Returns a pymatgen Structure: the 8-atom conventional silicon cell with
    Mn appended at fractional coordinates (0.5, 0.5, 0.5).
    """
    lattice_constant = 5.43
    primitive = Lattice([[lattice_constant / 2, lattice_constant / 2, 0],
                         [lattice_constant / 2, 0, lattice_constant / 2],
                         [0, lattice_constant / 2, lattice_constant / 2]])
    structure = Structure(primitive, ['Si'] * 2,
                          [[0.00, 0.00, 0.00], [0.25, 0.25, 0.25]])
    # Transform the 2-atom primitive fcc cell into the cubic conventional cell.
    structure.make_supercell([[1, 1, -1], [1, -1, 1], [-1, 1, 1]])
    # Insert the interstitial Mn atom.
    structure.append('Mn', [0.50, 0.50, 0.50])
    return structure
def SiMn_Ssite():
    """Build a cubic diamond-Si cell with Mn substituting one Si site.

    Returns a pymatgen Structure: the conventional silicon cell with the Si
    atom at fractional (0.25, 0.25, 0.25) replaced by Mn.
    """
    a = 5.43
    fcc = Lattice([[a/2, a/2, 0], [a/2, 0, a/2], [0, a/2, a/2]])
    ssite = Structure(fcc, ['Si']*2, [[0.00, 0.00, 0.00], [0.25, 0.25, 0.25]])
    # Make the cell cubic
    ssite.make_supercell([[1, 1, -1], [1, -1, 1], [-1, 1, 1]])
    # Substitution site where Mn replaces Si.
    Mnsite = numpy.array([0.25, 0.25, 0.25])
    # BUGFIX: the original deleted from `ssite` while iterating over it,
    # which skips the element following each deletion. Collect matching
    # indices first, then delete (in reverse so indices stay valid).
    matches = [i for i, atom in enumerate(ssite)
               if numpy.linalg.norm(atom.frac_coords - Mnsite) < 0.01]
    for i in reversed(matches):
        del ssite[i]
    ssite.append('Mn', Mnsite)
    return ssite
# Build the interstitial structure, fill the unit cell with periodic images,
# and emit TikZ drawing commands for the atoms and the Si-Si bonds.
atoms = SiMn_Isite()
atoms_full = fillcell(atoms)
bondatoms = []
for sitei,sitej in itertools.combinations(atoms_full,2):
    # Bond criterion: pair distance within 125% of the summed atomic radii;
    # bonds involving Mn are excluded.
    radius = sitei.specie.atomic_radius + sitej.specie.atomic_radius
    bondlength = sitei.distance_from_point(sitej.coords)
    if bondlength <= 1.25 * radius:
        if sitei.specie.symbol != 'Mn' and sitej.specie.symbol != 'Mn':
            bondatoms.append((sitei,sitej))
tikz_atoms(atoms_full, bondatoms, drawcell = True)
|
ldamewood/figures
|
scripts/SiMn_sites.py
|
Python
|
mit
| 1,478
|
[
"CRYSTAL",
"pymatgen"
] |
a4fe42cc5bfff4119fd306cad46a86cb8134c0b2b4382f7c938bf88b8b761146
|
'''
This application uses Flask as a web server and jquery to trigger
pictures with SimpleCV
To use start the web server:
>>> python flask-server.py
Then to run the application:
>>> python webkit-gtk.py
*Note: You are not required to run the webkit-gtk.py, you can also
visit http://localhost:5000
'''
print __doc__
from flask import Flask, jsonify, render_template, request
from werkzeug import SharedDataMiddleware
import tempfile, os
import simplejson as json
import SimpleCV
# Single module-level Flask app and a shared SimpleCV camera handle;
# every request grabs a frame from this one camera.
app = Flask(__name__)
cam = SimpleCV.Camera()
@app.route('/')
def show(name=None):
    '''Render the index page with a freshly captured camera frame.'''
    img = cam.getImage()
    # NamedTemporaryFile is used only to produce a unique basename; the
    # handle is closed and the image saved under static/ for Flask to serve.
    tf = tempfile.NamedTemporaryFile(suffix=".png")
    loc = 'static/' + tf.name.split('/')[-1]
    tf.close()
    img.save(loc)
    return render_template('index.html', img=loc)
@app.route('/_snapshot')
def snapshot():
    '''
    Takes a picture and returns a path via json
    used as ajax callback for taking a picture
    '''
    img = cam.getImage()
    # NamedTemporaryFile is used only to produce a unique basename; the
    # handle is closed and the image saved under static/ instead.
    tf = tempfile.NamedTemporaryFile(suffix=".png")
    loc = 'static/' + tf.name.split('/')[-1]
    tf.close()
    img.save(loc)
    print "location",loc
    print "json", json.dumps(loc)
    return json.dumps(loc)
if __name__ == '__main__':
    if app.config['DEBUG']:
        # In debug mode serve the static/ directory directly through the
        # WSGI middleware instead of relying on an external web server.
        from werkzeug import SharedDataMiddleware
        import os
        app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
            '/': os.path.join(os.path.dirname(__file__), 'static')
        })
    app.run()
|
vdt/SimpleCV
|
SimpleCV/examples/web-based/webdisplay/flask-server.py
|
Python
|
bsd-3-clause
| 1,451
|
[
"VisIt"
] |
25eb6efee748465cace6224beb30dc39c28dfada5b6f1cca3d5efbe72980160a
|
##
# Copyright 2009-2012 Ghent University
# Copyright 2009-2012 Stijn De Weirdt
# Copyright 2010 Dries Verdegem
# Copyright 2010-2012 Kenneth Hoste
# Copyright 2011 Pieter De Baets
# Copyright 2011-2012 Jens Timmerman
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing netCDF, implemented as an easyblock
"""
import os
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.modules import get_software_root, get_software_version
class EB_netCDF(ConfigureMake):
    """Support for building/installing netCDF"""

    def configure_step(self):
        """Configure build: set config options and configure"""
        self.cfg.update('configopts', "--enable-shared")

        if self.toolchain.options['pic']:
            self.cfg.update('configopts', '--with-pic')

        # pass compiler settings through to configure
        compiler_vars = (os.getenv('FFLAGS'), os.getenv('MPICC'), os.getenv('F90'))
        self.cfg.update('configopts', 'FCFLAGS="%s" CC="%s" FC="%s"' % compiler_vars)

        # add -DgFortran to CPPFLAGS when building with GCC
        if self.toolchain.comp_family() == toolchain.GCC:  #@UndefinedVariable
            self.cfg.update('configopts', 'CPPFLAGS="%s -DgFortran"' % os.getenv('CPPFLAGS'))

        super(EB_netCDF, self).configure_step()

    def sanity_check_step(self):
        """
        Custom sanity check for netCDF
        """
        header_files = ["netcdf.h"]
        lib_files = ["libnetcdf.so", "libnetcdf.a"]

        # since v4.2, the non-C libraries have been split off in separate extensions_step
        # see netCDF-Fortran and netCDF-C++
        if LooseVersion(self.version) < LooseVersion("4.2"):
            header_files += ["netcdf%s" % suff for suff in ["cpp.h", ".hh", ".inc", ".mod"]]
            header_files += ["ncvalues.h", "typesizes.mod"]
            lib_files += ["libnetcdf_c++.so", "libnetcdff.so",
                          "libnetcdf_c++.a", "libnetcdff.a"]

        binaries = ["bin/nc%s" % suff for suff in ["-config", "copy", "dump", "gen", "gen3"]]

        custom_paths = {
            'files': binaries +
                     ["lib/%s" % f for f in lib_files] +
                     ["include/%s" % f for f in header_files],
            'dirs': [],
        }

        super(EB_netCDF, self).sanity_check_step(custom_paths=custom_paths)
def set_netcdf_env_vars(log):
    """Set netCDF environment variables used by other software."""
    netcdf = get_software_root('netCDF')
    if not netcdf:
        # module is not loaded; report and bail out
        log.error("netCDF module not loaded?")
        return

    env.setvar('NETCDF', netcdf)
    log.debug("Set NETCDF to %s" % netcdf)

    netcdff = get_software_root('netCDF-Fortran')
    netcdf_ver = get_software_version('netCDF')
    if netcdff:
        env.setvar('NETCDFF', netcdff)
        log.debug("Set NETCDFF to %s" % netcdff)
    elif LooseVersion(netcdf_ver) >= LooseVersion("4.2"):
        # the Fortran bindings were split out of netCDF as of v4.2
        log.error("netCDF v4.2 no longer supplies Fortran library, also need netCDF-Fortran")
def get_netcdf_module_set_cmds(log):
    """Get module setenv commands for netCDF."""
    netcdf = os.getenv('NETCDF')
    if not netcdf:
        log.error("NETCDF environment variable not set?")
        return

    cmds = ["setenv NETCDF %s\n" % netcdf]
    # netCDF-Fortran is optional (only for netCDF v4.2 and later)
    netcdff = os.getenv('NETCDFF')
    if netcdff:
        cmds.append("setenv NETCDFF %s\n" % netcdff)
    return "".join(cmds)
|
JensTimmerman/easybuild-easyblocks
|
easybuild/easyblocks/n/netcdf.py
|
Python
|
gpl-2.0
| 4,802
|
[
"NetCDF"
] |
594b1b0861b5f1cfb80bbd72759645312cc6ad6514dbb20a1f4d66dd170e5486
|
#coding: utf-8
# Hungarian Phrasebook, <https://www.youtube.com/watch?v=G6D1YI-41ao>
# Use with Pythonista
# The MIT License
#
# Copyright © 2014 Mia Sinno Smith and Steven Thomas Smith
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math, os, re, sound, time
from scene import *
import random
from itertools import cycle
from functools import partial
class HungarianPhrasebook (Scene):
    """
    A Mastermind-style code-breaking game for Pythonista's scene module.

    Both the player (left column of cards) and an automated 'Pythonista'
    opponent (right column) try to guess a hidden sequence of self.ncols
    emoji drawn from self.ncolors choices. Feedback pegs: 'R' = right emoji,
    right place; 'W' = right emoji, wrong place.

    NOTE(review): this is Python 2 / Pythonista 1.x code (uses .next(),
    xrange, integer division semantics) — do not run under Python 3 as-is.
    """
    def setup(self):
        # This will be called before the first frame is drawn.
        # add bounds and size by hand if run from console
        if not hasattr(self,'bounds'):
            self.bounds = Rect(0, 0, 1024, 748)
        if not hasattr(self,'size'):
            self.size = Size(1024, 748)
        self.HP_init()
        # Set up the root layer and one other layer:
        self.root_layer = Layer(self.bounds)
        self.root_layer.background = Color(0.2,0.2,0.2)
        self.totcols = 2*self.ncols + 1 # user row + reveal + Pythonista row
        self.sepx = 5
        self.card_size = math.floor((self.size.w-(self.totcols+6)*self.sepx)/self.totcols) # 96 if self.size.w > 700 else 48
        self.width = (self.card_size + self.sepx) * self.totcols + 6*self.sepx # xtra space between guess and Pythonista row
        self.height = self.size.h - (self.card_size + self.sepx)
        self.offset = Point((self.size.w - self.width)/2,self.height)
        self.deal_cards()
    def HP_init(self):
        # define Hungarian Phrasebook variables and functions
        self.ncols = 4      # length of the secret code
        self.ncolors = 7    # number of distinct emoji/colors in play
        self.guesses = []   # player's committed guesses (encoded strings)
        self.pguesses = []  # Pythonista's guesses (encoded strings)
        self.hyps = ()      # player's remaining hypothesis space
        self.phyps = ()     # Pythonista's remaining hypothesis space
        self.rows = []
        self.prows = []
        self.prows_revealed = []
        self.guess_cards = []
        self.pcards_deal_flag = True
        emoji = self.emoji()
        self.ncolors = max(1,self.ncolors)
        self.ncolors = min(len(emoji),self.ncolors)
        # characters
        self.characters = []
        self.colors = []
        # pick ncolors distinct emoji, each paired with a random background color
        while len(self.characters) < self.ncolors:
            k = random.randint(0,len(emoji)-1)
            self.characters.append(emoji[k])
            self.colors.append(Color(random.random(), random.random(), random.random()))
            del emoji[k]
        # The game
        # colored and white pegs, standard sorting
        self.rpegs = {'R': 1, 'W': 2, '-': 3, ' ': 4}
        # define peg letters for hash table
        self.firstletter = 'A'
        self.chrpegs = dict((k,chr(ord(self.firstletter)+k-1)) for k in range(0,self.ncolors))
        # the result
        self.default_result = ['-'] * self.ncols
        # the truth
        self.truth = "".join([chr(random.randint(0,self.ncolors-1)+ord(self.firstletter)) for x in range(self.ncols)])
        # print 'Truth is ' + repr([self.characters[k] for k in self.codetocards(self.truth)])
        # guesses and results
        self.nhyp = []
        self.nhyp.append(self.ncolors**self.ncols)
    def emoji(self):
        # List of Pythonista emoji
        # NOTE(review): 'sys' is not imported at the top of this file (only
        # math, os, re, sound, time) — unless 'from scene import *' supplies
        # it, this line would raise NameError. Confirm in Pythonista.
        emoji = os.listdir(sys.path[0] + 'Textures');
        emoji = filter(lambda k: re.match('^[A-Z].+\.png',k) and
                       not re.match('^(ionicons|Typicons|PC_)',k),emoji);
        emoji = [k.replace(".png","") for k in emoji]
        cute_emoji = ['Ant','Baby_Chick_1','Baby_Chick_2','Baby_Chick_3','Bactrian_Camel','Bear_Face',
                      'Bird','Blowfish','Boar','Bug','Cat_Face_Crying','Cat_Face_Grinning',
                      'Cat_Face_Heart-Shaped_Eyes','Cat_Face_Kissing','Cat_Face_Pouting',
                      'Cat_Face_Smiling','Cat_Face_Weary','Cat_Face_With_Tears_Of_Joy',
                      'Cat_Face_With_Wry_Smile','Cat_Face','Chicken','Cow_Face','Dog_Face',
                      'Dolphin','Elephant','Fish','Frog_Face','Hamster_Face','Honeybee',
                      'Horse_Face','Horse','Koala','Lady_Beetle','Monkey_Face',
                      'Monkey_Hear-No-Evil','Monkey_See-No-Evil','Monkey_Speak-No-Evil',
                      'Monkey','Mouse_Face','Octopus','Panda_Face',
                      'Penguin','Pig_Face','Pig_Nose','Poodle','Rabbit_Face','Sheep',
                      'Snail','Snake','Spiral_Shell','Tiger_Face','Tropical_Fish','Turtle',
                      'Whale','Wolf_Face','Aubergine','Banana','Birthday_Cake','Bread',
                      'Candy','Cherries','Chestnut','Chocolate_Bar','Coffee','Cooked_Rice',
                      'Cookie','Cooking','Corn','Doughnut','Grapes','Green_Apple','Hamburger',
                      'Ice_Cream','Lollipop','Meat_On_Bone','Melon','Oden','Peach','Pineapple',
                      'Pot_Of_Food','Poultry_Leg','Red_Apple','Shaved_Ice','Shortcake',
                      'Slice_Of_Pizza','Soft_Ice_Cream','Spaghetti','Strawberry','Tangerine',
                      'Tomato','Watermelon','Alien_Monster','Artist_Palette','Balloon',
                      'Crown','Crystal_Ball','Gem_Stone','Honey_Pot','Jack-O-Lantern','Moyai',
                      'Musical_Keyboard','Package','Party_Popper','Pile_Of_Poo','Ribbon',
                      'Snowman_Without_Snow','Alien','Baby','Boy','Ghost','Girl','Guardsman',
                      'Man_And_Woman','Man','Older_Man','Older_Woman','Person_Blond',
                      'Police_Officer','Princess','Woman','Worker','Blossom','Bouquet',
                      'Cactus','Cherry_Blossom','Four_Leaf_Clover','Hibiscus','Maple_Leaf',
                      'Mushroom','Palm_Tree','Rose','Sunflower','Tulip','Smiling_1',
                      'Stuck-Out_Tongue_2','Card_Joker','Cloud','Cyclone','Fire','Heart',
                      'Moon_5','Recycling_Symbol','Skull','Speech_Balloon','Sun_1','Rocket']
        # hard-wired to the curated list; the filtered directory listing is unused
        if True:
            return cute_emoji
        else:
            return emoji
    def advance_row(self):
        # Commit the current guess, shrink the hypothesis space, and move one
        # row down the screen (or end the game when out of vertical space).
        self.guesses.append(self.cardstocode(self.cards))
        self.rows.append(self.cards)
        if len(self.guesses) == 1:
            self.hyps = self.make_hypspace(self.guesses[-1])
        else:
            self.hyps = self.reduce_hypspace(self.hyps,self.guesses[-1])
        if self.height - (self.card_size + self.sepx) < 0:
            self.game_over()
            return
        self.deal_pcards()
        self.height -= (self.card_size + self.sepx)
        self.deal_cards()
    def deal_cards(self):
        # Lay out a fresh row of player cards, the '?' reveal card, and the
        # status text for both players.
        self.cards = []
        for k in range(self.ncols):
            card = Layer(Rect(self.offset.x + k * (self.card_size + self.sepx),self.height,
                              self.card_size, self.card_size))
            card.icyc = cycle(range(0,self.ncolors))
            # randomize the starting emoji (inner loop shadows 'k'; harmless
            # since the outer 'k' is no longer used below)
            for k in range(random.randint(1,self.ncolors)):
                card.idx = card.icyc.next()
            card.background = self.colors[card.idx]
            card.image = self.characters[card.idx]
            card.stroke = Color(0.6, 0.6, 0.6)
            card.stroke_weight = 4.0
            self.root_layer.add_layer(card)
            self.cards.append(card)
        guess_card = Layer(Rect(self.offset.x + self.ncols * (self.card_size + self.sepx)+2*self.sepx,
                                self.height, self.card_size, self.card_size))
        guess_card.background = Color(0.95,0.95,0.95)
        guess_card.stroke = Color(1.0, 0.65, 1.0)
        guess_card.stroke_weight = 4.0
        guess_card.revealed = False
        self.root_layer.add_layer(guess_card)
        self.guess_cards.append(guess_card)
        self.font_size = 60 if self.size.w > 700 else 48
        guess_layer = TextLayer('?', 'GillSans', self.font_size)
        guess_layer.frame.center(guess_card.frame.x + guess_card.frame.w / 2,
                                 guess_card.frame.y + guess_card.frame.h / 2)
        guess_layer.tint = Color(1.0,0.0,1.0)
        self.root_layer.add_layer(guess_layer)
        self.guess_layer = guess_layer
        font_size = 24 if self.size.w > 700 else 12
        if len(self.guesses) == 0:
            # first row: show total hypothesis counts for both players
            # You
            self.youtext_layer = TextLayer(
                'You, {}^{} = {} words'.format(
                    self.ncolors,self.ncols,self.ncolors**self.ncols),
                'Futura', font_size)
            self.youtext_layer.frame.center(self.offset.x
                + (self.ncols * (self.card_size + self.sepx) - self.sepx)/2,
                self.height-1.2*font_size/2)
            self.root_layer.add_layer(self.youtext_layer)
            # Pythonista
            self.ptext_layer = TextLayer(
                'Pythonista, {}^{} = {} words'.format(
                    self.ncolors,self.ncols,self.ncolors**self.ncols),
                'Futura', font_size)
            self.ptext_layer.frame.center(self.offset.x
                + (self.ncols+1) * (self.card_size + self.sepx) + 2*self.sepx
                + (self.ncols * (self.card_size + self.sepx) - self.sepx)/2,
                self.height + 0.608*self.card_size + self.sepx - 1.2*font_size/2)
            self.root_layer.add_layer(self.ptext_layer)
        else:
            # later rows: replace status text with remaining-word counts
            # You
            plural = 's' if len(self.hyps) > 1 else ''
            youtext_layer = TextLayer(
                'You, {} word{}'.format(len(self.hyps),plural),
                'Futura', font_size)
            youtext_layer.frame.center(self.offset.x
                + (self.ncols * (self.card_size + self.sepx) - self.sepx)/2,
                self.height-1.2*font_size/2)
            self.root_layer.remove_layer(self.youtext_layer)
            self.youtext_layer = youtext_layer
            self.root_layer.add_layer(self.youtext_layer)
            # Pythonista
            if not self.pcards_deal_flag: return
            plural = 's' if len(self.phyps) > 1 else ''
            ptext_layer = TextLayer(
                'Pythonista, {} word{}'.format(len(self.phyps),plural),
                'Futura', font_size)
            ptext_layer.frame.center(self.offset.x
                + (self.ncols+1) * (self.card_size + self.sepx) + 2*self.sepx
                + (self.ncols * (self.card_size + self.sepx) - self.sepx)/2,
                self.height + self.card_size + self.sepx - 1.2*font_size/2)
            self.root_layer.remove_layer(self.ptext_layer)
            self.ptext_layer = ptext_layer
            self.root_layer.add_layer(self.ptext_layer)
            # once Pythonista has solved the code, stop dealing it new rows
            if self.redandwhitepegs(self.pguesses[-1],self.truth) == 'R' * self.ncols:
                self.ptext_layer.tint = Color(0.8, 0.8, 1.0)
                self.pcards_deal_flag = False
    def deal_pcards(self):
        # Deal a row of face-down cards for Pythonista's next guess, sampled
        # from its remaining hypothesis space.
        if not self.pcards_deal_flag: return
        self.pcards = []
        if len(self.pguesses) == 0:
            # First guess is random, then use it to create an initial (truncated) hypothesis space
            self.pguesses.append("".join([chr(random.randint(0,self.ncolors-1)+ord(self.firstletter))
                                          for x in range(self.ncols)]))
            self.phyps = self.make_hypspace(self.pguesses[0])
        else:
            self.pguesses.append(random.sample(self.phyps,1)[0])
            self.phyps = self.reduce_hypspace(self.phyps,self.pguesses[-1])
        pcards_nos = self.codetocards(self.pguesses[-1])
        for k in range(self.ncols):
            card = Layer(Rect(self.offset.x + (self.ncols+1+k) * (self.card_size + self.sepx)+6*self.sepx,
                              self.height, self.card_size, self.card_size))
            card.idx = pcards_nos[k]
            if False:
                # only reveal python's guesses if asked
                card.background = self.colors[self.pcards[k]]
                card.image = self.characters[self.pcards[k]]
            else:
                card.background = Color(0.8,0.8,1.0)
            card.stroke = Color(0.3, 0.3, 0.6)
            card.stroke_weight = 4.0
            self.root_layer.add_layer(card)
            self.pcards.append(card)
        self.prows.append(self.pcards)
        self.prows_revealed.append(False)
    def game_win(self):
        # Flash a 'You Win!' overlay, then shrink the board away and restart.
        font_size = 100 if self.size.w > 700 else 50
        text_layer = TextLayer('You Win!', 'Futura', font_size)
        text_layer.frame.center(self.bounds.center())
        overlay = Layer(self.bounds)
        overlay.background = Color(0, 0, 0, 0)
        overlay.add_layer(text_layer)
        self.add_layer(overlay)
        overlay.animate('background', Color(0.0, 0.2, 0.3, 0.7))
        text_layer.animate('scale_x', 1.3, 0.3, autoreverse=True)
        text_layer.animate('scale_y', 1.3, 0.3, autoreverse=True)
        self.root_layer.animate('scale_x', 0.0, delay=2.0,
                                curve=curve_ease_back_in)
        self.root_layer.animate('scale_y', 0.0, delay=2.0,
                                curve=curve_ease_back_in,
                                completion=self.game_over)
    def game_over(self):
        # Play a jingle and re-run setup() to start a fresh game.
        sound.play_effect('Powerup_2')
        self.delay(0.5,self.setup)
    def redandwhitepegs(self,test,truth):
        # Score 'test' against 'truth': 'R' for right char/right place, 'W'
        # for right char/wrong place, '-' otherwise; result is sorted R,W,-.
        res = list(self.default_result)
        for p in range(0,len(truth)):
            if test[p] == truth[p]:
                res[p] = 'R'
            elif test[p] in truth:
                res[p] = 'W'
        return "".join(sorted(res,key=lambda c: self.rpegs[c]))
    def cardstocode(self,cards):
        # Encode a row of card layers as a string of letters starting at 'A'.
        return "".join([chr(card.idx+ord(self.firstletter)) for card in cards])
    def codetocards(self,code):
        # Decode a letter string back into a list of emoji indices.
        return [ord(c)-ord(self.firstletter) for c in list(code)]
    def draw_result(self,rpegs,rcenter):
        # Draw the R/W peg squares in an ns-by-ns grid centered on rcenter.
        # NOTE(review): bare 'sqrt' (and 'xrange' below) — 'sqrt' presumably
        # comes from 'from scene import *'; math.sqrt would be unambiguous.
        ns = int(math.ceil(sqrt(len(rpegs))))
        sl = int(math.ceil(2.0/(sqrt(5.0)+1.0)*self.card_size/ns))
        slx = sl + int(math.ceil((self.card_size - ns*sl)/(ns+2)))
        rcenter.x -= (ns * sl + (ns-1) * (slx-sl))/2
        rcenter.y += ((ns-2) * sl + (ns-1) * (slx-sl))/2
        for k in xrange(len(rpegs)):
            i, j = k / ns, k % ns
            if rpegs[k] in 'RW':
                rpeg = Layer(Rect(rcenter.x + j * slx,
                                  rcenter.y - i * slx,
                                  sl, sl))
                rpeg.background = Color(1.0, 0.0, 1.0) if rpegs[k] == 'R' else Color(1.0, 1.0, 1.0)
                self.root_layer.add_layer(rpeg)
    # possible outcomes -- reduce from ncols**ncolors using the first guess
    def make_hypspace(self,guess):
        # Enumerate all ncolors**ncols codes (odometer-style counter) and keep
        # those consistent with the feedback the guess received.
        hyps = set()
        result = self.redandwhitepegs(guess,self.truth)
        firstnn = [0] * self.ncols
        while firstnn[0] < self.ncolors:
            hyp = "".join([chr(x+ord(self.firstletter)) for x in firstnn])
            if result == self.redandwhitepegs(guess,hyp):
                hyps.add(hyp)
            firstnn[-1] += 1
            # carry propagation for the base-ncolors counter
            for d in range(1,self.ncols):
                if firstnn[-d] >= self.ncolors:
                    firstnn[-d] = 0; firstnn[-d-1] += 1
        return hyps
    def reduce_hypspace(self,hyps,guess):
        # Keep only the hypotheses that would produce the same feedback the
        # guess actually received.
        newhyps = set()
        result = self.redandwhitepegs(guess,self.truth)
        for hyp in hyps:
            if result == self.redandwhitepegs(guess,hyp):
                newhyps.add(hyp)
        return newhyps
    def draw(self):
        # Update and draw our root layer. For a layer-based scene, this
        # is usually all you have to do in the draw method.
        background(0, 0, 0)
        self.root_layer.update(self.dt)
        self.root_layer.draw()
    def touch_began(self, touch):
        # Handles three interactions: cycling a player card, revealing the
        # guess ('?') card to score the row, and toggling Pythonista's rows.
        # Animate the layer to the location of the touch:
        #x, y = touch.location.x, touch.location.y
        #new_frame = Rect(x - 64, y - 64, 128, 128)
        #self.layer.animate('frame', new_frame, 1.0, curve=curve_bounce_out)
        # Animate the background color to a random color:
        for card in self.cards:
            if touch.location in card.frame:
                def reveal_card():
                    card.idx = card.icyc.next()
                    card.background = self.colors[card.idx]
                    card.image = self.characters[card.idx]
                    card.animate('scale_y', 1.0, 0.15)
                card.animate('scale_y', 0.0, 0.15,
                             completion=reveal_card)
                card.scale_x = 1.0
                card.animate('scale_x', 0.9, 0.15, autoreverse=True)
                sound.play_effect('Click_1')
                time.sleep(0.2)
                break
        guess_card = self.guess_cards[-1]
        if touch.location in guess_card.frame and not guess_card.revealed:
            def reveal_card():
                guess_card.background = Color(0.1, 0.1, 0.1)
                guess_card.stroke = Color(1.0, 0.2, 1.0)
                guess_card.revealed = True
                guess_card.animate('scale_y', 1.0, 0.15)
            guess_card.animate('scale_y', 0.0, 0.15,
                               completion=reveal_card)
            self.guess_layer.remove_layer()
            guess_card.scale_x = 1.0
            guess_card.animate('scale_x', 0.9, 0.15, autoreverse=True)
            sound.play_effect('8ve-slide-magic')
            result = self.redandwhitepegs(self.cardstocode(self.cards),self.truth)
            self.draw_result(result,guess_card.frame.center())
            if result == 'R' * self.ncols:
                self.game_win()
                return
            self.advance_row()
        # toggle reveal/hide of a tapped Pythonista row
        self.pcards_reveal_flag = False
        for k in range(len(self.prows)):
            pcards = self.codetocards(self.pguesses[k])
            for l in range(len(self.prows[k])):
                if touch.location in self.prows[k][l].frame:
                    self.pcards_reveal_flag = True
                    break
            if self.pcards_reveal_flag: break
        if self.pcards_reveal_flag:
            cards = list(self.prows[k])
            if self.prows_revealed[k]: cards.reverse()
            if not self.prows_revealed[k]:
                def reveal_cards(card):
                    card.background = self.colors[card.idx]
                    card.image = self.characters[card.idx]
                    card.animate('scale_y', 1.0, 0.15)
            else:
                def reveal_cards(card):
                    card.background = Color(0.8,0.8,1.0)
                    card.image = None
                    card.animate('scale_y', 1.0, 0.15)
            for l in range(len(cards)):
                card = cards[l]
                card.animate('scale_y', 0.0, 0.15,l*0.05,
                             completion=partial(reveal_cards,card))
                card.scale_x = 1.0
                card.animate('scale_x', 0.9, 0.15, autoreverse=True)
            if not self.prows_revealed[k]:
                sound.play_effect('Woosh_1')
            else:
                sound.play_effect('Woosh_2')
            self.prows_revealed[k] = not self.prows_revealed[k]
    def touch_moved(self, touch):
        pass
    def touch_ended(self, touch):
        pass
# Entry point: hand the scene to Pythonista's scene.run loop.
run(HungarianPhrasebook())
|
essandess/HungarianPhrasebook
|
HungarianPhrasebook.py
|
Python
|
mit
| 17,363
|
[
"Octopus"
] |
58ef927670c37d943e75c38e898a8900ce2a128de8ab4d50814751fc895f361d
|
import numpy as np
import scipy
from .math import strictly_positify, positify, clip01
def psf(img, sx, sy=None, angle=0):
    """
    Return a 2D Gaussian point-spread function with the same shape as img.

    img: image (reference for the output size)
    sx: sigma value for the long axis
    sy: sigma value for the short axis. If None take the same value as sx [default]
    angle: geometric angle (in radian) of the long axis. [default: 0]
    """
    from .math import Gauss
    if sy is None:
        sy = sx
    ny, nx = img.shape[0], img.shape[1]
    # pixel coordinates centered on the image middle
    gx, gy = np.meshgrid(np.arange(nx) - nx//2, np.arange(ny) - ny//2)
    if angle != 0:
        # rotate the coordinate frame so the long axis lies along 'angle'
        u = gx*np.cos(angle) - gy*np.sin(angle)
        v = gx*np.sin(angle) + gy*np.cos(angle)
    else:
        u, v = gx, gy
    return Gauss(u, 0, sx)*Gauss(v, 0, sy)
def _rl(x, image, psf, type='default', extend=True, damping=0, ndamp=10):
    """
    Richardson-Lucy core algorithm: given the estimate x_k, return x_{k+1}.

    Reference: L. B. Lucy / The Astronomical Journal / vol. 79 / No. 6 /
    June 1974 / pp. 745-754

    x: current estimate x_k
    image: input image to enhance
    psf: point spread function
    """
    # re-blur the current estimate; clamp to avoid division by zero
    blurred = strictly_positify(convolve(x, psf, type=type, extend=extend))
    if damping == 0:
        ratio = image / blurred
    else:
        ratio = _rl_damped(blurred, image, damping=damping, ndamp=ndamp)
    # correlation = convolution with the mirrored psf
    return x * convolve(ratio, psf[::-1, ::-1], type=type, extend=extend)
def _rl_damped(I, image, gain=1, con_var=1, damping=1, ndamp=10):
    """
    Calculate the damped Richardson-Lucy correction ratio.

    Parameters
    ----------
    I: np.ndarray
        the re-blurred estimate
    image: np.ndarray
        the measured image
    gain: float, int
        CCD gain (relic?)
    con_var: float, int, np.ndarray
        Noise value or image
    damping: float, int
        noise sigma threshold for dampening
    ndamp: float, int
        order of the dampening
    """
    from .haar import hfilter
    residual = image - I
    # suppress residuals below the noise threshold via the Haar filter
    residual = hfilter(residual, (I + con_var)/gain, damping, ndamp=ndamp)
    residual[np.isnan(residual)] = 0
    return gain * (1 + residual / (I + con_var))
def _rl_accelerate(x, x1, x2, g1=None, g2=None, order=1):
    """
    Accelerated Richardson-Lucy prediction step.

    Reference: David S. C. Biggs and Mark Andrews, Appl. Opt. / Vol. 36 /
    No. 8 / 10 March 1997 / pp. 1766-1775

    Notation in reference to the paper:
        x = x_k, x1 = x_{k-1}, x2 = x_{k-2},
        g1 = g_{k-1}, g2 = g_{k-2}, y = y_k
    """
    if g2 is None:
        # initialization: no gradient history yet, so no extrapolation
        return x
    alpha = clip01(np.sum(g1*g2)/strictly_positify(np.sum(g2**2)))  # Eq. 10, α∈[0,1]
    if alpha == 0:
        return x  # the prediction is the same as x
    y = x + alpha * (x - x1)                 # Eq. 6 with h1 from Eq. 7
    if order > 1:
        y += (x - 2*x1 + x2) * alpha**2 / 2  # Eq. 14 with h2 from Eq. 17
    return y
def richardson_lucy(image, psf, iterations, damping=0, ndamp=10,
                    core='default', acceleration=2, init='mean', extend=True, clip=False, **kargs):
    """
    Richardson-Lucy deconvolution.

    image: the image to enhance (numpy 2d array)
    psf: the Point Spread Function (numpy 2d array)
    iterations:
        The number of iterations to perform. It can be either an integer or a list of integers.
        For the latter case, the returned solution is a dictionary with keys K and value being
        the enhancement after K iterations.
    damping: damping factor used with the damped core (0 = no damping)
    ndamp: damping order used with the damped core
    core:
        default: default R-L algorithm using convolve from scipy.signal
        fft: performs a fftconvolution
        accurate: uses scipy.signal.convolve2d
    acceleration:
        0: no acceleration (standard R-L)
        1: first order acceleration
        2: second order acceleration
        higher orders are not yet implemented
    init:
        'mean': the default. The start value for x is a constant 0.5 image
        'image': the start value x is the image itself
        numpy array: if init is a 2d numpy array, its value will be used as init value for x
    clip: if truthy, clamp the estimate to [0, clip] after each iteration
    """
    assert core in ['default', 'fft', 'accurate']
    # np.float was removed in NumPy 1.24; use the builtin-equivalent float64 explicitly
    image = image.astype(np.float64)
    psf = psf.astype(np.float64)
    psf /= np.sum(psf)  # Normalize the psf ⇒ ∫∫ psf(x,y) dx dy = 1
    # NOTE: use == (value equality) rather than 'is' (identity) for the string
    # sentinels; identity of string literals is an implementation detail.
    if isinstance(init, str) and init == 'mean':
        x = 0.5 * np.ones(image.shape)
    elif isinstance(init, str) and init == 'image':
        x = image
    else:
        x = init
    # Is iterations a number or a list of numbers?
    dict_output = True
    if isinstance(iterations, int):
        dict_output = False
        iterations = [iterations]
    N = max(iterations)
    results = {}
    x1 = x2 = None
    g1 = g2 = None
    for i in range(N):
        if acceleration:
            y = _rl_accelerate(x, x1, x2, g1, g2, order=acceleration)
        else:
            y = x
        x_new = _rl(positify(y), image=image, psf=psf, extend=extend, type=core,
                    damping=damping, ndamp=ndamp)
        g2 = g1
        g1 = x_new - y
        x, x1, x2 = x_new, x, x1  # rotate elements for next iteration
        if clip:
            x[x < 0] = 0
            x[x > clip] = clip
        if i + 1 in iterations:
            results[i + 1] = np.copy(x)
    if dict_output:
        return results
    return results[N]
def img_extend(img, margin, block=1):
    """
    Pad img by `margin` pixels on every side, filling the border with the
    mean of the nearest `block` rows/columns (corners use the mean of the
    nearest block x block patch).
    """
    out = np.pad(img, margin, 'constant')
    # per-column means of the first/last `block` rows
    top = np.mean(img[:block, :], axis=0)
    bottom = np.mean(img[-block:, :], axis=0)
    # per-row means of the first/last `block` columns
    left = np.mean(img[:, :block], axis=1)
    right = np.mean(img[:, -block:], axis=1)
    out[:margin, margin:-margin] = top
    out[-margin:, margin:-margin] = bottom
    out[margin:-margin, :margin] = left[:, None]
    out[margin:-margin, -margin:] = right[:, None]
    # corner patches
    out[:margin, :margin] = np.mean(img[:block, :block])
    out[:margin, -margin:] = np.mean(img[:block, -block:])
    out[-margin:, :margin] = np.mean(img[-block:, :block])
    out[-margin:, -margin:] = np.mean(img[-block:, -block:])
    return out
def convolve(img, psf, type='default', extend=True, mode='same', extend_margin=100, **kargs):
    """
    Compute the 2D convolution of two signals: img and psf.

    type: convolution backend
        'default': scipy.signal.convolve
        'fft': scipy.signal.fftconvolve
        'accurate': scipy.signal.convolve2d
        'fft2': manual rfft2-based convolution
    extend: if truthy, pad img by extend_margin (mean-of-border padding) before
        convolving and crop the result back afterwards. If an int is given,
        it is used as the margin itself.
    mode: passed through to the scipy convolution routines.
    """
    # BUGFIX: was 'extend is int', which compares a value to the type object
    # and is always False; exclude bool since True/False are ints too.
    if isinstance(extend, int) and not isinstance(extend, bool):
        extend_margin = extend
    if extend:
        img = img_extend(img, extend_margin)
    # BUGFIX: string comparisons used 'is' (identity) instead of '==' (equality)
    if type == 'fft':
        from scipy.signal import fftconvolve as conv
        I = conv(img, psf, mode)
    elif type == 'default':
        from scipy.signal import convolve as conv
        I = conv(img, psf, mode)
    elif type == 'accurate':
        # BUGFIX: was imported as 'convolve' (shadowing this function) while the
        # call used the undefined name 'conv', raising NameError on this branch.
        from scipy.signal import convolve2d as conv
        I = conv(img, psf, mode)
    elif type == 'fft2':
        I = np.fft.fftshift((np.fft.irfft2(np.fft.rfft2(img) * np.fft.rfft2(psf))))
    if extend:
        I = I[extend_margin:-extend_margin, extend_margin:-extend_margin]
    return I
|
scholi/pySPM
|
pySPM/utils/restoration.py
|
Python
|
apache-2.0
| 6,930
|
[
"Gaussian"
] |
01376e9632b70dc5b59ea602839df546353c36fb7fe44c209beaa53fc92acfec
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
import unittest
from MooseDocs.testing import MarkdownTestCase
class TestBibtexExtension(MarkdownTestCase):
    """
    Integration tests for the MooseDocs bibtex markdown extension
    (\\cite, \\citet, \\citep commands and \\bibliography rendering).
    Each test converts a markdown snippet and compares against a gold HTML file.
    """
    EXTENSIONS = ['MooseDocs.extensions.bibtex']
    @classmethod
    def updateExtensions(cls, configs):
        """
        Method to change the arguments that come from the configuration file for
        specific tests. This way one can test optional arguments without permanently
        changing the configuration file.
        """
        configs['MooseDocs.extensions.bibtex']['macro_files'] =\
            ['docs/content/bib/macro_test_abbrev.bib']
    # NOTE(review): the snippets below are raw strings, so '\n' is the two
    # characters backslash+n, not a newline — confirm the converter expects
    # that literal sequence.
    def testCite(self):
        md = r'\cite{testkey}\n\bibliography{docs/content/bib/moose.bib}'
        self.assertConvert('test_cite.html', md)
    def testCiteTwo(self):
        md = r'\cite{testkey, testkey}\n\bibliography{docs/content/bib/moose.bib}'
        self.assertConvert('test_citeTwo.html', md)
    def testCiteThree(self):
        md = r'\cite{testkey, testkey, testkey}\n\bibliography{docs/content/bib/moose.bib}'
        self.assertConvert('test_citeThree.html', md)
    def testCitet(self):
        md = r'\citet{testkey}\n\bibliography{docs/content/bib/moose.bib}'
        self.assertConvert('test_citet.html', md)
    def testCitetTwo(self):
        md = r'\citet{testkey, testkey}\n\bibliography{docs/content/bib/moose.bib}'
        self.assertConvert('test_citetTwo.html', md)
    def testCitetThree(self):
        md = r'\citet{testkey, testkey, testkey}\n\bibliography{docs/content/bib/moose.bib}'
        self.assertConvert('test_citetThree.html', md)
    def testCitep(self):
        md = r'\citep{testkey}\n\bibliography{docs/content/bib/moose.bib}'
        self.assertConvert('test_citep.html', md)
    def testCitepTwo(self):
        md = r'\citep{testkey, testkey}\n\bibliography{docs/content/bib/moose.bib}'
        self.assertConvert('test_citepTwo.html', md)
    def testCitepThree(self):
        md = r'\citep{testkey, testkey, testkey}\n\bibliography{docs/content/bib/moose.bib}'
        self.assertConvert('test_citepThree.html', md)
    def testBibtexMacro(self):
        # citation whose entry uses a bibtex @string macro from macro_files
        md = r'\cite{macroTestKey}\n\bibliography{docs/content/bib/test.bib}'
        self.assertConvert('test_bibtex_macro.html', md)
    def testNoAuthor(self):
        # entry without an author field must still render
        md = r'\cite{noAuthorTestKey}\n\bibliography{docs/content/bib/test.bib}'
        self.assertConvert('test_no_author.html', md)
    def testDuplicateError(self):
        # a bibliography with a duplicated key must log an error
        md = r'\cite{macroTestKey}\n\bibliography{docs/content/bib/test_duplicate.bib}'
        self.convert(md)
        self.assertInLogError('repeated bibliograhpy entry: macroTestKey', index=-3)
if __name__ == '__main__':
    # Allow running this test module directly, with verbose output.
    unittest.main(verbosity=2)
|
Chuban/moose
|
python/MooseDocs/tests/bibtex/test_bibtex.py
|
Python
|
lgpl-2.1
| 4,046
|
[
"MOOSE"
] |
3afc084f60d748efa6690e888e1335f421410460f01965d2fb5664b7b561439a
|
#
# This file is part of postpic.
#
# postpic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# postpic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with postpic. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright Stephan Kuschel, 2018-2019
'''
.. _openPMD: https://github.com/openPMD/openPMD-standard
Support for hdf5 files following the openPMD_ Standard.
Dependencies:
- h5py: read hdf5 files with python
Written by Stephan Kuschel 2016
'''
from __future__ import absolute_import, division, print_function, unicode_literals
from . import Dumpreader_ifc
from . import Simulationreader_ifc
import numpy as np
import re
from .. import helper
from ..helper_fft import fft
__all__ = ['OpenPMDreader', 'FileSeries',
'FbpicReader', 'FbpicFileSeries']
class OpenPMDreader(Dumpreader_ifc):
'''
The Reader implementation for Data written in the hdf5 file
format following openPMD_ naming conventions.
Args:
h5file : String
A String containing the relative Path to the .h5 file.
'''
def __init__(self, h5file, **kwargs):
super(OpenPMDreader, self).__init__(h5file, **kwargs)
import os.path
import h5py
if not os.path.isfile(h5file):
raise IOError('File "' + str(h5file) + '" doesnt exist.')
self._h5 = h5py.File(h5file, 'r')
self._iteration = int(list(self._h5['data'].keys())[0])
self._data = self._h5['/data/{:d}/'.format(self._iteration)]
self.attrs = self._data.attrs
def __del__(self):
del self._data
# --- Level 0 methods ---
def keys(self):
return list(self._data.keys())
def __getitem__(self, key):
return self._data[key]
# --- Level 1 methods ---
def data(self, key):
'''
should work with any key, that contains data, thus on every hdf5.Dataset,
but not on hdf5.Group. Will extract the data, convert it to SI and return it
as a numpy array. Constant records will be detected and converted to
a numpy array containing a single value only.
'''
record = self[key]
if "value" in record.attrs:
# constant data (a single int or float)
ret = np.float64(record.attrs['value']) * record.attrs['unitSI']
else:
# array data
ret = np.float64(record[()]) * record.attrs['unitSI']
return ret
def gridoffset(self, key, axis):
axid = helper.axesidentify[axis]
if "gridUnitSI" in self[key].attrs:
attrs = self[key].attrs
else:
attrs = self[key].parent.attrs
return attrs['gridGlobalOffset'][axid] * attrs['gridUnitSI']
def gridspacing(self, key, axis):
axid = helper.axesidentify[axis]
if "gridUnitSI" in self[key].attrs:
attrs = self[key].attrs
else:
attrs = self[key].parent.attrs
return attrs['gridSpacing'][axid] * attrs['gridUnitSI']
def gridpoints(self, key, axis):
axid = helper.axesidentify[axis]
return self[key].shape[axid]
# --- Level 2 methods ---
def timestep(self):
return self._iteration
def time(self):
return np.float64(self.attrs['time'] * self.attrs['timeUnitSI'])
def simdimensions(self):
'''
the number of spatial dimensions the simulation was using.
'''
for k in self._simgridkeys():
try:
gs = self.gridspacing(k, None)
return len(gs)
except(KeyError):
pass
raise KeyError('number of simdimensions could not be retrieved for {}'.format(self))
def _keyE(self, component, **kwargs):
axsuffix = {0: 'x', 1: 'y', 2: 'z', 90: 'r', 91: 't'}[helper.axesidentify[component]]
return 'fields/E/{}'.format(axsuffix)
def _keyB(self, component, **kwargs):
axsuffix = {0: 'x', 1: 'y', 2: 'z', 90: 'r', 91: 't'}[helper.axesidentify[component]]
return 'fields/B/{}'.format(axsuffix)
def _simgridkeys(self):
return ['fields/E/x', 'fields/E/y', 'fields/E/z',
'fields/B/x', 'fields/B/y', 'fields/B/z']
def listSpecies(self):
ret = list(self['particles'].keys())
return ret
def getSpecies(self, species, attrib):
"""
Returns one of the attributes out of (x,y,z,px,py,pz,weight,ID,mass,charge) of
this particle species.
"""
attribid = helper.attribidentify[attrib]
options = {9: 'particles/{}/weighting',
0: 'particles/{}/position/x',
1: 'particles/{}/position/y',
2: 'particles/{}/position/z',
3: 'particles/{}/momentum/x',
4: 'particles/{}/momentum/y',
5: 'particles/{}/momentum/z',
10: 'particles/{}/id',
11: 'particles/{}/mass',
12: 'particles/{}/charge'}
optionsoffset = {0: 'particles/{}/positionOffset/x',
1: 'particles/{}/positionOffset/y',
2: 'particles/{}/positionOffset/z'}
key = options[attribid]
offsetkey = optionsoffset.get(attribid)
try:
data = self.data(key.format(species))
if offsetkey is not None:
data += self.data(offsetkey.format(species))
ret = np.asarray(data, dtype=np.float64)
except(IndexError):
raise KeyError
return ret
    def getderived(self):
        '''
        return all other fields dumped, except E and B.
        '''
        ret = []
        self['fields'].visit(ret.append)
        ret = ['fields/{}'.format(r) for r in ret if not (r.startswith('E') or r.startswith('B'))]
        # keep datasets only (groups have no `.value`)
        # NOTE(review): `.value` was removed in h5py 3.x -- confirm the
        # supported h5py version for this check
        ret = [r for r in ret if hasattr(self[r], 'value')]
        ret.sort()
        return ret
    def __str__(self):
        # identify the reader by the dump it points to (debugging aid)
        return '<OpenPMDh5reader at "' + str(self.dumpidentifier) + '">'
class FbpicReader(OpenPMDreader):
    '''
    Special OpenPMDreader for FBpic, which is using an expansion into radial modes.
    This is subclass of the OpenPMDreader which is converting the modes to
    a radial representation.
    '''
    def __init__(self, simidentifier, **kwargs):
        super(FbpicReader, self).__init__(simidentifier, **kwargs)

    @staticmethod
    def modeexpansion(rawdata, theta=None, Ntheta=None):
        '''
        rawdata has to be shaped (Nm, Nr, Nz).
        Returns an array of shape (Nr, Ntheta, Nz), with
        `Ntheta = (Nm+1)//2`. If Ntheta is given only larger
        values are permitted.
        The corresponding values for theta are given by
        `np.linspace(0, 2*np.pi, Ntheta, endpoint=False)`
        '''
        rawdata = np.asarray(rawdata)
        Nm, Nr, Nz = rawdata.shape
        if Ntheta is not None or theta is None:
            # no specific theta requested: expand onto a regular theta
            # grid using the fast fft-based version
            return FbpicReader._modeexpansion_fft(rawdata, Ntheta=Ntheta)
        else:
            # expand only for the explicitly given theta value(s)
            return FbpicReader._modeexpansion_naiv(rawdata, theta=theta)

    @staticmethod
    def _modeexpansion_naiv_single(rawdata, theta=0):
        '''
        The mode representation will be expanded for a given theta.
        rawdata has to have the shape (Nm, Nr, Nz).
        the returned array will be of shape (Nr, Nz).
        '''
        rawdata = np.float64(rawdata)
        (Nm, Nr, Nz) = rawdata.shape
        # weights of the (cos, sin) mode pairs at this theta;
        # mode 0 enters with weight 1
        mult_above_axis = [1]
        for mode in range(1, (Nm + 1) // 2):
            cos = np.cos(mode * theta)
            sin = np.sin(mode * theta)
            mult_above_axis += [cos, sin]
        mult_above_axis = np.float64(mult_above_axis)
        F_total = np.tensordot(mult_above_axis,
                               rawdata, axes=(0, 0))
        assert F_total.shape == (Nr, Nz), \
            '''
            Assertion error. Please open a new issue on github to report this.
            shape={}, Nr={}, Nz={}
            '''.format(F_total.shape, Nr, Nz)
        return F_total

    @staticmethod
    def _modeexpansion_naiv(rawdata, theta=0):
        '''
        converts to radial data using `modeexpansion`, possibly for multiple
        theta at once.
        '''
        # Fix: compare the shape tuple by equality. The original used
        # `shape is ()`, which relies on CPython interning the empty tuple.
        if np.asarray(theta).shape == ():
            # single theta
            theta = [theta]
        # multiple theta
        data = np.asarray([FbpicReader._modeexpansion_naiv_single(rawdata, theta=t)
                           for t in theta])
        # switch from (theta, r, z) to (r, theta, z)
        data = data.swapaxes(0, 1)
        return data

    @staticmethod
    def _modeexpansion_fft(rawdata, Ntheta=None):
        '''
        calculate the radialdata using an fft. This is by far the fastest
        way to do the modeexpansion.
        '''
        Nm, Nr, Nz = rawdata.shape
        Nth = (Nm + 1) // 2
        if Ntheta is None or Ntheta < Nth:
            Ntheta = Nth
        fd = np.empty((Nr, Ntheta, Nz), dtype=np.complex128)
        # mode 0 is purely real; modes m>0 come as (cos, sin) pairs which
        # are combined into complex fourier coefficients
        fd[:, 0, :].real = rawdata[0, :, :]
        rawdatasw = np.swapaxes(rawdata, 0, 1)
        fd[:, 1:Nth, :].real = rawdatasw[:, 1::2, :]
        fd[:, 1:Nth, :].imag = rawdatasw[:, 2::2, :]
        fd = fft.fft(fd, axis=1).real
        return fd

    # override inherited method to count points after mode expansion
    def gridoffset(self, key, axis):
        axid = helper.axesidentify[axis]
        if axid == 91:  # theta
            return 0
        else:
            # r, theta, z
            axidremap = {90: 0, 2: 1}[axid]
            return super(FbpicReader, self).gridoffset(key, axidremap)

    # override inherited method to count points after mode expansion
    def gridspacing(self, key, axis):
        axid = helper.axesidentify[axis]
        if axid == 91:  # theta
            return 2 * np.pi / self.gridpoints(key, axis)
        else:
            # r, theta, z
            axidremap = {90: 0, 2: 1}[axid]
            return super(FbpicReader, self).gridspacing(key, axidremap)

    # override inherited method to count points after mode expansion
    def gridpoints(self, key, axis):
        axid = helper.axesidentify[axis]
        axid = axid % 90  # for r and theta
        (Nm, Nr, Nz) = self[key].shape
        # Ntheta does technically not exists because of the mode
        # representation. To do a proper conversion from the modes to
        # the grid, choose Ntheta based on the number of modes.
        Ntheta = (Nm + 1) // 2
        return (Nr, Ntheta, Nz)[axid]

    # override
    def _defaultaxisorder(self, gridkey):
        return ('r', 'theta', 'z')

    # override from OpenPMDreader
    def data(self, key, **kwargs):
        raw = super(FbpicReader, self).data(key)  # SI conversion
        if key.startswith('particles'):
            return raw
        # for fields expand the modes into a spatial grid first:
        data = self.modeexpansion(raw, **kwargs)  # modeexpansion
        return data

    def dataE(self, component, theta=None, Ntheta=None, **kwargs):
        return self.data(self._keyE(component, **kwargs), theta=theta, Ntheta=Ntheta)

    # Fix: `Ntheta` was referenced in the body but missing from the
    # signature, so every call to dataB raised a NameError.
    def dataB(self, component, theta=None, Ntheta=None, **kwargs):
        return self.data(self._keyB(component, **kwargs), theta=theta, Ntheta=Ntheta)

    # override
    def __str__(self):
        return '<FbpicReader at "' + str(self.dumpidentifier) + '">'
class FileSeries(Simulationreader_ifc):
    '''
    Reads a time series of dumps from a given directory.

    The simidentifier is expanded using glob in order to
    find matching files.
    '''
    def __init__(self, simidentifier, dumpreadercls=OpenPMDreader, **kwargs):
        super(FileSeries, self).__init__(simidentifier, **kwargs)
        # reader class used to open the individual dump files
        self.dumpreadercls = dumpreadercls
        import glob
        # sorted for a stable temporal ordering of the dumps
        self._dumpfiles = glob.glob(simidentifier)
        self._dumpfiles.sort()

    def _getDumpreader(self, n):
        '''
        Do not use this method. It will be called by __getitem__.
        Use __getitem__ instead.
        '''
        return self.dumpreadercls(self._dumpfiles[n])

    def __len__(self):
        # number of dumps in this series
        return len(self._dumpfiles)

    def __str__(self):
        return '<FileSeries at "' + self.simidentifier + '">'
class FbpicFileSeries(FileSeries):
    '''
    A FileSeries that always uses the FbpicReader for its dumps.
    '''
    def __init__(self, *args, **kwargs):
        super(FbpicFileSeries, self).__init__(*args, **kwargs)
        # force the fbpic-specific reader regardless of what the base
        # class __init__ selected
        self.dumpreadercls = FbpicReader
|
skuschel/postpic
|
postpic/datareader/openPMDh5.py
|
Python
|
gpl-3.0
| 12,725
|
[
"VisIt"
] |
fcaa1c55c154b40d576d220b701b99bd9cc991fe679ffffef91177a8b78bc69e
|
import inspect
from functools import wraps
from motherbrain import conf
class Unauthorized(Exception):
    """Raised when an entity fails authorization.

    Carries the entity name/value that was rejected and an optional
    human-readable detail string.
    """
    def __init__(self, entity_name, entity_value, detail=None):
        self.entity_name = entity_name
        self.entity_value = entity_value
        self.detail = detail

    def __str__(self):
        parts = ['Authorization Denied for {}: {}'.format(self.entity_name,
                                                          self.entity_value)]
        if self.detail:
            parts.append(self.detail)
        return ', '.join(parts)
def action(action_cluster):
    """Mark the decorated function as an action of *action_cluster*.

    The returned wrapper exposes:
      - ``is_action``      -- always True
      - ``action_cluster`` -- the cluster passed to the decorator
      - ``argspec()``      -- the wrapped function's argspec

    Bug fix: the original assigned ``wrapped.argspec`` *inside* the
    wrapper body, so the attribute only existed after the first call.
    It is now set at decoration time. ``inspect.getfullargspec`` is used
    because ``inspect.getargspec`` was removed in Python 3.11.
    """
    def decorator(f):
        def wrapped(*args, **kwargs):
            return f(*args, **kwargs)
        # preserve wrapped decorated function argspec (available
        # immediately, not only after the first invocation)
        wrapped.argspec = lambda: inspect.getfullargspec(f)
        wrapped.is_action = True
        wrapped.action_cluster = action_cluster
        return wraps(f)(wrapped)
    return decorator
def secure_action(query_callback, action_cluster):
    """Secure Action Decorator

    query_callback -- a method which should return
                      a tuple (identity, identity_value)

    Identity should be the key used to fetch the result (identity_value)

    query_callback will be invoked as follow:
        query_callback(context)

    query_callback should search for authentication key value
    in @context

    Example:
        def callback_example(context):
            username = context.get('username')
            authorized_users = {'joe': ['Joe', 'White', 'joe@foo.com'],
                                'moe': ['Moe', 'Black', 'moe@bar.com']}
            if username in authorized_users:
                return (username, authorized_users[username])
            return (username, None)

    Raises Unauthorized when the callback returns a falsy identity_value.
    """
    def decorator(f):
        @action(action_cluster)
        def wrapped(*args, **kwargs):
            # first positional argument is the request context used to
            # authenticate the caller
            context = args[0]
            identity, identity_value = query_callback(context)
            if not identity_value:
                raise Unauthorized('user', identity, 'no match')
            return f(*args, **kwargs)
        # preserve wrapped decorated function argspec
        # (getfullargspec: inspect.getargspec was removed in Python 3.11)
        wrapped.argspec = lambda: inspect.getfullargspec(f)
        return wraps(f)(wrapped)
    return decorator
|
urlist/urlist
|
motherbrain/workers/decorators.py
|
Python
|
gpl-3.0
| 2,243
|
[
"MOE"
] |
fec97d96a763629917dc4e7cb4ac9a87f032b05e388efc7c2268b75d48c383b2
|
from calendar import monthrange
from itertools import product
import multiprocessing
import os
import time
import numpy as np
import pandas as pd
from scipy import spatial
import xarray as xr
from gsee.climatedata_interface.pre_gsee_processing import resample_for_gsee
from gsee.climatedata_interface import util
def run_interface_from_dataset(
    data: xr.Dataset,
    params: dict,
    frequency="detect",
    pdfs_file="builtin",
    num_cores=multiprocessing.cpu_count(),
) -> xr.Dataset:
    """
    Parameters
    ----------
    data: xarray Dataset
        containing at least one variable 'global_horizontal' with mean
        global horizontal irradiance in W/m2.
        Optional variables: 'diffuse_fraction', 'temperature' in °C
    params: dict
        Parameters for GSEE, i.e. 'tilt', 'azim',
        'tracking', 'capacity'. tilt can be a function depending on
        latitude -- see example input. Tracking can be 0, 1, 2 for no
        tracking, 1-axis tracking, 2-axis tracking.
    frequency: str, optional
        Frequency of the input data. One of ['A', 'S', 'M', 'D', 'H'],
        for annual, seasonal, monthly, daily, hourly. Defaults to 'detect',
        which attempts to automatically detect the correct frequency.
    pdfs_file: str, optional
        Path to a NetCDF file with probability density functions to use
        for each month. Only for annual, seasonal and monthly data.
        Default is 'builtin', which automatically downloads and uses a
        built-in global PDF based on MERRA-2 data. Set to None to disable.
    num_cores: int, optional
        Number of cores that should be used for the computation.
        Default is all available cores.

    Returns
    -------
    xarray Dataset
        PV power output in Wh/hour if frequency is 'H', else in Wh/day

    """
    frequency = _detect_frequency(data, frequency)

    # Produce list of coordinates of all grid points to iterate over
    coord_list = list(product(data["lat"].values, data["lon"].values))

    # Modify time dimension so it fits the requirements of
    # the "resample_for_gsee" function
    data["time"] = _mod_time_dim(pd.to_datetime(data["time"].values), frequency)

    # Shareable list with a place for every coordinate in the grid
    manager = multiprocessing.Manager()
    shr_mem = manager.list([None] * len(coord_list))
    # Store length of coordinate list in prog_mem to draw
    # the progress bar dynamically
    prog_mem = manager.list()
    prog_mem.append(len(coord_list))

    start = time.time()

    if pdfs_file is not None:
        if frequency in ["A", "S", "M"]:
            pdfs_path = util.return_pdf_path() if pdfs_file == "builtin" else pdfs_file
            pdfs = xr.open_dataset(pdfs_path)
            pdf_coords = list(product(pdfs["lat"].values, pdfs["lon"].values))
            # nearest-neighbour match of every grid cell to a PDF cell
            tree = spatial.KDTree(pdf_coords)
            coord_list_nn = [pdf_coords[int(tree.query([x])[1])] for x in coord_list]
        else:
            # Fix: the message used to claim 'other than "A", "M", or "D"',
            # which contradicted the ["A", "S", "M"] condition above.
            raise ValueError(
                'For frequencies other than "A", "S", or "M", '
                "`pdfs_file` must be explicitly set to None."
            )

    if num_cores > 1:
        from joblib import Parallel, delayed, wrap_non_picklable_objects
        from joblib.parallel import get_active_backend

        print("Parallel mode: {} cores".format(num_cores))
        Parallel(n_jobs=num_cores)(
            delayed(wrap_non_picklable_objects(resample_for_gsee))(
                data.sel(lat=coords[0], lon=coords[1]),
                frequency,
                params,
                i,
                coords,
                shr_mem,
                prog_mem,
                None
                if pdfs_file is None
                else pdfs.sel(lat=coord_list_nn[i][0], lon=coord_list_nn[i][1]),
            )
            for i, coords in enumerate(coord_list)
        )
    else:
        print("Single core mode")
        for i, coords in enumerate(coord_list):
            resample_for_gsee(
                data.sel(lat=coords[0], lon=coords[1]),
                frequency,
                params,
                i,
                coords,
                shr_mem,
                prog_mem,
                None
                if pdfs_file is None
                else pdfs.sel(lat=coord_list_nn[i][0], lon=coord_list_nn[i][1]),
            )

    end = time.time()
    print("\nComputation part took: {} seconds".format(str(round(end - start, 2))))

    # Stitch together the data: grid cells that were processed hold an
    # xarray Dataset in shr_mem, the rest stay None.
    result = xr.Dataset()
    for piece in shr_mem:
        if isinstance(piece, xr.Dataset):
            result = xr.merge([result, piece])
    result = result.transpose("time", "lat", "lon")
    result["time"] = data["time"]

    if frequency == "H":
        result["pv"].attrs["unit"] = "Wh"
    elif frequency in ["A", "S", "M", "D"]:
        result["pv"].attrs["unit"] = "Wh/day"

    return result
def run_interface(
    ghi_data: tuple,
    outfile: str,
    params: dict,
    frequency="detect",
    diffuse_data=("", ""),
    temp_data=("", ""),
    timeformat=None,
    pdfs_file="builtin",
    num_cores=multiprocessing.cpu_count(),
):
    """
    Input file must include 'time', 'lat' and 'lon' dimensions.

    Parameters
    ----------
    ghi_data: tuple
        Tuple with path to a NetCDF file with global horizontal
        irradiance data and variable name in that file.
    outfile: string
        Path to NetCDF file to store output in.
    params: dict
        Parameters for GSEE, i.e. 'tilt', 'azim',
        'tracking', 'capacity'. tilt can be a function depending on
        latitude -- see example input. Tracking can be 0, 1, 2 for no
        tracking, 1-axis tracking, 2-axis tracking.
    frequency: str, optional
        Frequency of the input data. One of ['A', 'S', 'M', 'D', 'H'],
        for annual, seasonal, monthly, daily, hourly. Defaults to 'detect',
        which attempts to automatically detect the correct frequency.
    diffuse_data: tuple, optional
        Tuple with path to a NetCDF file with diffuse fraction data
        and variable name in that file. If not given, BRL model is
        used to estimate diffuse fraction.
    temp_data: tuple, optional
        Tuple with path to a NetCDF file with temperature data (°C or °K)
        and variable name in that file. If not given, constant
        temperature of 20 degrees C is assumed.
    timeformat: string, optional
        If set to 'cmip', 'cmip5', or 'cmip6', then the date format common
        in the CMIP datasets (e.g. '20070104.5') is correctly dealt with.
        Otherwise it is left to xarray to detect the time format.
    pdfs_file: str, optional
        Path to a NetCDF file with probability density functions to use
        for each month. Only for annual, seasonal and monthly data.
        Default is 'builtin', which automatically downloads and uses a
        built-in global PDF based on MERRA-2 data. Set to None to disable.
    num_cores: int, optional
        Number of cores that should be used for the computation.
        Default is all available cores.

    Returns
    -------
    None

    """
    # Read Files:
    ds_merged, ds_in = _open_files(ghi_data, diffuse_data, temp_data)
    if timeformat in ["cmip", "cmip5", "cmip6"]:
        try:
            ds_merged["time"] = _parse_cmip_time_data(ds_merged)
        except Exception:
            raise ValueError(
                'Parsing of "cmip5" time dimension failed. Set timeformat to None, or check your data.'
            )
    # Check whether the time dimension was recognised correctly and interpreted as time by dataset
    if not type(ds_merged["time"].values[0]) is np.datetime64:
        raise ValueError(
            'Time format not recognised. Try setting timeformat="cmip5" or check your data.'
        )

    # never overwrite an existing output file
    if os.path.isfile(outfile):
        print("{} already exists --> skipping".format(outfile.split("/", -1)[-1]))
    else:
        print(
            "{} does not yet exist --> Computing in ".format(
                outfile.split("/", -1)[-1]
            ),
            end="",
        )
        ds_pv = run_interface_from_dataset(
            data=ds_merged,
            params=params,
            frequency=frequency,
            pdfs_file=pdfs_file,
            num_cores=num_cores,
        )

        # Kill leftover coordinates that no variable is indexed over
        coords_to_kill = [i for i in ds_pv.coords if i not in ds_pv.dims]
        for coord in coords_to_kill:
            del ds_pv[coord]

        # Carry over CF attributes from the remaining dimensions
        for attr in ["standard_name", "long_name", "units", "axis"]:
            for dim in ds_pv.dims:
                if attr in ds_in[dim].attrs and attr not in ds_pv[dim].attrs:
                    ds_pv[dim].attrs[attr] = ds_in[dim].attrs[attr]

        # Clean up time dimension before saving back to disk.
        # When xarray encounters time atributes it cannot parse, it
        # persists them, and later raises an exception when trying to save
        # results back to NetCDF and attempting to serialise our parsed time
        # dimension back to units/calendar attributes that it expects
        # not to already exist.
        for attr in ["units", "calendar"]:
            if attr in ds_pv.time.attrs:
                del ds_pv.time.attrs[attr]

        # Save results with zlib compression
        encoding_params = {"zlib": True, "complevel": 4}
        encoding = {k: encoding_params for k in list(ds_pv.data_vars)}
        ds_pv.to_netcdf(path=outfile, format="NETCDF4", encoding=encoding)
# ----------------------------------------------------------------------------------------------------------------------
# Support functions for run_interface_from_dataset:
# ----------------------------------------------------------------------------------------------------------------------
def _mod_time_dim(time_dim: pd.date_range, freq: str):
"""
Modify Time dimension so it fits the requirements of the "resample_for_gsee" function
Parameters
----------
time_dim: array
with datetime entries
freq: string
representing data frequency of na_time
Returns
-------
array
modified time dimension
"""
if freq == "A":
# Annual data is set to the beginning of the year
return time_dim.map(
lambda x: pd.Timestamp(year=x.year, month=1, day=1, hour=0, minute=0)
)
elif freq in ["S", "M"]:
# Seasonal data is set to middle of month, as it is often represented with the day in the middle of the season.
# Monthly data is set to middle of month
return time_dim.map(
lambda x: pd.Timestamp(
year=x.year,
month=x.month,
day=int(monthrange(x.year, x.month)[1] / 2),
hour=0,
minute=0,
)
)
elif freq == "D":
# Daily data is set to 00:00 hours of the day
return time_dim.map(
lambda x: pd.Timestamp(
year=x.year, month=x.month, day=x.day, hour=0, minute=0
)
)
else:
return time_dim
# ----------------------------------------------------------------------------------------------------------------------
# Support functions for run_interface:
# ----------------------------------------------------------------------------------------------------------------------
def _detect_frequency(ds: xr.Dataset, frequency="detect"):
"""
Tries to detect the frequency of the given dataset.
Raises Warning if the detected freqency does not match that
given in frequency, if frequency is not set to 'detect'.
Parameters
----------
ds : xarray Dataset
Must contain a 'time' dimension.
frequency : str, optional
Optionalluy set this to frequencuy given by user: one of
['A', 'S', 'M', 'D', 'H'] for annual, seasonal, monthly, daily, hourly.
Returns
-------
data_freq : str
Detected or validated frequency.
"""
# Tries to detect frequency, otherwise falls back to manual entry, also compares if the two match:
nc_freq = None
try:
nc_freq = ds.attrs["frequency"]
except KeyError:
try:
nc_freq = pd.DatetimeIndex(data=ds["time"].values).inferred_freq[0]
except:
pass
if not nc_freq:
print("> No frequency detected --> checking manually given frequency", end="")
if frequency in ["A", "S", "M", "D", "H"]:
print("...Manual entry is valid")
data_freq = frequency
else:
raise ValueError("Detect failed or manual entry is invalid.")
else:
if nc_freq == "year":
data_freq = "A"
elif nc_freq == "mon":
data_freq = "M"
elif nc_freq == "day":
data_freq = "D"
else:
data_freq = nc_freq
print("> Detected frequency: {}".format(data_freq))
if frequency == "S" and data_freq not in ["A", "M", "D", "H"]:
print(
'> Frequency is detected, but is not "A", "M", "D", or "H" thus assumed some kind of seasonal'
)
return frequency
if (
data_freq in ["A", "S", "M", "D", "H"]
and frequency != data_freq
and frequency != "detect"
):
raise Warning(
"\tManual given frequency is valid, however it does not match detected frequency. Check settings!"
)
if data_freq not in ["A", "S", "M", "D", "H"]:
raise ValueError(
'> Time frequency invalid, use one from ["A", "S", "M", "D", "H"]'
)
return data_freq
def _parse_cmip_time_data(ds: xr.Dataset):
"""
Converts time data saved as number with format "day as %Y%m%d.%f" to datetime64 format
Parameters
----------
ds: xarray dataset
with 'time' dimension in "day as %Y%m%d.%f" format
Returns
-------
array
with converted datetime64 entries
"""
# Translates date-string used in CMIP5 data to datetime-objects
timestr = [str(ti) for ti in ds["time"].values]
vfunc = np.vectorize(
lambda x: np.datetime64(
"{}-{}-{}T{:02d}:{}".format(
x[:4], x[4:6], x[6:8], int(24 * float("0." + x[9:])), "00"
)
)
)
return vfunc(timestr)
def _open_files(ghi_data: tuple, diffuse_data: tuple, temp_data: tuple):
    """
    Opens the given files for GHI, diffuse fraction and temperature,
    extracts the corresponding variables and merges all three together
    into one dataset.

    Parameters
    ----------
    ghi_data: Tuple
        with Filepath for .nc file with global horizontal irradiance data
        and variable name in that file
    diffuse_data: Tuple
        Tuple with Filepath for .nc file with diffuse fraction data and variable name in that file
    temp_data: Tuple
        Tuple with Filepath for .nc file with temperature data (°C or °K) and variable name in that file

    Returns
    -------
    ds_tot: xarray dataset
        merged dataset with all available variables: global_horizontal, diffuse_fraction, temperature
    ds_th_in: xarray dataset
        dataset of input file without any being processed. Is used later to detect frequency

    """
    ghi_file, ghi_var = ghi_data
    diffuse_file, diffuse_var = diffuse_data
    temp_file, temp_var = temp_data

    try:
        ds_ghi_in = xr.open_dataset(ghi_file)
    except Exception:
        raise FileNotFoundError("Radiation file not found")
    # makes sure only the specified variable gets used further:
    ds_ghi = ds_ghi_in[ghi_var].to_dataset()
    ds_merged = ds_ghi.rename({ghi_var: "global_horizontal"})

    # Open diffuse_fraction file (optional):
    try:
        ds_diffuse_in = xr.open_dataset(diffuse_file)
        ds_diffuse = ds_diffuse_in[diffuse_var].to_dataset()
        if ds_ghi.dims != ds_diffuse.dims:
            # Fix: corrected the typo "fraciton" in the error message.
            raise ValueError(
                "Dimension of diffuse fraction file does not match radiation file"
            )
        ds_merged = xr.merge([ds_merged, ds_diffuse]).rename(
            {diffuse_var: "diffuse_fraction"}
        )
    except OSError:
        print("> No diffuse fraction file found -> will calculate with BRL-Model")

    # Open temperature file (optional):
    try:
        ds_temp_in = xr.open_dataset(temp_file)
        ds_temp = ds_temp_in[temp_var].to_dataset()
        # crude Kelvin detection: plausible surface temperatures in °C
        # never reach 200
        if ds_temp[temp_var].mean().values > 200:
            print("> Average temperature above 200° detected --> will convert to °C")
            ds_temp = ds_temp - 273.15  # convert form kelvin to celsius
        if ds_ghi.dims != ds_temp.dims:
            raise ValueError(
                "Dimension of temperature file does not match radiation file"
            )
        ds_merged = xr.merge([ds_merged, ds_temp]).rename({temp_var: "temperature"})
    except OSError:
        print("> No temperature file found -> will assume 20°C default value")

    assert ds_merged.dims == ds_ghi.dims
    return ds_merged, ds_ghi_in
|
renewables-ninja/gsee
|
gsee/climatedata_interface/interface.py
|
Python
|
bsd-3-clause
| 17,053
|
[
"NetCDF"
] |
32e7e4d99447582dff10d576a6f9302b577d372f9bf3306a55f9b49b0bddfe5a
|
"""
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Author: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import array2d, as_float_array, check_random_state
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
w: array of shape(n), to be orthogonalized
W: array of shape(p, n), null space definition
j: int < p
caveats
-------
assumes that W is orthogonal
w changed in place
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
K = np.dot(W, W.T)
s, u = linalg.eigh(K)
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
W = np.dot(np.dot(np.dot(u, np.diag(1.0 / np.sqrt(s))), u.T), W)
return W
def _ica_def(X, tol, g, gprime, fun_args, max_iter, w_init):
    """Deflationary FastICA using fun approx to neg-entropy function

    Used internally by FastICA.

    Extracts the independent components one at a time; each new component
    is orthogonalized (Gram-Schmidt) against the rows found so far.
    """
    n_components = w_init.shape[0]
    W = np.zeros((n_components, n_components), dtype=float)

    # j is the index of the extracted component
    for j in range(n_components):
        w = w_init[j, :].copy()
        w /= np.sqrt((w ** 2).sum())

        n_iterations = 0
        # we set lim to tol+1 to be sure to enter at least once in next while
        lim = tol + 1
        while ((lim > tol) & (n_iterations < (max_iter - 1))):
            wtx = np.dot(w.T, X)
            gwtx = g(wtx, fun_args)
            g_wtx = gprime(wtx, fun_args)
            # one-unit fixed-point update
            w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w

            # decorrelate from the components extracted so far
            _gs_decorrelation(w1, W, j)

            w1 /= np.sqrt((w1 ** 2).sum())

            # convergence: |<w1, w>| approaches 1 when the direction
            # stops changing
            lim = np.abs(np.abs((w1 * w).sum()) - 1)
            w = w1
            n_iterations = n_iterations + 1

        W[j, :] = w

    return W
def _ica_par(X, tol, g, gprime, fun_args, max_iter, w_init):
    """Parallel FastICA.

    Used internally by FastICA --main loop

    Updates all components simultaneously; the full unmixing matrix is
    re-orthogonalized by a symmetric decorrelation after every step.
    """
    n, p = X.shape

    W = _sym_decorrelation(w_init)

    # we set lim to tol+1 to be sure to enter at least once in next while
    lim = tol + 1
    it = 0
    while ((lim > tol) and (it < (max_iter - 1))):
        wtx = np.dot(W, X)
        gwtx = g(wtx, fun_args)
        g_wtx = gprime(wtx, fun_args)
        # fixed-point update for all rows of W at once
        W1 = np.dot(gwtx, X.T) / float(p) \
             - np.dot(np.diag(g_wtx.mean(axis=1)), W)
        W1 = _sym_decorrelation(W1)
        # convergence: the diagonal of W1 W.T approaches +-1 elementwise
        lim = max(abs(abs(np.diag(np.dot(W1, W.T))) - 1))
        W = W1
        it += 1

    return W
def fastica(X, n_components=None, algorithm="parallel", whiten=True,
            fun="logcosh", fun_prime='', fun_args=None, max_iter=200,
            tol=1e-04, w_init=None, random_state=None):
    """Perform Fast Independent Component Analysis.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    n_components : int, optional
        Number of components to extract. If None no dimension reduction
        is performed.
    algorithm : {'parallel', 'deflation'}, optional
        Apply a parallel or deflational FASTICA algorithm.
    whiten: boolean, optional
        If true perform an initial whitening of the data. Do not set to
        false unless the data is already white, as you will get incorrect
        results.
        If whiten is true, the data is assumed to have already been
        preprocessed: it should be centered, normed and white.
    fun : string or function, optional
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function but in this case, its
        derivative should be provided via argument fun_prime
    fun_prime : empty string ('') or function, optional
        See fun.
    fun_args: dictionary, optional
        If None (default) and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}
    max_iter: int, optional
        Maximum number of iterations to perform
    tol: float, optional
        A positive scalar giving the tolerance at which the
        un-mixing matrix is considered to have converged
    w_init: (n_components, n_components) array, optional
        Initial un-mixing array of dimension (n.comp,n.comp).
        If None (default) then an array of normal r.v.'s is used
    random_state: int or RandomState
        Pseudo number generator state used for random sampling.

    Returns
    -------
    K: (n_components, p) array or None.
        If whiten is 'True', K is the pre-whitening matrix that projects data
        onto the first n.comp principal components. If whiten is 'False', K is
        'None'.
    W: (n_components, n_components) array
        estimated un-mixing matrix
        The mixing matrix can be obtained by::
            w = np.dot(W, K.T)
            A = w.T * (w * w.T).I
    S: (n_components, n) array
        estimated source matrix

    Notes
    -----
    The data matrix X is considered to be a linear combination of
    non-Gaussian (independent) components i.e. X = AS where columns of S
    contain the independent components and A is a linear mixing
    matrix. In short ICA attempts to `un-mix' the data by estimating an
    un-mixing matrix W where ``S = W K X.``

    Implemented using FastICA:
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`
    """
    random_state = check_random_state(random_state)
    # Fix: avoid a mutable default argument ({} was shared across calls).
    if fun_args is None:
        fun_args = {}
    # make interface compatible with other decompositions
    X = array2d(X).T

    algorithm_funcs = {'parallel': _ica_par,
                       'deflation': _ica_def}
    # Fix: the original used `.get(algorithm, 'parallel')`, which falls
    # back to the *string* 'parallel' and crashes with a confusing
    # TypeError when an unknown algorithm is requested.
    try:
        func = algorithm_funcs[algorithm]
    except KeyError:
        raise ValueError("algorithm must be 'parallel' or 'deflation', "
                         "got %r" % (algorithm,))

    alpha = fun_args.get('alpha', 1.0)
    if (alpha < 1) or (alpha > 2):
        raise ValueError("alpha must be in [1,2]")

    if isinstance(fun, str):
        # Some standard nonlinear functions
        # XXX: these should be optimized, as they can be a bottleneck.
        if fun == 'logcosh':
            def g(x, fun_args):
                alpha = fun_args.get('alpha', 1.0)
                return np.tanh(alpha * x)

            def gprime(x, fun_args):
                alpha = fun_args.get('alpha', 1.0)
                return alpha * (1 - (np.tanh(alpha * x)) ** 2)

        elif fun == 'exp':
            def g(x, fun_args):
                return x * np.exp(-(x ** 2) / 2)

            def gprime(x, fun_args):
                return (1 - x ** 2) * np.exp(-(x ** 2) / 2)

        elif fun == 'cube':
            def g(x, fun_args):
                return x ** 3

            def gprime(x, fun_args):
                return 3 * x ** 2
        else:
            raise ValueError(
                'fun argument should be one of logcosh, exp or cube')
    elif callable(fun):
        def g(x, fun_args):
            return fun(x, **fun_args)

        def gprime(x, fun_args):
            return fun_prime(x, **fun_args)
    else:
        raise ValueError('fun argument should be either a string '
                         '(one of logcosh, exp or cube) or a function')

    n, p = X.shape

    if n_components is None:
        n_components = min(n, p)
    if (n_components > min(n, p)):
        n_components = min(n, p)
        print("n_components is too large: it will be set to %s" % n_components)

    if whiten:
        # Centering the columns (ie the variables)
        X = X - X.mean(axis=-1)[:, np.newaxis]

        # Whitening and preprocessing by PCA
        u, d, _ = linalg.svd(X, full_matrices=False)
        del _
        K = (u / d).T[:n_components]  # see (6.33) p.140
        del u, d
        X1 = np.dot(K, X)
        # see (13.6) p.267 Here X1 is white and data
        # in X has been projected onto a subspace by PCA
        X1 *= np.sqrt(p)
    else:
        # X must be casted to floats to avoid typing issues with numpy
        # 2.0 and the line below
        X1 = as_float_array(X, copy=True)

    if w_init is None:
        w_init = random_state.normal(size=(n_components, n_components))
    else:
        w_init = np.asarray(w_init)
        if w_init.shape != (n_components, n_components):
            raise ValueError("w_init has invalid shape -- should be %(shape)s"
                             % {'shape': (n_components, n_components)})

    kwargs = {'tol': tol,
              'g': g,
              'gprime': gprime,
              'fun_args': fun_args,
              'max_iter': max_iter,
              'w_init': w_init}

    W = func(X1, **kwargs)
    del X1

    if whiten:
        S = np.dot(np.dot(W, K), X)
        return K, W, S.T
    else:
        S = np.dot(W, X)
        return None, W, S.T
class FastICA(BaseEstimator):
    """FastICA; a fast algorithm for Independent Component Analysis

    Parameters
    ----------
    n_components : int, optional
        Number of components to use. If none is passed, all are used.
    algorithm : {'parallel', 'deflation'}
        Apply parallel or deflational algorithm for FastICA
    whiten : boolean, optional
        If whiten is false, the data is already considered to be
        whitened, and no whitening is performed.
    fun : {'logcosh', 'exp', or 'cube'}, or a callable
        The non-linear function used in the FastICA loop to approximate
        negentropy. If a function is passed, it derivative should be
        passed as the 'fun_prime' argument.
    fun_prime : None or a callable
        The derivative of the non-linearity used.
    max_iter : int, optional
        Maximum number of iterations during fit
    tol : float, optional
        Tolerance on update at each iteration
    w_init : None of an (n_components, n_components) ndarray
        The mixing matrix to be used to initialize the algorithm.
    random_state: int or RandomState
        Pseudo number generator state used for random sampling.

    Attributes
    ----------
    `unmixing_matrix_` : 2D array, [n_components, n_samples]
        The unmixing matrix

    Notes
    -----
    Implementation based on
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`
    """

    def __init__(self, n_components=None, algorithm='parallel', whiten=True,
                 fun='logcosh', fun_prime='', fun_args=None, max_iter=200,
                 tol=1e-4, w_init=None, random_state=None):
        super(FastICA, self).__init__()
        self.n_components = n_components
        self.algorithm = algorithm
        self.whiten = whiten
        self.fun = fun
        self.fun_prime = fun_prime
        # normalize None to a fresh dict so instances never share state
        self.fun_args = {} if fun_args is None else fun_args
        self.max_iter = max_iter
        self.tol = tol
        self.w_init = w_init
        self.random_state = random_state

    def fit(self, X):
        """Estimate the unmixing matrix from X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        self
        """
        whitening_, unmixing_, sources_ = fastica(X, self.n_components,
                        self.algorithm, self.whiten,
                        self.fun, self.fun_prime, self.fun_args, self.max_iter,
                        self.tol, self.w_init,
                        random_state=self.random_state)
        # Fix idiom: `if self.whiten == True` replaced by a plain
        # truthiness test.
        if self.whiten:
            # fold the whitening matrix into the unmixing matrix so that
            # transform() can be applied to raw (unwhitened) data
            self.unmixing_matrix_ = np.dot(unmixing_, whitening_)
        else:
            self.unmixing_matrix_ = unmixing_
        self.components_ = sources_
        return self

    def transform(self, X):
        """Apply un-mixing matrix "W" to X to recover the sources

        S = X * W.T
        """
        return np.dot(X, self.unmixing_matrix_.T)

    def get_mixing_matrix(self):
        """Compute the mixing matrix (pseudo-inverse of the unmixing
        matrix).
        """
        return linalg.pinv(self.unmixing_matrix_)
|
cdegroc/scikit-learn
|
sklearn/decomposition/fastica_.py
|
Python
|
bsd-3-clause
| 12,450
|
[
"Gaussian"
] |
d009d763b9b5ec22e3dae1f30c778ef49bd1fae48670bf1a0f3634a339859c30
|
"""
Spatial Error Models with regimes module
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, Pedro V. Amaral pedro.amaral@asu.edu"
import numpy as np
import multiprocessing as mp
import regimes as REGI
import user_output as USER
import summary_output as SUMMARY
from pysal import lag_spatial
from ols import BaseOLS
from twosls import BaseTSLS
from error_sp import BaseGM_Error, BaseGM_Endog_Error, _momentsGM_Error
from utils import set_endog, iter_msg, sp_att, set_warn
from utils import optim_moments, get_spFilter, get_lags
from utils import spdot, RegressionPropsY
from platform import system
class GM_Error_Regimes(RegressionPropsY, REGI.Regimes_Frame):
    """
    GMM method for a spatial error model with regimes, with results and diagnostics;
    based on Kelejian and Prucha (1998, 1999)[1]_ [2]_.
    Parameters
    ----------
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, excluding the constant
    regimes : list
        List of n values with the mapping of each
        observation to a regime. Assumed to be aligned with 'x'.
    w : pysal W object
        Spatial weights object
    constant_regi: ['one', 'many']
        Switcher controlling the constant term setup. It may take
        the following values:
        * 'one': a vector of ones is appended to x and held
        constant across regimes
        * 'many': a vector of ones is appended to x and considered
        different per regime (default)
    cols2regi : list, 'all'
        Argument indicating whether each
        column of x should be considered as different per regime
        or held constant across regimes (False).
        If a list, k booleans indicating for each variable the
        option (True if one per regime, False to be held constant).
        If 'all' (default), all the variables vary by regime.
    regime_err_sep : boolean
        If True, a separate regression is run for each regime.
    regime_lag_sep : boolean
        Always False, kept for consistency, ignored.
    vm : boolean
        If True, include variance-covariance matrix in summary
        results
    cores : boolean
        Specifies if multiprocessing is to be used
        Default: no multiprocessing, cores = False
        Note: Multiprocessing may not work on all platforms.
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    name_regimes : string
        Name of regime variable for use in the output
    Attributes
    ----------
    summary : string
        Summary of regression results and diagnostics (note: use in
        conjunction with the print command)
    betas : array
        kx1 array of estimated coefficients
    u : array
        nx1 array of residuals
    e_filtered : array
        nx1 array of spatially filtered residuals
    predy : array
        nx1 array of predicted y values
    n : integer
        Number of observations
    k : integer
        Number of variables for which coefficients are estimated
        (including the constant)
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    mean_y : float
        Mean of dependent variable
    std_y : float
        Standard deviation of dependent variable
    pr2 : float
        Pseudo R squared (squared correlation between y and ypred)
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    vm : array
        Variance covariance matrix (kxk)
    sig2 : float
        Sigma squared used in computations
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    std_err : array
        1xk array of standard errors of the betas
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    z_stat : list of tuples
        z statistic; each tuple contains the pair (statistic,
        p-value), where each is a float
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    name_regimes : string
        Name of regime variable for use in the output
    title : string
        Name of the regression method used
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    regimes : list
        List of n values with the mapping of each
        observation to a regime. Assumed to be aligned with 'x'.
    constant_regi: ['one', 'many']
        Ignored if regimes=False. Constant option for regimes.
        Switcher controlling the constant term setup. It may take
        the following values:
        * 'one': a vector of ones is appended to x and held
        constant across regimes
        * 'many': a vector of ones is appended to x and considered
        different per regime
    cols2regi : list, 'all'
        Ignored if regimes=False. Argument indicating whether each
        column of x should be considered as different per regime
        or held constant across regimes (False).
        If a list, k booleans indicating for each variable the
        option (True if one per regime, False to be held constant).
        If 'all', all the variables vary by regime.
    regime_err_sep : boolean
        If True, a separate regression is run for each regime.
    kr : int
        Number of variables/columns to be "regimized" or subject
        to change by regime. These will result in one parameter
        estimate by regime for each variable (i.e. nr parameters per
        variable)
    kf : int
        Number of variables/columns to be considered fixed or
        global across regimes and hence only obtain one parameter
        estimate
    nr : int
        Number of different regimes in the 'regimes' list
    multi : dictionary
        Only available when multiple regressions are estimated,
        i.e. when regime_err_sep=True and no variable is fixed
        across regimes.
        Contains all attributes of each individual regression
    References
    ----------
    .. [1] Kelejian, H.R., Prucha, I.R. (1998) "A generalized spatial
    two-stage least squares procedure for estimating a spatial autoregressive
    model with autoregressive disturbances". The Journal of Real State
    Finance and Economics, 17, 1.
    .. [2] Kelejian, H.R., Prucha, I.R. (1999) "A Generalized Moments
    Estimator for the Autoregressive Parameter in a Spatial Model".
    International Economic Review, 40, 2.
    Examples
    --------
    We first need to import the needed modules, namely numpy to convert the
    data we read into arrays that ``spreg`` understands and ``pysal`` to
    perform all the analysis.
    >>> import pysal
    >>> import numpy as np
    Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
    This is the DBF associated with the NAT shapefile. Note that
    pysal.open() also reads data in CSV format; since the actual class
    requires data to be passed in as numpy arrays, the user can read their
    data in using any method.
    >>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
    Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
    dependent variable for the regression. Note that PySAL requires this to be
    an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
    that other packages accept.
    >>> y_var = 'HR90'
    >>> y = np.array([db.by_col(y_var)]).reshape(3085,1)
    Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
    the DBF to be used as independent variables in the regression. Other variables
    can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2','...]
    Note that PySAL requires this to be an nxj numpy array, where j is the
    number of independent variables (not including a constant). By default
    this model adds a vector of ones to the independent variables passed in.
    >>> x_var = ['PS90','UE90']
    >>> x = np.array([db.by_col(name) for name in x_var]).T
    The different regimes in this data are given according to the North and
    South dummy (SOUTH).
    >>> r_var = 'SOUTH'
    >>> regimes = db.by_col(r_var)
    Since we want to run a spatial error model, we need to specify
    the spatial weights matrix that includes the spatial configuration of the
    observations. To do that, we can open an already existing gal file or
    create a new one. In this case, we will create one from ``NAT.shp``.
    >>> w = pysal.rook_from_shapefile(pysal.examples.get_path("NAT.shp"))
    Unless there is a good reason not to do it, the weights have to be
    row-standardized so every row of the matrix sums to one. Among other
    things, this allows to interpret the spatial lag of a variable as the
    average value of the neighboring observations. In PySAL, this can be
    easily performed in the following way:
    >>> w.transform = 'r'
    We are all set with the preliminaries, we are good to run the model. In this
    case, we will need the variables and the weights matrix. If we want to
    have the names of the variables printed in the output summary, we will
    have to pass them in as well, although this is optional.
    >>> model = GM_Error_Regimes(y, x, regimes, w=w, name_y=y_var, name_x=x_var, name_regimes=r_var, name_ds='NAT.dbf')
    Once we have run the model, we can explore a little bit the output. The
    regression object we have created has many attributes so take your time to
    discover them. Note that because we are running the classical GMM error
    model from 1998/99, the spatial parameter is obtained as a point estimate, so
    although you get a value for it (there are for coefficients under
    model.betas), you cannot perform inference on it (there are only three
    values in model.se_betas). Alternatively, we can have a summary of the
    output by typing: model.summary
    >>> print model.name_x
    ['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', 'lambda']
    >>> np.around(model.betas, decimals=6)
    array([[ 0.074807],
           [ 0.786107],
           [ 0.538849],
           [ 5.103756],
           [ 1.196009],
           [ 0.600533],
           [ 0.364103]])
    >>> np.around(model.std_err, decimals=6)
    array([ 0.379864,  0.152316,  0.051942,  0.471285,  0.19867 ,  0.057252])
    >>> np.around(model.z_stat, decimals=6)
    array([[  0.196932,   0.843881],
           [  5.161042,   0.      ],
           [ 10.37397 ,   0.      ],
           [ 10.829455,   0.      ],
           [  6.02007 ,   0.      ],
           [ 10.489215,   0.      ]])
    >>> np.around(model.sig2, decimals=6)
    28.172732
    """

    def __init__(self, y, x, regimes, w,
                 vm=False, name_y=None, name_x=None, name_w=None,
                 constant_regi='many', cols2regi='all', regime_err_sep=False,
                 regime_lag_sep=False,
                 cores=False, name_ds=None, name_regimes=None):
        # Validate input shapes and require a spatial weights object.
        n = USER.check_arrays(y, x)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        self.constant_regi = constant_regi
        self.cols2regi = cols2regi
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_w = USER.set_name_w(name_w, w)
        self.name_regimes = USER.set_name_ds(name_regimes)
        self.n = n
        self.y = y
        x_constant = USER.check_constant(x)
        name_x = USER.set_name_x(name_x, x)
        self.name_x_r = name_x
        cols2regi = REGI.check_cols2regi(constant_regi, cols2regi, x)
        self.regimes_set = REGI._get_regimes_set(regimes)
        self.regimes = regimes
        USER.check_regimes(self.regimes_set, self.n, x.shape[1])
        self.regime_err_sep = regime_err_sep
        if regime_err_sep == True:
            # Fully separate estimation per regime is only valid when every
            # coefficient is allowed to vary across regimes.
            if set(cols2regi) == set([True]):
                self._error_regimes_multi(y, x, regimes, w, cores,
                                          cols2regi, vm, name_x)
            else:
                raise Exception, "All coefficients must vary accross regimes if regime_err_sep = True."
        else:
            # Single regression on the regime-expanded design matrix.
            self.x, self.name_x = REGI.Regimes_Frame.__init__(self, x_constant,
                                                              regimes, constant_regi=None, cols2regi=cols2regi, names=name_x)
            # Step 1: OLS residuals feed the GM moment conditions used to
            # obtain a point estimate of the spatial error parameter lambda.
            ols = BaseOLS(y=y, x=self.x)
            self.k = ols.x.shape[1]
            moments = _momentsGM_Error(w, ols.u)
            lambda1 = optim_moments(moments)
            # Step 2: rerun OLS on the spatially filtered variables
            # (Cochrane-Orcutt-style transformation with estimated lambda).
            xs = get_spFilter(w, lambda1, x_constant)
            ys = get_spFilter(w, lambda1, y)
            xs = REGI.Regimes_Frame.__init__(self, xs,
                                             regimes, constant_regi=None, cols2regi=cols2regi)[0]
            ols2 = BaseOLS(y=ys, x=xs)
            # Output
            # Note: predictions and residuals are computed on the
            # *unfiltered* design matrix, with the second-step betas.
            self.predy = spdot(self.x, ols2.betas)
            self.u = y - self.predy
            self.betas = np.vstack((ols2.betas, np.array([[lambda1]])))
            self.sig2 = ols2.sig2n
            self.e_filtered = self.u - lambda1 * lag_spatial(w, self.u)
            self.vm = self.sig2 * ols2.xtxi
            self.title = "SPATIALLY WEIGHTED LEAST SQUARES - REGIMES"
            self.name_x.append('lambda')
            self.kf += 1
            self.chow = REGI.Chow(self)
            self._cache = {}
            SUMMARY.GM_Error(reg=self, w=w, vm=vm, regimes=True)

    def _error_regimes_multi(self, y, x, regimes, w, cores,
                             cols2regi, vm, name_x):
        # Run one independent GM_Error regression per regime, then pool the
        # results into block-structured attributes on self.
        # Map each regime label to the row indices of its observations.
        regi_ids = dict(
            (r, list(np.where(np.array(regimes) == r)[0])) for r in self.regimes_set)
        results_p = {}
        """
        for r in self.regimes_set:
            if system() == 'Windows':
                results_p[r] = _work_error(*(y,x,regi_ids,r,w,self.name_ds,self.name_y,name_x+['lambda'],self.name_w,self.name_regimes))
                is_win = True
            else:
                pool = mp.Pool(cores)
                results_p[r] = pool.apply_async(_work_error,args=(y,x,regi_ids,r,w,self.name_ds,self.name_y,name_x+['lambda'],self.name_w,self.name_regimes, ))
                is_win = False
        """
        # NOTE(review): the quoted block above is dead code kept from an
        # earlier platform-dependent (Windows) implementation.
        for r in self.regimes_set:
            if cores:
                pool = mp.Pool(None)
                results_p[r] = pool.apply_async(_work_error, args=(
                    y, x, regi_ids, r, w, self.name_ds, self.name_y, name_x + ['lambda'], self.name_w, self.name_regimes, ))
            else:
                results_p[r] = _work_error(
                    *(y, x, regi_ids, r, w, self.name_ds, self.name_y, name_x + ['lambda'], self.name_w, self.name_regimes))
        self.kryd = 0
        self.kr = len(cols2regi)
        self.kf = 0
        self.nr = len(self.regimes_set)
        # Pre-allocate pooled outputs; each regime fills its own slice below.
        self.vm = np.zeros((self.nr * self.kr, self.nr * self.kr), float)
        self.betas = np.zeros((self.nr * (self.kr + 1), 1), float)
        self.u = np.zeros((self.n, 1), float)
        self.predy = np.zeros((self.n, 1), float)
        self.e_filtered = np.zeros((self.n, 1), float)
        """
        if not is_win:
            pool.close()
            pool.join()
        """
        if cores:
            pool.close()
            pool.join()
        results = {}
        self.name_y, self.name_x = [], []
        counter = 0
        for r in self.regimes_set:
            """
            if is_win:
                results[r] = results_p[r]
            else:
                results[r] = results_p[r].get()
            """
            if not cores:
                results[r] = results_p[r]
            else:
                results[r] = results_p[r].get()
            # Block-diagonal placement of this regime's vm and betas;
            # residuals/predictions are scattered back to the regime's rows.
            self.vm[(counter * self.kr):((counter + 1) * self.kr),
                    (counter * self.kr):((counter + 1) * self.kr)] = results[r].vm
            self.betas[
                (counter * (self.kr + 1)):((counter + 1) * (self.kr + 1)), ] = results[r].betas
            self.u[regi_ids[r], ] = results[r].u
            self.predy[regi_ids[r], ] = results[r].predy
            self.e_filtered[regi_ids[r], ] = results[r].e_filtered
            self.name_y += results[r].name_y
            self.name_x += results[r].name_x
            counter += 1
        self.chow = REGI.Chow(self)
        self.multi = results
        SUMMARY.GM_Error_multi(
            reg=self, multireg=self.multi, vm=vm, regimes=True)
class GM_Endog_Error_Regimes(RegressionPropsY, REGI.Regimes_Frame):
    '''
    GMM method for a spatial error model with regimes and endogenous variables, with
    results and diagnostics; based on Kelejian and Prucha (1998, 1999)[1]_[2]_.
    Parameters
    ----------
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, excluding the constant
    yend : array
        Two dimensional array with n rows and one column for each
        endogenous variable
    q : array
        Two dimensional array with n rows and one column for each
        external exogenous variable to use as instruments (note:
        this should not contain any variables from x)
    regimes : list
        List of n values with the mapping of each
        observation to a regime. Assumed to be aligned with 'x'.
    w : pysal W object
        Spatial weights object
    constant_regi: ['one', 'many']
        Switcher controlling the constant term setup. It may take
        the following values:
        * 'one': a vector of ones is appended to x and held
        constant across regimes
        * 'many': a vector of ones is appended to x and considered
        different per regime (default)
    cols2regi : list, 'all'
        Argument indicating whether each
        column of x should be considered as different per regime
        or held constant across regimes (False).
        If a list, k booleans indicating for each variable the
        option (True if one per regime, False to be held constant).
        If 'all' (default), all the variables vary by regime.
    regime_err_sep : boolean
        If True, a separate regression is run for each regime.
    regime_lag_sep : boolean
        Always False, kept for consistency, ignored.
    vm : boolean
        If True, include variance-covariance matrix in summary
        results
    cores : boolean
        Specifies if multiprocessing is to be used
        Default: no multiprocessing, cores = False
        Note: Multiprocessing may not work on all platforms.
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_yend : list of strings
        Names of endogenous variables for use in output
    name_q : list of strings
        Names of instruments for use in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    name_regimes : string
        Name of regime variable for use in the output
    Attributes
    ----------
    summary : string
        Summary of regression results and diagnostics (note: use in
        conjunction with the print command)
    betas : array
        kx1 array of estimated coefficients
    u : array
        nx1 array of residuals
    e_filtered : array
        nx1 array of spatially filtered residuals
    predy : array
        nx1 array of predicted y values
    n : integer
        Number of observations
    k : integer
        Number of variables for which coefficients are estimated
        (including the constant)
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    yend : array
        Two dimensional array with n rows and one column for each
        endogenous variable
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    z : array
        nxk array of variables (combination of x and yend)
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    mean_y : float
        Mean of dependent variable
    std_y : float
        Standard deviation of dependent variable
    vm : array
        Variance covariance matrix (kxk)
    pr2 : float
        Pseudo R squared (squared correlation between y and ypred)
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    sig2 : float
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
        Sigma squared used in computations
    std_err : array
        1xk array of standard errors of the betas
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    z_stat : list of tuples
        z statistic; each tuple contains the pair (statistic,
        p-value), where each is a float
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_yend : list of strings
        Names of endogenous variables for use in output
    name_z : list of strings
        Names of exogenous and endogenous variables for use in
        output
    name_q : list of strings
        Names of external instruments
    name_h : list of strings
        Names of all instruments used in ouput
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    name_regimes : string
        Name of regimes variable for use in output
    title : string
        Name of the regression method used
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    regimes : list
        List of n values with the mapping of each
        observation to a regime. Assumed to be aligned with 'x'.
    constant_regi : ['one', 'many']
        Ignored if regimes=False. Constant option for regimes.
        Switcher controlling the constant term setup. It may take
        the following values:
        * 'one': a vector of ones is appended to x and held
        constant across regimes
        * 'many': a vector of ones is appended to x and considered
        different per regime
    cols2regi : list, 'all'
        Ignored if regimes=False. Argument indicating whether each
        column of x should be considered as different per regime
        or held constant across regimes (False).
        If a list, k booleans indicating for each variable the
        option (True if one per regime, False to be held constant).
        If 'all', all the variables vary by regime.
    regime_err_sep : boolean
        If True, a separate regression is run for each regime.
    kr : int
        Number of variables/columns to be "regimized" or subject
        to change by regime. These will result in one parameter
        estimate by regime for each variable (i.e. nr parameters per
        variable)
    kf : int
        Number of variables/columns to be considered fixed or
        global across regimes and hence only obtain one parameter
        estimate
    nr : int
        Number of different regimes in the 'regimes' list
    multi : dictionary
        Only available when multiple regressions are estimated,
        i.e. when regime_err_sep=True and no variable is fixed
        across regimes.
        Contains all attributes of each individual regression
    References
    ----------
    .. [1] Kelejian, H.R., Prucha, I.R. (1998) "A generalized spatial
    two-stage least squares procedure for estimating a spatial autoregressive
    model with autoregressive disturbances". The Journal of Real State
    Finance and Economics, 17, 1.
    .. [2] Kelejian, H.R., Prucha, I.R. (1999) "A Generalized Moments
    Estimator for the Autoregressive Parameter in a Spatial Model".
    International Economic Review, 40, 2.
    Examples
    --------
    We first need to import the needed modules, namely numpy to convert the
    data we read into arrays that ``spreg`` understands and ``pysal`` to
    perform all the analysis.
    >>> import pysal
    >>> import numpy as np
    Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
    This is the DBF associated with the NAT shapefile. Note that
    pysal.open() also reads data in CSV format; since the actual class
    requires data to be passed in as numpy arrays, the user can read their
    data in using any method.
    >>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
    Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
    dependent variable for the regression. Note that PySAL requires this to be
    an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
    that other packages accept.
    >>> y_var = 'HR90'
    >>> y = np.array([db.by_col(y_var)]).reshape(3085,1)
    Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
    the DBF to be used as independent variables in the regression. Other variables
    can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2','...]
    Note that PySAL requires this to be an nxj numpy array, where j is the
    number of independent variables (not including a constant). By default
    this model adds a vector of ones to the independent variables passed in.
    >>> x_var = ['PS90','UE90']
    >>> x = np.array([db.by_col(name) for name in x_var]).T
    For the endogenous models, we add the endogenous variable RD90 (resource deprivation)
    and we decide to instrument for it with FP89 (families below poverty):
    >>> yd_var = ['RD90']
    >>> yend = np.array([db.by_col(name) for name in yd_var]).T
    >>> q_var = ['FP89']
    >>> q = np.array([db.by_col(name) for name in q_var]).T
    The different regimes in this data are given according to the North and
    South dummy (SOUTH).
    >>> r_var = 'SOUTH'
    >>> regimes = db.by_col(r_var)
    Since we want to run a spatial error model, we need to specify the spatial
    weights matrix that includes the spatial configuration of the observations
    into the error component of the model. To do that, we can open an already
    existing gal file or create a new one. In this case, we will create one
    from ``NAT.shp``.
    >>> w = pysal.rook_from_shapefile(pysal.examples.get_path("NAT.shp"))
    Unless there is a good reason not to do it, the weights have to be
    row-standardized so every row of the matrix sums to one. Among other
    things, this allows to interpret the spatial lag of a variable as the
    average value of the neighboring observations. In PySAL, this can be
    easily performed in the following way:
    >>> w.transform = 'r'
    We are all set with the preliminaries, we are good to run the model. In this
    case, we will need the variables (exogenous and endogenous), the
    instruments and the weights matrix. If we want to
    have the names of the variables printed in the output summary, we will
    have to pass them in as well, although this is optional.
    >>> model = GM_Endog_Error_Regimes(y, x, yend, q, regimes, w=w, name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='NAT.dbf')
    Once we have run the model, we can explore a little bit the output. The
    regression object we have created has many attributes so take your time to
    discover them. Note that because we are running the classical GMM error
    model from 1998/99, the spatial parameter is obtained as a point estimate, so
    although you get a value for it (there are for coefficients under
    model.betas), you cannot perform inference on it (there are only three
    values in model.se_betas). Also, this regression uses a two stage least
    squares estimation method that accounts for the endogeneity created by the
    endogenous variables included. Alternatively, we can have a summary of the
    output by typing: model.summary
    >>> print model.name_z
    ['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', '0_RD90', '1_RD90', 'lambda']
    >>> np.around(model.betas, decimals=5)
    array([[ 3.59718],
           [ 1.0652 ],
           [ 0.15822],
           [ 9.19754],
           [ 1.88082],
           [-0.24878],
           [ 2.46161],
           [ 3.57943],
           [ 0.25564]])
    >>> np.around(model.std_err, decimals=6)
    array([ 0.522633,  0.137555,  0.063054,  0.473654,  0.18335 ,  0.072786,
            0.300711,  0.240413])
    '''

    def __init__(self, y, x, yend, q, regimes, w, cores=False,
                 vm=False, constant_regi='many', cols2regi='all',
                 regime_err_sep=False, regime_lag_sep=False, name_y=None,
                 name_x=None, name_yend=None, name_q=None, name_w=None,
                 name_ds=None, name_regimes=None, summ=True, add_lag=False):
        # Validate input shapes and require a spatial weights object.
        n = USER.check_arrays(y, x, yend, q)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        self.constant_regi = constant_regi
        self.cols2regi = cols2regi
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_regimes = USER.set_name_ds(name_regimes)
        self.name_w = USER.set_name_w(name_w, w)
        self.n = n
        self.y = y
        name_x = USER.set_name_x(name_x, x)
        # ``summ`` is False when this __init__ is re-used by a subclass
        # (e.g. the combo model) that prints its own summary.
        if summ:
            name_yend = USER.set_name_yend(name_yend, yend)
            self.name_y = USER.set_name_y(name_y)
            name_q = USER.set_name_q(name_q, q)
        self.name_x_r = name_x + name_yend
        cols2regi = REGI.check_cols2regi(
            constant_regi, cols2regi, x, yend=yend)
        self.regimes_set = REGI._get_regimes_set(regimes)
        self.regimes = regimes
        USER.check_regimes(self.regimes_set, self.n, x.shape[1])
        self.regime_err_sep = regime_err_sep
        if regime_err_sep == True:
            # Fully separate estimation per regime is only valid when every
            # coefficient is allowed to vary across regimes.
            if set(cols2regi) == set([True]):
                self._endog_error_regimes_multi(y, x, regimes, w, yend, q, cores,
                                                cols2regi, vm, name_x, name_yend, name_q, add_lag)
            else:
                raise Exception, "All coefficients must vary accross regimes if regime_err_sep = True."
        else:
            # Single regression: expand x, yend and q by regime.
            x_constant = USER.check_constant(x)
            q, name_q = REGI.Regimes_Frame.__init__(self, q,
                                                    regimes, constant_regi=None, cols2regi='all', names=name_q)
            x, name_x = REGI.Regimes_Frame.__init__(self, x_constant,
                                                    regimes, constant_regi=None, cols2regi=cols2regi,
                                                    names=name_x)
            yend2, name_yend = REGI.Regimes_Frame.__init__(self, yend,
                                                           regimes, constant_regi=None,
                                                           cols2regi=cols2regi, yend=True, names=name_yend)
            # Step 1: 2SLS residuals feed the GM moment conditions used to
            # obtain a point estimate of the spatial error parameter lambda.
            tsls = BaseTSLS(y=y, x=x, yend=yend2, q=q)
            self.k = tsls.z.shape[1]
            self.x = tsls.x
            self.yend, self.z = tsls.yend, tsls.z
            moments = _momentsGM_Error(w, tsls.u)
            lambda1 = optim_moments(moments)
            # Step 2: rerun 2SLS on the spatially filtered variables,
            # re-using the first-step instrument matrix h.
            xs = get_spFilter(w, lambda1, x_constant)
            xs = REGI.Regimes_Frame.__init__(self, xs,
                                             regimes, constant_regi=None, cols2regi=cols2regi)[0]
            ys = get_spFilter(w, lambda1, y)
            yend_s = get_spFilter(w, lambda1, yend)
            yend_s = REGI.Regimes_Frame.__init__(self, yend_s,
                                                 regimes, constant_regi=None, cols2regi=cols2regi,
                                                 yend=True)[0]
            tsls2 = BaseTSLS(ys, xs, yend_s, h=tsls.h)
            # Output
            # Predictions/residuals are computed on the *unfiltered* z,
            # with the second-step betas.
            self.betas = np.vstack((tsls2.betas, np.array([[lambda1]])))
            self.predy = spdot(tsls.z, tsls2.betas)
            self.u = y - self.predy
            self.sig2 = float(np.dot(tsls2.u.T, tsls2.u)) / self.n
            self.e_filtered = self.u - lambda1 * lag_spatial(w, self.u)
            self.vm = self.sig2 * tsls2.varb
            self.name_x = USER.set_name_x(name_x, x, constant=True)
            self.name_yend = USER.set_name_yend(name_yend, yend)
            self.name_z = self.name_x + self.name_yend
            self.name_z.append('lambda')
            self.name_q = USER.set_name_q(name_q, q)
            self.name_h = USER.set_name_h(self.name_x, self.name_q)
            self.kf += 1
            self.chow = REGI.Chow(self)
            self._cache = {}
            if summ:
                self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES - REGIMES"
                SUMMARY.GM_Endog_Error(reg=self, w=w, vm=vm, regimes=True)

    def _endog_error_regimes_multi(self, y, x, regimes, w, yend, q, cores,
                                   cols2regi, vm, name_x, name_yend, name_q, add_lag):
        # Run one independent endogenous-error regression per regime, then
        # pool the results into block-structured attributes on self.
        # Map each regime label to the row indices of its observations.
        regi_ids = dict(
            (r, list(np.where(np.array(regimes) == r)[0])) for r in self.regimes_set)
        if add_lag != False:
            # Combo (lag + error) variant: the spatially lagged dependent
            # variable adds one more regime-varying column.
            self.cols2regi += [True]
            cols2regi += [True]
            self.predy_e = np.zeros((self.n, 1), float)
            self.e_pred = np.zeros((self.n, 1), float)
        results_p = {}
        for r in self.regimes_set:
            """
            if system() == 'Windows':
                results_p[r] = _work_endog_error(*(y,x,yend,q,regi_ids,r,w,self.name_ds,self.name_y,name_x,name_yend,name_q,self.name_w,self.name_regimes,add_lag))
                is_win = True
            else:
                pool = mp.Pool(cores)
                results_p[r] = pool.apply_async(_work_endog_error,args=(y,x,yend,q,regi_ids,r,w,self.name_ds,self.name_y,name_x,name_yend,name_q,self.name_w,self.name_regimes,add_lag, ))
                is_win = False
            """
        # NOTE(review): the quoted block above is dead code kept from an
        # earlier platform-dependent (Windows) implementation.
        for r in self.regimes_set:
            if cores:
                pool = mp.Pool(None)
                results_p[r] = pool.apply_async(_work_endog_error, args=(
                    y, x, yend, q, regi_ids, r, w, self.name_ds, self.name_y, name_x, name_yend, name_q, self.name_w, self.name_regimes, add_lag, ))
            else:
                results_p[r] = _work_endog_error(
                    *(y, x, yend, q, regi_ids, r, w, self.name_ds, self.name_y, name_x, name_yend, name_q, self.name_w, self.name_regimes, add_lag))
        self.kryd, self.kf = 0, 0
        self.kr = len(cols2regi)
        self.nr = len(self.regimes_set)
        # Pre-allocate pooled outputs; each regime fills its own slice below.
        self.vm = np.zeros((self.nr * self.kr, self.nr * self.kr), float)
        self.betas = np.zeros((self.nr * (self.kr + 1), 1), float)
        self.u = np.zeros((self.n, 1), float)
        self.predy = np.zeros((self.n, 1), float)
        self.e_filtered = np.zeros((self.n, 1), float)
        """
        if not is_win:
            pool.close()
            pool.join()
        """
        if cores:
            pool.close()
            pool.join()
        results = {}
        self.name_y, self.name_x, self.name_yend, self.name_q, self.name_z, self.name_h = [
        ], [], [], [], [], []
        counter = 0
        for r in self.regimes_set:
            """
            if is_win:
                results[r] = results_p[r]
            else:
                results[r] = results_p[r].get()
            """
            if not cores:
                results[r] = results_p[r]
            else:
                results[r] = results_p[r].get()
            # Block-diagonal placement of this regime's vm and betas;
            # residuals/predictions are scattered back to the regime's rows.
            self.vm[(counter * self.kr):((counter + 1) * self.kr),
                    (counter * self.kr):((counter + 1) * self.kr)] = results[r].vm
            self.betas[
                (counter * (self.kr + 1)):((counter + 1) * (self.kr + 1)), ] = results[r].betas
            self.u[regi_ids[r], ] = results[r].u
            self.predy[regi_ids[r], ] = results[r].predy
            self.e_filtered[regi_ids[r], ] = results[r].e_filtered
            self.name_y += results[r].name_y
            self.name_x += results[r].name_x
            self.name_yend += results[r].name_yend
            self.name_q += results[r].name_q
            self.name_z += results[r].name_z
            self.name_h += results[r].name_h
            if add_lag != False:
                self.predy_e[regi_ids[r], ] = results[r].predy_e
                self.e_pred[regi_ids[r], ] = results[r].e_pred
            counter += 1
        self.chow = REGI.Chow(self)
        self.multi = results
        if add_lag != False:
            SUMMARY.GM_Combo_multi(
                reg=self, multireg=self.multi, vm=vm, regimes=True)
        else:
            SUMMARY.GM_Endog_Error_multi(
                reg=self, multireg=self.multi, vm=vm, regimes=True)
class GM_Combo_Regimes(GM_Endog_Error_Regimes, REGI.Regimes_Frame):
"""
GMM method for a spatial lag and error model with regimes and endogenous
variables, with results and diagnostics; based on Kelejian and Prucha (1998,
1999)[1]_[2]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : pysal W object
Spatial weights object (always needed)
constant_regi: ['one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime (default)
cols2regi : list, 'all'
Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all' (default), all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
regime_lag_sep : boolean
If True, the spatial parameter for spatial lag is also
computed according to different regimes. If False (default),
the spatial parameter is fixed accross regimes.
w_lags : integer
Orders of W to include as instruments for the spatially
lagged dependent variable. For example, w_lags=1, then
instruments are WX; if w_lags=2, then WX, WWX; and so on.
lag_q : boolean
If True, then include spatial lags of the additional
instruments (q).
vm : boolean
If True, include variance-covariance matrix in summary
results
cores : boolean
Specifies if multiprocessing is to be used
Default: no multiprocessing, cores = False
Note: Multiprocessing may not work on all platforms.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regime variable for use in the output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
e_pred : array
nx1 array of residuals (using reduced form)
predy : array
nx1 array of predicted y values
predy_e : array
nx1 array of predicted y values (using reduced form)
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
z : array
nxk array of variables (combination of x and yend)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
pr2_e : float
Pseudo R squared (squared correlation between y and ypred_e
(using reduced form))
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
sig2 : float
Sigma squared used in computations (based on filtered
residuals)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
std_err : array
1xk array of standard errors of the betas
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
                   Names of all instruments used in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regimes variable for use in output
title : string
Name of the regression method used
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi : ['one', 'many']
Ignored if regimes=False. Constant option for regimes.
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
cols2regi : list, 'all'
Ignored if regimes=False. Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all', all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
regime_lag_sep : boolean
If True, the spatial parameter for spatial lag is also
computed according to different regimes. If False (default),
the spatial parameter is fixed accross regimes.
kr : int
Number of variables/columns to be "regimized" or subject
to change by regime. These will result in one parameter
estimate by regime for each variable (i.e. nr parameters per
variable)
kf : int
Number of variables/columns to be considered fixed or
global across regimes and hence only obtain one parameter
estimate
nr : int
Number of different regimes in the 'regimes' list
multi : dictionary
Only available when multiple regressions are estimated,
i.e. when regime_err_sep=True and no variable is fixed
across regimes.
Contains all attributes of each individual regression
References
----------
.. [1] Kelejian, H.R., Prucha, I.R. (1998) "A generalized spatial
two-stage least squares procedure for estimating a spatial autoregressive
model with autoregressive disturbances". The Journal of Real State
Finance and Economics, 17, 1.
.. [2] Kelejian, H.R., Prucha, I.R. (1999) "A Generalized Moments
Estimator for the Autoregressive Parameter in a Spatial Model".
International Economic Review, 40, 2.
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
This is the DBF associated with the NAT shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y_var = 'HR90'
>>> y = np.array([db.by_col(y_var)]).reshape(3085,1)
Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
the DBF to be used as independent variables in the regression. Other variables
can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2','...]
Note that PySAL requires this to be an nxj numpy array, where j is the
number of independent variables (not including a constant). By default
this model adds a vector of ones to the independent variables passed in.
>>> x_var = ['PS90','UE90']
>>> x = np.array([db.by_col(name) for name in x_var]).T
The different regimes in this data are given according to the North and
South dummy (SOUTH).
>>> r_var = 'SOUTH'
>>> regimes = db.by_col(r_var)
Since we want to run a spatial lag model, we need to specify
the spatial weights matrix that includes the spatial configuration of the
observations. To do that, we can open an already existing gal file or
create a new one. In this case, we will create one from ``NAT.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("NAT.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
The Combo class runs an SARAR model, that is a spatial lag+error model.
In this case we will run a simple version of that, where we have the
spatial effects as well as exogenous variables. Since it is a spatial
model, we have to pass in the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> model = GM_Combo_Regimes(y, x, regimes, w=w, name_y=y_var, name_x=x_var, name_regimes=r_var, name_ds='NAT')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them. Note that because we are running the classical GMM error
model from 1998/99, the spatial parameter is obtained as a point estimate, so
    although you get a value for it (there are four coefficients under
model.betas), you cannot perform inference on it (there are only three
values in model.se_betas). Also, this regression uses a two stage least
squares estimation method that accounts for the endogeneity created by the
spatial lag of the dependent variable. We can have a summary of the
output by typing: model.summary
Alternatively, we can check the betas:
>>> print model.name_z
['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', '_Global_W_HR90', 'lambda']
>>> print np.around(model.betas,4)
[[ 1.4607]
[ 0.958 ]
[ 0.5658]
[ 9.113 ]
[ 1.1338]
[ 0.6517]
[-0.4583]
[ 0.6136]]
And lambda:
>>> print 'lambda: ', np.around(model.betas[-1], 4)
lambda: [ 0.6136]
This class also allows the user to run a spatial lag+error model with the
extra feature of including non-spatial endogenous regressors. This means
that, in addition to the spatial lag and error, we consider some of the
variables on the right-hand side of the equation as endogenous and we
instrument for this. In this case we consider RD90 (resource deprivation)
as an endogenous regressor. We use FP89 (families below poverty)
for this and hence put it in the instruments parameter, 'q'.
>>> yd_var = ['RD90']
>>> yd = np.array([db.by_col(name) for name in yd_var]).T
>>> q_var = ['FP89']
>>> q = np.array([db.by_col(name) for name in q_var]).T
And then we can run and explore the model analogously to the previous combo:
>>> model = GM_Combo_Regimes(y, x, regimes, yd, q, w=w, name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='NAT')
>>> print model.name_z
['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', '0_RD90', '1_RD90', '_Global_W_HR90', 'lambda']
>>> print model.betas
[[ 3.41963782]
[ 1.04065841]
[ 0.16634393]
[ 8.86544628]
[ 1.85120528]
[-0.24908469]
[ 2.43014046]
[ 3.61645481]
[ 0.03308671]
[ 0.18684992]]
>>> print np.sqrt(model.vm.diagonal())
[ 0.53067577 0.13271426 0.06058025 0.76406411 0.17969783 0.07167421
0.28943121 0.25308326 0.06126529]
>>> print 'lambda: ', np.around(model.betas[-1], 4)
lambda: [ 0.1868]
"""
def __init__(self, y, x, regimes, yend=None, q=None,
w=None, w_lags=1, lag_q=True, cores=False,
constant_regi='many', cols2regi='all',
regime_err_sep=False, regime_lag_sep=False,
vm=False, name_y=None, name_x=None,
name_yend=None, name_q=None,
name_w=None, name_ds=None, name_regimes=None):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
name_x = USER.set_name_x(name_x, x, constant=True)
self.name_y = USER.set_name_y(name_y)
name_yend = USER.set_name_yend(name_yend, yend)
name_q = USER.set_name_q(name_q, q)
name_q.extend(
USER.set_name_q_sp(name_x, w_lags, name_q, lag_q, force_all=True))
cols2regi = REGI.check_cols2regi(
constant_regi, cols2regi, x, yend=yend, add_cons=False)
self.regimes_set = REGI._get_regimes_set(regimes)
self.regimes = regimes
USER.check_regimes(self.regimes_set, n, x.shape[1])
self.regime_err_sep = regime_err_sep
self.regime_lag_sep = regime_lag_sep
if regime_lag_sep == True:
if regime_err_sep == False:
raise Exception, "For spatial combo models, if spatial lag is set by regimes (regime_lag_sep=True), spatial error must also be set by regimes (regime_err_sep=True)."
add_lag = [w_lags, lag_q]
else:
if regime_err_sep == True:
raise Exception, "For spatial combo models, if spatial error is set by regimes (regime_err_sep=True), all coefficients including lambda (regime_lag_sep=True) must be set by regimes."
cols2regi += [False]
add_lag = False
yend, q = set_endog(y, x, w, yend, q, w_lags, lag_q)
name_yend.append(USER.set_name_yend_sp(self.name_y))
GM_Endog_Error_Regimes.__init__(self, y=y, x=x, yend=yend,
q=q, regimes=regimes, w=w, vm=vm, constant_regi=constant_regi,
cols2regi=cols2regi, regime_err_sep=regime_err_sep, cores=cores,
name_y=self.name_y, name_x=name_x,
name_yend=name_yend, name_q=name_q, name_w=name_w,
name_ds=name_ds, name_regimes=name_regimes, summ=False, add_lag=add_lag)
if regime_err_sep != True:
self.rho = self.betas[-2]
self.predy_e, self.e_pred, warn = sp_att(w, self.y,
self.predy, yend[:, -1].reshape(self.n, 1), self.rho)
set_warn(self, warn)
self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES - REGIMES"
SUMMARY.GM_Combo(reg=self, w=w, vm=vm, regimes=True)
def _work_error(y, x, regi_ids, r, w, name_ds, name_y, name_x, name_w, name_regimes):
    """Fit a spatially weighted error model for the observations of a single
    regime ``r`` and return the fitted BaseGM_Error, with all output names
    prefixed by the regime label."""
    ids = regi_ids[r]
    w_regime, warn = REGI.w_regime(w, ids, r, transform=True)
    x_with_const = USER.check_constant(x[ids])
    fitted = BaseGM_Error(y[ids], x_with_const, w_regime.sparse)
    set_warn(fitted, warn)
    fitted.w = w_regime
    fitted.title = "SPATIALLY WEIGHTED LEAST SQUARES ESTIMATION - REGIME %s" % r
    fitted.name_ds = name_ds
    prefix = str(r)
    fitted.name_y = '%s_%s' % (prefix, name_y)
    fitted.name_x = ['%s_%s' % (prefix, label) for label in name_x]
    fitted.name_w = name_w
    fitted.name_regimes = name_regimes
    return fitted
def _work_endog_error(y, x, yend, q, regi_ids, r, w, name_ds, name_y, name_x, name_yend, name_q, name_w, name_regimes, add_lag):
    """Estimate a GM endogenous-error (optionally combo) model for one regime.

    Runs as a worker (possibly in a separate process): subsets y/x/yend/q to
    the observations of regime ``r``, rebuilds the regime weights, fits a
    BaseGM_Endog_Error and labels the results with regime-prefixed names.
    ``add_lag`` is either False or ``[w_lags, lag_q]`` for the combo model.
    """
    w_r, warn = REGI.w_regime(w, regi_ids[r], r, transform=True)
    y_r = y[regi_ids[r]]
    x_r = x[regi_ids[r]]
    # BUG FIX: test identity with `is not None` rather than `!= None`;
    # `yend` is a numpy array, so `yend != None` is an elementwise
    # comparison whose truth value is ambiguous on modern numpy.
    if yend is not None:
        yend_r = yend[regi_ids[r]]
        q_r = q[regi_ids[r]]
    else:
        yend_r, q_r = None, None
    if add_lag is not False:
        # Combo model: build the spatial lag and its instruments per regime.
        yend_r, q_r = set_endog(
            y_r, x_r, w_r, yend_r, q_r, add_lag[0], add_lag[1])
    x_constant = USER.check_constant(x_r)
    model = BaseGM_Endog_Error(y_r, x_constant, yend_r, q_r, w_r.sparse)
    set_warn(model, warn)
    if add_lag is not False:
        # Reduced-form predictions for the spatial lag term.
        model.rho = model.betas[-2]
        model.predy_e, model.e_pred, warn = sp_att(w_r, model.y,
            model.predy, model.yend[:, -1].reshape(model.n, 1), model.rho)
        set_warn(model, warn)
    model.w = w_r
    model.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES - REGIME %s" % r
    model.name_ds = name_ds
    model.name_y = '%s_%s' % (str(r), name_y)
    model.name_x = ['%s_%s' % (str(r), i) for i in name_x]
    model.name_yend = ['%s_%s' % (str(r), i) for i in name_yend]
    model.name_z = model.name_x + model.name_yend + ['lambda']
    model.name_q = ['%s_%s' % (str(r), i) for i in name_q]
    model.name_h = model.name_x + model.name_q
    model.name_w = name_w
    model.name_regimes = name_regimes
    return model
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
import pysal
import numpy as np
dbf = pysal.open(pysal.examples.get_path('columbus.dbf'), 'r')
y = np.array([dbf.by_col('CRIME')]).T
names_to_extract = ['INC']
x = np.array([dbf.by_col(name) for name in names_to_extract]).T
yd_var = ['HOVAL']
yend = np.array([dbf.by_col(name) for name in yd_var]).T
q_var = ['DISCBD']
q = np.array([dbf.by_col(name) for name in q_var]).T
regimes = regimes = dbf.by_col('NSA')
w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
w.transform = 'r'
model = GM_Error_Regimes(y, x, regimes=regimes, w=w, name_y='crime', name_x=[
'income'], name_regimes='nsa', name_ds='columbus', regime_err_sep=True)
print model.summary
|
spreg-git/pysal
|
pysal/spreg/error_sp_regimes.py
|
Python
|
bsd-3-clause
| 64,257
|
[
"COLUMBUS"
] |
f47f1f3fc1957246cd8b38636dbff796b4f0a3cee3296102ee407f315fd85e57
|
from __future__ import division, unicode_literals
import unittest
import os
import json
import scipy
from io import open
from pymatgen.phonon.dos import CompletePhononDos
from pymatgen.phonon.plotter import PhononDosPlotter, PhononBSPlotter, ThermoPlotter
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class PhononDosPlotterTest(unittest.TestCase):
    """Tests for PhononDosPlotter using the NaCl complete phonon DOS."""

    def setUp(self):
        dos_path = os.path.join(test_dir, "NaCl_complete_ph_dos.json")
        with open(dos_path, "r") as f:
            self.dos = CompletePhononDos.from_dict(json.load(f))
        self.plotter = PhononDosPlotter(sigma=0.2, stack=True)
        self.plotter_nostack = PhononDosPlotter(sigma=0.2, stack=False)

    def test_add_dos_dict(self):
        # Starts empty, then holds one entry per element (Na, Cl).
        self.assertEqual(len(self.plotter.get_dos_dict()), 0)
        self.plotter.add_dos_dict(self.dos.get_element_dos(),
                                  key_sort_func=lambda x: x.X)
        self.assertEqual(len(self.plotter.get_dos_dict()), 2)

    def test_get_dos_dict(self):
        self.plotter.add_dos_dict(self.dos.get_element_dos(),
                                  key_sort_func=lambda x: x.X)
        dos_dict = self.plotter.get_dos_dict()
        for element in ("Na", "Cl"):
            self.assertIn(element, dos_dict)

    def test_plot(self):
        # Disabling latex for testing.
        from matplotlib import rc
        rc('text', usetex=False)
        for plotter in (self.plotter, self.plotter_nostack):
            plotter.add_dos("Total", self.dos)
            plotter.get_plot(units="mev")
class PhononBSPlotterTest(unittest.TestCase):
    """Tests for PhononBSPlotter on the NaCl phonon band structure."""

    def setUp(self):
        bs_path = os.path.join(test_dir, "NaCl_phonon_bandstructure.json")
        with open(bs_path, "r") as f:
            self.bs = PhononBandStructureSymmLine.from_dict(json.loads(f.read()))
        self.plotter = PhononBSPlotter(self.bs)

    def test_bs_plot_data(self):
        # Compute the plot data once and assert on its structure.
        data = self.plotter.bs_plot_data()
        self.assertEqual(len(data['distances'][0]), 51,
                         "wrong number of distances in the first branch")
        self.assertEqual(len(data['distances']), 4,
                         "wrong number of branches")
        total_distances = sum(len(branch) for branch in data['distances'])
        self.assertEqual(total_distances, 204, "wrong number of distances")
        self.assertEqual(data['ticks']['label'][4], "Y", "wrong tick label")
        self.assertEqual(len(data['ticks']['label']), 8,
                         "wrong number of tick labels")

    def test_plot(self):
        # Disabling latex for testing.
        from matplotlib import rc
        rc('text', usetex=False)
        self.plotter.get_plot(units="mev")
class ThermoPlotterTest(unittest.TestCase):
    """Smoke tests for ThermoPlotter thermodynamic-property plots."""

    def setUp(self):
        dos_path = os.path.join(test_dir, "NaCl_complete_ph_dos.json")
        with open(dos_path, "r") as f:
            self.dos = CompletePhononDos.from_dict(json.load(f))
        self.plotter = ThermoPlotter(self.dos, self.dos.structure)

    def test_plot_functions(self):
        # Disabling latex for testing.
        from matplotlib import rc
        rc('text', usetex=False)
        # Each plot is exercised over the same 5..100 K range without display.
        for plot in (self.plotter.plot_cv,
                     self.plotter.plot_entropy,
                     self.plotter.plot_internal_energy,
                     self.plotter.plot_helmholtz_free_energy,
                     self.plotter.plot_thermodynamic_properties):
            plot(5, 100, 5, show=False)
# Allow running this test module directly: `python test_plotter.py`.
if __name__ == "__main__":
    unittest.main()
|
czhengsci/pymatgen
|
pymatgen/phonon/tests/test_plotter.py
|
Python
|
mit
| 3,708
|
[
"pymatgen"
] |
1aa4c5d37525c26c10d4a28e3c93a1a4750eb5b2939b1739fd369f1885f44fe7
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gramps.gen.plug._pluginreg import *
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
# Target Gramps version for all plugins registered below.
MODULE_VERSION="5.2"

# this is the default in gen/plug/_pluginreg.py: plg.require_active = True

#------------------------------------------------------------------------
#
# Family Lines Graph
#
#------------------------------------------------------------------------

plg = newplugin()
plg.id = 'familylines_graph'
plg.name = _("Family Lines Graph")
plg.description = _("Produces family line graphs using Graphviz.")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'gvfamilylines.py'
plg.ptype = REPORT
plg.authors = ["Stephane Charette"]
plg.authors_email = ["stephanecharette@gmail.com"]
plg.category = CATEGORY_GRAPHVIZ
plg.reportclass = 'FamilyLinesReport'
plg.optionclass = 'FamilyLinesOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_CLI]
# This report can run without an active person being selected.
plg.require_active = False

#------------------------------------------------------------------------
#
# Hourglass Graph
#
#------------------------------------------------------------------------

plg = newplugin()
plg.id = 'hourglass_graph'
plg.name = _("Hourglass Graph")
plg.description = _("Produces an hourglass graph using Graphviz.")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'gvhourglass.py'
plg.ptype = REPORT
plg.authors = ["Brian G. Matherly"]
plg.authors_email = ["brian@gramps-project.org"]
plg.category = CATEGORY_GRAPHVIZ
plg.reportclass = 'HourGlassReport'
plg.optionclass = 'HourGlassOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_CLI]

#------------------------------------------------------------------------
#
# Relationship Graph
#
#------------------------------------------------------------------------

plg = newplugin()
plg.id = 'rel_graph'
plg.name = _("Relationship Graph")
plg.description = _("Produces relationship graphs using Graphviz.")
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'gvrelgraph.py'
plg.ptype = REPORT
plg.authors = ["Brian G. Matherly"]
plg.authors_email = ["brian@gramps-project.org"]
plg.category = CATEGORY_GRAPHVIZ
plg.reportclass = 'RelGraphReport'
plg.optionclass = 'RelGraphOptions'
plg.report_modes = [REPORT_MODE_GUI, REPORT_MODE_CLI]
|
Fedik/gramps
|
gramps/plugins/graph/graphplugins.gpr.py
|
Python
|
gpl-2.0
| 3,139
|
[
"Brian"
] |
bddfb8324d8d5f2058294aa98aa195fc4e8e07180bc705f6800428d4a5b4cb32
|
# -*- coding: utf-8 -*-
from lxml import etree
from lxml import html
import requests
def main():
    """Fetch the activity page and print its raw content plus all anchor
    href attributes found by the XPath selector."""
    url = 'http://lispon.moe/cdn/activity/act161108/index.html?aUserId=1494366573'
    # ROBUSTNESS FIX: a timeout prevents a dead server from hanging the
    # script forever, and raise_for_status fails fast on HTTP errors
    # instead of silently parsing an error page (the original was marked
    # "doesn't work").
    page = requests.get(url, timeout=10)
    page.raise_for_status()
    tree = html.fromstring(page.content)
    print(page.content)
    xpath_selector = "//a/@href"
    #xpath_selector = "//p[contains(@class,'followed')]"
    prices = tree.xpath(xpath_selector)
    print(prices)
# Script entry point.
if __name__ == '__main__':
    main()
|
umyuu/Sample
|
src/Python3/Q102431/exsample.py
|
Python
|
mit
| 478
|
[
"MOE"
] |
6b06cfac3e6511c5937fa846fad95de1d83ed523485cb30d90de533f3dad582d
|
#!/bin/env python
"""
Illustrate how to combine a SMIRNOFF parameterized small molecule with an AMBER parameterized protein using ParmEd.
"""
# BUG FIX: the original script used several names before (or without ever)
# importing them: `Molecule`, `app` (imported after first use), and `parmed`
# (never imported); it also referenced an undefined `proteinpdb` instead of
# the `protein_pdbfile` it created. All imports are now at the top and the
# PDB object is referenced consistently.
import parmed
from simtk.openmm import app

from openff.toolkit.topology import Molecule
from openff.toolkit.typing.engines import smirnoff
from openff.toolkit.utils import get_data_file_path

#
# Load and parameterize the small molecule
#

# Load the small molecule
ligand_filename = get_data_file_path('molecules/toluene.mol2')
molecule = Molecule.from_file(ligand_filename)

# Load the smirnoff99Frosst force field
forcefield = smirnoff.ForceField('test_forcefields/test_forcefield.offxml')

# Create a ParmEd structure for the molecule
molecule_structure = forcefield.create_parmed_structure(topology=molecule.to_topology(), positions=molecule.positions)
print('Molecule:', molecule_structure)

#
# Load and parameterize the protein
#

# Load the protein topology
protein_pdb_filename = get_data_file_path('proteins/T4-protein.pdb')
protein_pdbfile = app.PDBFile(protein_pdb_filename)

# Load the AMBER protein force field, along with a solvent force field
protein_forcefield = 'amber99sbildn.xml'
solvent_forcefield = 'tip3p.xml'
forcefield = app.ForceField(protein_forcefield, solvent_forcefield)

# Parameterize the protein
protein_system = forcefield.createSystem(protein_pdbfile.topology)

# Create a ParmEd Structure for the protein
protein_structure = parmed.openmm.load_topology(protein_pdbfile.topology,
                                                protein_system,
                                                xyz=protein_pdbfile.positions)
print('Protein:', protein_structure)

# Combine the ParmEd Structure objects to produce a fully parameterized complex
# that can now be exported to AMBER, CHARMM, OpenMM, and other packages
# Note that the addition should always add the small molecule second so the box vectors if the first item (the protein) are to be preserved
complex_structure = protein_structure + molecule_structure
print('Complex:', complex_structure)

# TODO: How can we add solvent while ensuring the ligand doesn't overlap with solvent molecules?
# TODO: Can we have SMIRNOFF ForceField create an OpenMM ffxml file for the ligand, and then use the OpenMM pipeline?
# TODO: Or can OpenMM just use dummy parameters?
|
open-forcefield-group/openforcefield
|
examples/deprecated/mixedFF_structure/generate_mixedFF_complex.py
|
Python
|
mit
| 2,220
|
[
"Amber",
"CHARMM",
"OpenMM"
] |
a14b5069845aae1ee5c9e858cee047475db2f15bb0438bdac9c61dbc1e1c679a
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a formatter for the Mozilla Firefox history."""
from plaso.lib import errors
from plaso.lib import eventdata
class FirefoxBookmarkAnnotationFormatter(eventdata.ConditionalEventFormatter):
  """Formatter for a Firefox places.sqlite bookmark annotation."""

  # Event data type this formatter handles.
  DATA_TYPE = 'firefox:places:bookmark_annotation'

  # Pieces are only emitted when the corresponding event attribute exists.
  FORMAT_STRING_PIECES = [
      u'Bookmark Annotation: [{content}]',
      u'to bookmark [{title}]',
      u'({url})']

  FORMAT_STRING_SHORT_PIECES = [u'Bookmark Annotation: {title}']

  # Source description shown in output (long and short forms).
  SOURCE_LONG = 'Firefox History'
  SOURCE_SHORT = 'WEBHIST'
class FirefoxBookmarkFolderFormatter(eventdata.EventFormatter):
  """Formatter for a Firefox places.sqlite bookmark folder."""

  # Event data type this formatter handles.
  DATA_TYPE = 'firefox:places:bookmark_folder'

  # A folder event is rendered by its title alone.
  FORMAT_STRING = u'{title}'

  SOURCE_LONG = 'Firefox History'
  SOURCE_SHORT = 'WEBHIST'
class FirefoxBookmarkFormatter(eventdata.ConditionalEventFormatter):
  """Formatter for a Firefox places.sqlite URL bookmark."""

  # Event data type this formatter handles.
  DATA_TYPE = 'firefox:places:bookmark'

  # Pieces are only emitted when the corresponding event attribute exists.
  FORMAT_STRING_PIECES = [
      u'Bookmark {type}',
      u'{title}',
      u'({url})',
      u'[{places_title}]',
      u'visit count {visit_count}']

  FORMAT_STRING_SHORT_PIECES = [
      u'Bookmarked {title}',
      u'({url})']

  SOURCE_LONG = 'Firefox History'
  SOURCE_SHORT = 'WEBHIST'
class FirefoxPageVisitFormatter(eventdata.ConditionalEventFormatter):
  """Formatter for a Firefox places.sqlite page visited."""

  DATA_TYPE = 'firefox:places:page_visited'

  # Transitions defined in the source file:
  #   src/toolkit/components/places/nsINavHistoryService.idl
  # Also contains further explanation into what each of these settings mean.
  _URL_TRANSITIONS = {
      1: 'LINK',
      2: 'TYPED',
      3: 'BOOKMARK',
      4: 'EMBED',
      5: 'REDIRECT_PERMANENT',
      6: 'REDIRECT_TEMPORARY',
      7: 'DOWNLOAD',
      8: 'FRAMED_LINK'}

  # BUG FIX: the original called _URL_TRANSITIONS.setdefault('UNKOWN'),
  # which only inserted a bogus string key 'UNKOWN' (a typo of UNKNOWN)
  # mapped to None — it did not install a fallback for unknown integer
  # visit types. Unknown visit types intentionally produce no transition
  # string (see GetMessages below), so the line is removed.

  # TODO: Make extra conditional formatting.
  FORMAT_STRING_PIECES = [
      u'{url}',
      u'({title})',
      u'[count: {visit_count}]',
      u'Host: {host}',
      u'{extra_string}']

  FORMAT_STRING_SHORT_PIECES = [u'URL: {url}']

  SOURCE_LONG = 'Firefox History'
  SOURCE_SHORT = 'WEBHIST'

  def GetMessages(self, event_object):
    """Return the message strings.

    Builds the optional 'extra_string' attribute from any pre-existing
    'extra' list plus the decoded visit transition, then delegates the
    actual formatting to the conditional base class.

    Raises:
      errors.WrongFormatter: if the event data type does not match.
    """
    if self.DATA_TYPE != event_object.data_type:
      raise errors.WrongFormatter(u'Unsupported data type: {0:s}.'.format(
          event_object.data_type))

    # Unknown or missing visit types map to None: no transition string.
    transition = self._URL_TRANSITIONS.get(
        getattr(event_object, 'visit_type', 0))

    if transition:
      transition_str = u'Transition: {0!s}'.format(transition)

    if hasattr(event_object, 'extra'):
      if transition:
        event_object.extra.append(transition_str)
      event_object.extra_string = u' '.join(event_object.extra)
    elif transition:
      event_object.extra_string = transition_str

    return super(FirefoxPageVisitFormatter, self).GetMessages(event_object)
# NOTE(review): the class name misspells "Download"; it is kept as-is since
# formatter classes may be referenced by name elsewhere in the codebase.
class FirefoxDowloadFormatter(eventdata.EventFormatter):
  """Formatter for a Firefox downloads.sqlite download."""

  # Event data type this formatter handles.
  DATA_TYPE = 'firefox:downloads:download'

  FORMAT_STRING = (u'{url} ({full_path}). Received: {received_bytes} bytes '
                   u'out of: {total_bytes} bytes.')
  FORMAT_STRING_SHORT = u'{full_path} downloaded ({received_bytes} bytes)'

  SOURCE_LONG = 'Firefox History'
  SOURCE_SHORT = 'WEBHIST'
|
iwm911/plaso
|
plaso/formatters/firefox.py
|
Python
|
apache-2.0
| 4,092
|
[
"VisIt"
] |
6b0e821657c9f2951052233a855fa346cb746abdf621b5afeec2efaf414d9c05
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.resourcemanager_v3.services.tag_keys import TagKeysAsyncClient
from google.cloud.resourcemanager_v3.services.tag_keys import TagKeysClient
from google.cloud.resourcemanager_v3.services.tag_keys import pagers
from google.cloud.resourcemanager_v3.services.tag_keys import transports
from google.cloud.resourcemanager_v3.types import tag_keys
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import options_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import expr_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert TagKeysClient._get_default_mtls_endpoint(None) is None
assert TagKeysClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
TagKeysClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
)
assert (
TagKeysClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
TagKeysClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert TagKeysClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [TagKeysClient, TagKeysAsyncClient,])
def test_tag_keys_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "cloudresourcemanager.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.TagKeysGrpcTransport, "grpc"),
(transports.TagKeysGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_tag_keys_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [TagKeysClient, TagKeysAsyncClient,])
def test_tag_keys_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "cloudresourcemanager.googleapis.com:443"
def test_tag_keys_client_get_transport_class():
transport = TagKeysClient.get_transport_class()
available_transports = [
transports.TagKeysGrpcTransport,
]
assert transport in available_transports
transport = TagKeysClient.get_transport_class("grpc")
assert transport == transports.TagKeysGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TagKeysClient, transports.TagKeysGrpcTransport, "grpc"),
(TagKeysAsyncClient, transports.TagKeysGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
@mock.patch.object(
TagKeysClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TagKeysClient)
)
@mock.patch.object(
TagKeysAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TagKeysAsyncClient)
)
def test_tag_keys_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(TagKeysClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(TagKeysClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(TagKeysClient, transports.TagKeysGrpcTransport, "grpc", "true"),
(
TagKeysAsyncClient,
transports.TagKeysGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(TagKeysClient, transports.TagKeysGrpcTransport, "grpc", "false"),
(
TagKeysAsyncClient,
transports.TagKeysGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
TagKeysClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TagKeysClient)
)
@mock.patch.object(
TagKeysAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TagKeysAsyncClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_tag_keys_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [TagKeysClient, TagKeysAsyncClient])
@mock.patch.object(
TagKeysClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TagKeysClient)
)
@mock.patch.object(
TagKeysAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TagKeysAsyncClient)
)
def test_tag_keys_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TagKeysClient, transports.TagKeysGrpcTransport, "grpc"),
(TagKeysAsyncClient, transports.TagKeysGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_tag_keys_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(TagKeysClient, transports.TagKeysGrpcTransport, "grpc", grpc_helpers),
(
TagKeysAsyncClient,
transports.TagKeysGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_tag_keys_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_tag_keys_client_client_options_from_dict():
with mock.patch(
"google.cloud.resourcemanager_v3.services.tag_keys.transports.TagKeysGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = TagKeysClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(TagKeysClient, transports.TagKeysGrpcTransport, "grpc", grpc_helpers),
(
TagKeysAsyncClient,
transports.TagKeysGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_tag_keys_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"cloudresourcemanager.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
),
scopes=None,
default_host="cloudresourcemanager.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("request_type", [tag_keys.ListTagKeysRequest, dict,])
def test_list_tag_keys(request_type, transport: str = "grpc"):
client = TagKeysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tag_keys), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tag_keys.ListTagKeysResponse(
next_page_token="next_page_token_value",
)
response = client.list_tag_keys(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tag_keys.ListTagKeysRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTagKeysPager)
assert response.next_page_token == "next_page_token_value"
def test_list_tag_keys_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TagKeysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tag_keys), "__call__") as call:
client.list_tag_keys()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tag_keys.ListTagKeysRequest()
@pytest.mark.asyncio
async def test_list_tag_keys_async(
transport: str = "grpc_asyncio", request_type=tag_keys.ListTagKeysRequest
):
client = TagKeysAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tag_keys), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tag_keys.ListTagKeysResponse(next_page_token="next_page_token_value",)
)
response = await client.list_tag_keys(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tag_keys.ListTagKeysRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTagKeysAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_tag_keys_async_from_dict():
await test_list_tag_keys_async(request_type=dict)
def test_list_tag_keys_flattened():
client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tag_keys), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tag_keys.ListTagKeysResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_tag_keys(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_tag_keys_flattened_error():
client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_tag_keys(
tag_keys.ListTagKeysRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_tag_keys_flattened_async():
client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tag_keys), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tag_keys.ListTagKeysResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tag_keys.ListTagKeysResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_tag_keys(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_tag_keys_flattened_error_async():
client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_tag_keys(
tag_keys.ListTagKeysRequest(), parent="parent_value",
)
def test_list_tag_keys_pager(transport_name: str = "grpc"):
client = TagKeysClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tag_keys), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
tag_keys.ListTagKeysResponse(
tag_keys=[tag_keys.TagKey(), tag_keys.TagKey(), tag_keys.TagKey(),],
next_page_token="abc",
),
tag_keys.ListTagKeysResponse(tag_keys=[], next_page_token="def",),
tag_keys.ListTagKeysResponse(
tag_keys=[tag_keys.TagKey(),], next_page_token="ghi",
),
tag_keys.ListTagKeysResponse(
tag_keys=[tag_keys.TagKey(), tag_keys.TagKey(),],
),
RuntimeError,
)
metadata = ()
pager = client.list_tag_keys(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, tag_keys.TagKey) for i in results)
def test_list_tag_keys_pages(transport_name: str = "grpc"):
client = TagKeysClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tag_keys), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
tag_keys.ListTagKeysResponse(
tag_keys=[tag_keys.TagKey(), tag_keys.TagKey(), tag_keys.TagKey(),],
next_page_token="abc",
),
tag_keys.ListTagKeysResponse(tag_keys=[], next_page_token="def",),
tag_keys.ListTagKeysResponse(
tag_keys=[tag_keys.TagKey(),], next_page_token="ghi",
),
tag_keys.ListTagKeysResponse(
tag_keys=[tag_keys.TagKey(), tag_keys.TagKey(),],
),
RuntimeError,
)
pages = list(client.list_tag_keys(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_tag_keys_async_pager():
client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tag_keys), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
tag_keys.ListTagKeysResponse(
tag_keys=[tag_keys.TagKey(), tag_keys.TagKey(), tag_keys.TagKey(),],
next_page_token="abc",
),
tag_keys.ListTagKeysResponse(tag_keys=[], next_page_token="def",),
tag_keys.ListTagKeysResponse(
tag_keys=[tag_keys.TagKey(),], next_page_token="ghi",
),
tag_keys.ListTagKeysResponse(
tag_keys=[tag_keys.TagKey(), tag_keys.TagKey(),],
),
RuntimeError,
)
async_pager = await client.list_tag_keys(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, tag_keys.TagKey) for i in responses)
@pytest.mark.asyncio
async def test_list_tag_keys_async_pages():
client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tag_keys), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
tag_keys.ListTagKeysResponse(
tag_keys=[tag_keys.TagKey(), tag_keys.TagKey(), tag_keys.TagKey(),],
next_page_token="abc",
),
tag_keys.ListTagKeysResponse(tag_keys=[], next_page_token="def",),
tag_keys.ListTagKeysResponse(
tag_keys=[tag_keys.TagKey(),], next_page_token="ghi",
),
tag_keys.ListTagKeysResponse(
tag_keys=[tag_keys.TagKey(), tag_keys.TagKey(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_tag_keys(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [tag_keys.GetTagKeyRequest, dict,])
def test_get_tag_key(request_type, transport: str = "grpc"):
client = TagKeysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tag_key), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tag_keys.TagKey(
name="name_value",
parent="parent_value",
short_name="short_name_value",
namespaced_name="namespaced_name_value",
description="description_value",
etag="etag_value",
)
response = client.get_tag_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tag_keys.GetTagKeyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tag_keys.TagKey)
assert response.name == "name_value"
assert response.parent == "parent_value"
assert response.short_name == "short_name_value"
assert response.namespaced_name == "namespaced_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
def test_get_tag_key_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TagKeysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tag_key), "__call__") as call:
client.get_tag_key()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tag_keys.GetTagKeyRequest()
@pytest.mark.asyncio
async def test_get_tag_key_async(
transport: str = "grpc_asyncio", request_type=tag_keys.GetTagKeyRequest
):
client = TagKeysAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tag_key), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tag_keys.TagKey(
name="name_value",
parent="parent_value",
short_name="short_name_value",
namespaced_name="namespaced_name_value",
description="description_value",
etag="etag_value",
)
)
response = await client.get_tag_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tag_keys.GetTagKeyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tag_keys.TagKey)
assert response.name == "name_value"
assert response.parent == "parent_value"
assert response.short_name == "short_name_value"
assert response.namespaced_name == "namespaced_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_tag_key_async_from_dict():
    """Async get_tag_key also accepts a plain dict as the request."""
    await test_get_tag_key_async(request_type=dict)
def test_get_tag_key_field_headers():
    """get_tag_key propagates request.name as an x-goog-request-params header."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tag_keys.GetTagKeyRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_tag_key), "__call__") as call:
        call.return_value = tag_keys.TagKey()
        client.get_tag_key(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_tag_key_field_headers_async():
    """Async get_tag_key propagates request.name as an x-goog-request-params header."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tag_keys.GetTagKeyRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_tag_key), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tag_keys.TagKey())
        await client.get_tag_key(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_tag_key_flattened():
    """The flattened `name` kwarg is copied into the request object."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = tag_keys.TagKey()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_tag_key(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_tag_key_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_tag_key(
            tag_keys.GetTagKeyRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_tag_key_flattened_async():
    """Async variant: the flattened `name` kwarg is copied into the request object."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.  (A plain
        # TagKey assignment here would be dead code: the awaitable fake
        # below is what the async surface actually consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tag_keys.TagKey())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_tag_key(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_tag_key_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_tag_key(
            tag_keys.GetTagKeyRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [tag_keys.CreateTagKeyRequest, dict,])
def test_create_tag_key(request_type, transport: str = "grpc"):
    """create_tag_key sends the request and returns a long-running operation future."""
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.create_tag_key(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tag_keys.CreateTagKeyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_create_tag_key_empty_call():
    """Calling create_tag_key() with no arguments sends a default CreateTagKeyRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag_key), "__call__") as call:
        client.create_tag_key()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tag_keys.CreateTagKeyRequest()
@pytest.mark.asyncio
async def test_create_tag_key_async(
    transport: str = "grpc_asyncio", request_type=tag_keys.CreateTagKeyRequest
):
    """Async create_tag_key sends the request and returns an operation future."""
    client = TagKeysAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.create_tag_key(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tag_keys.CreateTagKeyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_tag_key_async_from_dict():
    """Async create_tag_key also accepts a plain dict as the request."""
    await test_create_tag_key_async(request_type=dict)
def test_create_tag_key_flattened():
    """The flattened `tag_key` kwarg is copied into the request object."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_tag_key(tag_key=tag_keys.TagKey(name="name_value"),)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].tag_key
        mock_val = tag_keys.TagKey(name="name_value")
        assert arg == mock_val
def test_create_tag_key_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_tag_key(
            tag_keys.CreateTagKeyRequest(), tag_key=tag_keys.TagKey(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_tag_key_flattened_async():
    """Async variant: the flattened `tag_key` kwarg is copied into the request object."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.  (A plain
        # Operation assignment here would be dead code: the awaitable fake
        # below is what the async surface actually consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_tag_key(
            tag_key=tag_keys.TagKey(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].tag_key
        mock_val = tag_keys.TagKey(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tag_key_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_tag_key(
            tag_keys.CreateTagKeyRequest(), tag_key=tag_keys.TagKey(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [tag_keys.UpdateTagKeyRequest, dict,])
def test_update_tag_key(request_type, transport: str = "grpc"):
    """update_tag_key sends the request and returns a long-running operation future."""
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_tag_key(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tag_keys.UpdateTagKeyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_update_tag_key_empty_call():
    """Calling update_tag_key() with no arguments sends a default UpdateTagKeyRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag_key), "__call__") as call:
        client.update_tag_key()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tag_keys.UpdateTagKeyRequest()
@pytest.mark.asyncio
async def test_update_tag_key_async(
    transport: str = "grpc_asyncio", request_type=tag_keys.UpdateTagKeyRequest
):
    """Async update_tag_key sends the request and returns an operation future."""
    client = TagKeysAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_tag_key(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tag_keys.UpdateTagKeyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_tag_key_async_from_dict():
    """Async update_tag_key also accepts a plain dict as the request."""
    await test_update_tag_key_async(request_type=dict)
def test_update_tag_key_field_headers():
    """update_tag_key propagates request.tag_key.name as an x-goog-request-params header."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tag_keys.UpdateTagKeyRequest()
    request.tag_key.name = "tag_key.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag_key), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.update_tag_key(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "tag_key.name=tag_key.name/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_update_tag_key_field_headers_async():
    """Async update_tag_key propagates request.tag_key.name as a field header."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tag_keys.UpdateTagKeyRequest()
    request.tag_key.name = "tag_key.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag_key), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_tag_key(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "tag_key.name=tag_key.name/value",) in kw[
        "metadata"
    ]
def test_update_tag_key_flattened():
    """The flattened `tag_key` and `update_mask` kwargs are copied into the request."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_tag_key(
            tag_key=tag_keys.TagKey(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].tag_key
        mock_val = tag_keys.TagKey(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
def test_update_tag_key_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_tag_key(
            tag_keys.UpdateTagKeyRequest(),
            tag_key=tag_keys.TagKey(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_tag_key_flattened_async():
    """Async variant: flattened `tag_key` and `update_mask` are copied into the request."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.  (A plain
        # Operation assignment here would be dead code: the awaitable fake
        # below is what the async surface actually consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_tag_key(
            tag_key=tag_keys.TagKey(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].tag_key
        mock_val = tag_keys.TagKey(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tag_key_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_tag_key(
            tag_keys.UpdateTagKeyRequest(),
            tag_key=tag_keys.TagKey(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize("request_type", [tag_keys.DeleteTagKeyRequest, dict,])
def test_delete_tag_key(request_type, transport: str = "grpc"):
    """delete_tag_key sends the request and returns a long-running operation future."""
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_tag_key(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == tag_keys.DeleteTagKeyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_delete_tag_key_empty_call():
    """Calling delete_tag_key() with no arguments sends a default DeleteTagKeyRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag_key), "__call__") as call:
        client.delete_tag_key()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == tag_keys.DeleteTagKeyRequest()
@pytest.mark.asyncio
async def test_delete_tag_key_async(
    transport: str = "grpc_asyncio", request_type=tag_keys.DeleteTagKeyRequest
):
    """Async delete_tag_key sends the request and returns an operation future."""
    client = TagKeysAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_tag_key(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == tag_keys.DeleteTagKeyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_tag_key_async_from_dict():
    """Async delete_tag_key also accepts a plain dict as the request."""
    await test_delete_tag_key_async(request_type=dict)
def test_delete_tag_key_field_headers():
    """delete_tag_key propagates request.name as an x-goog-request-params header."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tag_keys.DeleteTagKeyRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag_key), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_tag_key(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_tag_key_field_headers_async():
    """Async delete_tag_key propagates request.name as a field header."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = tag_keys.DeleteTagKeyRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag_key), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_tag_key(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_tag_key_flattened():
    """The flattened `name` kwarg is copied into the request object."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_tag_key(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_tag_key_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_tag_key(
            tag_keys.DeleteTagKeyRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_tag_key_flattened_async():
    """Async variant: the flattened `name` kwarg is copied into the request object."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag_key), "__call__") as call:
        # Designate an appropriate return value for the call.  (A plain
        # Operation assignment here would be dead code: the awaitable fake
        # below is what the async surface actually consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_tag_key(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tag_key_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_tag_key(
            tag_keys.DeleteTagKeyRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [iam_policy_pb2.GetIamPolicyRequest, dict,])
def test_get_iam_policy(request_type, transport: str = "grpc"):
    """get_iam_policy sends the request and unpacks the Policy response fields."""
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
        response = client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
def test_get_iam_policy_empty_call():
    """Calling get_iam_policy() with no arguments sends a default GetIamPolicyRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        client.get_iam_policy()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
@pytest.mark.asyncio
async def test_get_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest
):
    """Async get_iam_policy sends the request and unpacks the Policy response fields."""
    client = TagKeysAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(version=774, etag=b"etag_blob",)
        )
        response = await client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_get_iam_policy_async_from_dict():
    """Async get_iam_policy also accepts a plain dict as the request."""
    await test_get_iam_policy_async(request_type=dict)
def test_get_iam_policy_field_headers():
    """get_iam_policy propagates request.resource as an x-goog-request-params header."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
    """Async get_iam_policy propagates request.resource as a field header."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.get_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_get_iam_policy_from_dict_foreign():
    """get_iam_policy accepts a dict request containing foreign (iam_policy) message fields."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        response = client.get_iam_policy(
            request={
                "resource": "resource_value",
                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
            }
        )
        call.assert_called()
def test_get_iam_policy_flattened():
    """The flattened `resource` kwarg is copied into the request object."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_iam_policy(resource="resource_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
def test_get_iam_policy_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_iam_policy(
            iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_async():
    """Async variant: the flattened `resource` kwarg is copied into the request object."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.  (A plain
        # Policy assignment here would be dead code: the awaitable fake
        # below is what the async surface actually consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_iam_policy(resource="resource_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_iam_policy(
            iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.parametrize("request_type", [iam_policy_pb2.SetIamPolicyRequest, dict,])
def test_set_iam_policy(request_type, transport: str = "grpc"):
    """set_iam_policy sends the request and unpacks the Policy response fields."""
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
        response = client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
def test_set_iam_policy_empty_call():
    """Calling set_iam_policy with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        client.set_iam_policy()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
@pytest.mark.asyncio
async def test_set_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest
):
    """Async set_iam_policy forwards the request and parses the Policy response."""
    client = TagKeysAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        # FakeUnaryUnaryCall wraps the Policy so the stub result is awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(version=774, etag=b"etag_blob",)
        )
        response = await client.set_iam_policy(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_set_iam_policy_async_from_dict():
    """Async set_iam_policy also accepts a plain dict as the request."""
    await test_set_iam_policy_async(request_type=dict)
def test_set_iam_policy_field_headers():
    """Routing metadata (x-goog-request-params) is derived from request.resource."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        client.set_iam_policy(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
    """Async variant: routing metadata is derived from request.resource."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.set_iam_policy(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_set_iam_policy_from_dict_foreign():
    """A dict request whose values include foreign (iam) message types is accepted."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        response = client.set_iam_policy(
            request={
                "resource": "resource_value",
                "policy": policy_pb2.Policy(version=774),
            }
        )
        call.assert_called()
def test_set_iam_policy_flattened():
    """Flattened kwargs are copied into the generated request message."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.set_iam_policy(resource="resource_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
def test_set_iam_policy_flattened_error():
    """Mixing a request object with flattened kwargs raises ValueError."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.set_iam_policy(
            iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_async():
    """Async variant: flattened kwargs are copied into the generated request."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        # NOTE: the generator used to also emit a plain `policy_pb2.Policy()`
        # assignment here; it was dead code, immediately overwritten below,
        # so it has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.set_iam_policy(resource="resource_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_error_async():
    """Async client must reject a request object mixed with flattened kwargs."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.set_iam_policy(
            iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value",
        )
@pytest.mark.parametrize(
    "request_type", [iam_policy_pb2.TestIamPermissionsRequest, dict,]
)
def test_test_iam_permissions(request_type, transport: str = "grpc"):
    """test_iam_permissions forwards the request and parses the response."""
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
            permissions=["permissions_value"],
        )
        response = client.test_iam_permissions(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_empty_call():
    """Calling test_iam_permissions with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        client.test_iam_permissions()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
@pytest.mark.asyncio
async def test_test_iam_permissions_async(
    transport: str = "grpc_asyncio",
    request_type=iam_policy_pb2.TestIamPermissionsRequest,
):
    """Async test_iam_permissions forwards the request and parses the response."""
    client = TagKeysAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # FakeUnaryUnaryCall makes the stub result awaitable.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse(
                permissions=["permissions_value"],
            )
        )
        response = await client.test_iam_permissions(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_async_from_dict():
    """Async test_iam_permissions also accepts a plain dict as the request."""
    await test_test_iam_permissions_async(request_type=dict)
def test_test_iam_permissions_field_headers():
    """Routing metadata (x-goog-request-params) is derived from request.resource."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        client.test_iam_permissions(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
    """Async variant: routing metadata is derived from request.resource."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        await client.test_iam_permissions(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_test_iam_permissions_from_dict_foreign():
    """A dict request with foreign (iam) field types is accepted."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        response = client.test_iam_permissions(
            request={
                "resource": "resource_value",
                "permissions": ["permissions_value"],
            }
        )
        call.assert_called()
def test_test_iam_permissions_flattened():
    """Flattened kwargs (resource, permissions) populate the request message."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.test_iam_permissions(
            resource="resource_value", permissions=["permissions_value"],
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
        arg = args[0].permissions
        mock_val = ["permissions_value"]
        assert arg == mock_val
def test_test_iam_permissions_flattened_error():
    """Mixing a request object with flattened kwargs raises ValueError."""
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.test_iam_permissions(
            iam_policy_pb2.TestIamPermissionsRequest(),
            resource="resource_value",
            permissions=["permissions_value"],
        )
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_async():
    """Async variant: flattened kwargs populate the generated request."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate (awaitable) return value for the call.
        # NOTE: the generator used to also emit a plain (non-awaitable)
        # response assignment here; it was dead code, immediately
        # overwritten below, so it has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.test_iam_permissions(
            resource="resource_value", permissions=["permissions_value"],
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
        arg = args[0].permissions
        mock_val = ["permissions_value"]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_error_async():
    """Async client must reject a request object mixed with flattened kwargs."""
    client = TagKeysAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.test_iam_permissions(
            iam_policy_pb2.TestIamPermissionsRequest(),
            resource="resource_value",
            permissions=["permissions_value"],
        )
def test_credentials_transport_error():
    """Conflicting client constructor arguments must raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.TagKeysGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = TagKeysClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )

    # It is an error to provide a credentials file and a transport instance.
    transport = transports.TagKeysGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = TagKeysClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )

    # It is an error to provide an api_key and a transport instance.
    transport = transports.TagKeysGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = TagKeysClient(client_options=options, transport=transport,)

    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = TagKeysClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )

    # It is an error to provide scopes and a transport instance.
    transport = transports.TagKeysGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = TagKeysClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client adopts a user-supplied transport instance verbatim."""
    custom_transport = transports.TagKeysGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = TagKeysClient(transport=custom_transport)
    assert client.transport is custom_transport
def test_transport_get_channel():
    """Both the sync and async gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.TagKeysGrpcTransport,
        transports.TagKeysGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # The lazily-created channel must be truthy once accessed.
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [transports.TagKeysGrpcTransport, transports.TagKeysGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """The sync client defaults to the gRPC transport."""
    # A client should use the gRPC transport by default.
    client = TagKeysClient(credentials=ga_credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.TagKeysGrpcTransport,)
def test_tag_keys_base_transport_error():
    """Supplying both credentials and a credentials file is rejected."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.TagKeysTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_tag_keys_base_transport():
    """Every RPC method and the LRO client on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.resourcemanager_v3.services.tag_keys.transports.TagKeysTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.TagKeysTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )

    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "list_tag_keys",
        "get_tag_key",
        "create_tag_key",
        "update_tag_key",
        "delete_tag_key",
        "get_iam_policy",
        "set_iam_policy",
        "test_iam_permissions",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())

    with pytest.raises(NotImplementedError):
        transport.close()

    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_tag_keys_base_transport_with_credentials_file():
    """A credentials file is loaded with the service's default scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.resourcemanager_v3.services.tag_keys.transports.TagKeysTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.TagKeysTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id="octopus",
        )
def test_tag_keys_base_transport_with_adc():
    """The base transport uses ADC when no explicit credentials are supplied."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.resourcemanager_v3.services.tag_keys.transports.TagKeysTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.TagKeysTransport()
        adc.assert_called_once()
def test_tag_keys_auth_adc():
    """The client requests ADC with the service's default scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        TagKeysClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.TagKeysGrpcTransport, transports.TagKeysGrpcAsyncIOTransport,],
)
def test_tag_keys_transport_auth_adc(transport_class):
    """Transports forward user scopes and quota project to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.TagKeysGrpcTransport, grpc_helpers),
        (transports.TagKeysGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_tag_keys_transport_create_channel(transport_class, grpc_helpers):
    """Channel creation passes host, scopes, and message-size options through."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])

        create_channel.assert_called_with(
            "cloudresourcemanager.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/cloud-platform.read-only",
            ),
            scopes=["1", "2"],
            default_host="cloudresourcemanager.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.TagKeysGrpcTransport, transports.TagKeysGrpcAsyncIOTransport],
)
def test_tag_keys_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS setup prefers ssl_channel_credentials, else builds them from the cert source."""
    cred = ga_credentials.AnonymousCredentials()

    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )

    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_tag_keys_host_no_port():
    """A portless api_endpoint gains the default :443 port."""
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="cloudresourcemanager.googleapis.com"
        ),
    )
    assert client.transport._host == "cloudresourcemanager.googleapis.com:443"
def test_tag_keys_host_with_port():
    """An explicit port in api_endpoint is preserved."""
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="cloudresourcemanager.googleapis.com:8000"
        ),
    )
    assert client.transport._host == "cloudresourcemanager.googleapis.com:8000"
def test_tag_keys_grpc_transport_channel():
    """A user-supplied channel is adopted verbatim by the sync transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.TagKeysGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` instead of `== None`: None is a singleton, and `==` can be
    # fooled by objects overriding __eq__ (PEP 8 / flake8 E711).
    assert transport._ssl_channel_credentials is None
def test_tag_keys_grpc_asyncio_transport_channel():
    """A user-supplied channel is adopted verbatim by the asyncio transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.TagKeysGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` instead of `== None`: None is a singleton, and `==` can be
    # fooled by objects overriding __eq__ (PEP 8 / flake8 E711).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.TagKeysGrpcTransport, transports.TagKeysGrpcAsyncIOTransport],
)
def test_tag_keys_transport_channel_mtls_with_client_cert_source(transport_class):
    """Deprecated api_mtls_endpoint + client_cert_source still builds an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred

            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel

            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()

            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.TagKeysGrpcTransport, transports.TagKeysGrpcAsyncIOTransport],
)
def test_tag_keys_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint without a cert source falls back to ADC SslCredentials."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()

            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )

            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_tag_keys_grpc_lro_client():
    """The sync transport lazily creates and caches one LRO operations client."""
    client = TagKeysClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport

    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)

    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_tag_keys_grpc_lro_async_client():
    """The async transport lazily creates and caches one async LRO operations client."""
    client = TagKeysAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport

    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)

    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_tag_key_path():
    """tag_key_path renders the tagKeys/{tag_key} resource name."""
    tag_key = "squid"
    assert TagKeysClient.tag_key_path(tag_key) == f"tagKeys/{tag_key}"
def test_parse_tag_key_path():
    """Building and then parsing a tag key path round-trips its components."""
    components = {"tag_key": "clam"}
    rendered = TagKeysClient.tag_key_path(**components)
    # Check that the path construction is reversible.
    assert TagKeysClient.parse_tag_key_path(rendered) == components
def test_common_billing_account_path():
    """common_billing_account_path renders billingAccounts/{billing_account}."""
    billing_account = "whelk"
    rendered = TagKeysClient.common_billing_account_path(billing_account)
    assert rendered == f"billingAccounts/{billing_account}"
def test_parse_common_billing_account_path():
    """Billing-account path construction and parsing are mutually inverse."""
    components = {"billing_account": "octopus"}
    rendered = TagKeysClient.common_billing_account_path(**components)
    # Check that the path construction is reversible.
    assert TagKeysClient.parse_common_billing_account_path(rendered) == components
def test_common_folder_path():
    """common_folder_path renders folders/{folder}."""
    folder = "oyster"
    assert TagKeysClient.common_folder_path(folder) == f"folders/{folder}"
def test_parse_common_folder_path():
    """Folder path construction and parsing are mutually inverse."""
    components = {"folder": "nudibranch"}
    rendered = TagKeysClient.common_folder_path(**components)
    # Check that the path construction is reversible.
    assert TagKeysClient.parse_common_folder_path(rendered) == components
def test_common_organization_path():
    """common_organization_path renders organizations/{organization}."""
    organization = "cuttlefish"
    rendered = TagKeysClient.common_organization_path(organization)
    assert rendered == f"organizations/{organization}"
def test_parse_common_organization_path():
    """Organization path construction and parsing are mutually inverse."""
    components = {"organization": "mussel"}
    rendered = TagKeysClient.common_organization_path(**components)
    # Check that the path construction is reversible.
    assert TagKeysClient.parse_common_organization_path(rendered) == components
def test_common_project_path():
    """common_project_path renders projects/{project}."""
    project = "winkle"
    assert TagKeysClient.common_project_path(project) == f"projects/{project}"
def test_parse_common_project_path():
    """Project path construction and parsing are mutually inverse."""
    components = {"project": "nautilus"}
    rendered = TagKeysClient.common_project_path(**components)
    # Check that the path construction is reversible.
    assert TagKeysClient.parse_common_project_path(rendered) == components
def test_common_location_path():
    """common_location_path renders projects/{project}/locations/{location}."""
    project = "scallop"
    location = "abalone"
    rendered = TagKeysClient.common_location_path(project, location)
    assert rendered == f"projects/{project}/locations/{location}"
def test_parse_common_location_path():
    """Location path construction and parsing are mutually inverse."""
    components = {"project": "squid", "location": "clam"}
    rendered = TagKeysClient.common_location_path(**components)
    # Check that the path construction is reversible.
    assert TagKeysClient.parse_common_location_path(rendered) == components
def test_client_with_default_client_info():
    """client_info is threaded through to _prep_wrapped_messages for client and transport."""
    client_info = gapic_v1.client_info.ClientInfo()

    with mock.patch.object(
        transports.TagKeysTransport, "_prep_wrapped_messages"
    ) as prep:
        client = TagKeysClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)

    with mock.patch.object(
        transports.TagKeysTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = TagKeysClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async client context closes its gRPC channel exactly once."""
    client = TagKeysAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Using the sync client as a context manager closes the transport channel.

    One case per transport name, mapping it to the private attribute that
    holds the underlying channel.
    """
    # Renamed from ``transports``: the original local dict shadowed the
    # imported ``transports`` module used elsewhere in this file.
    channel_attrs = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in channel_attrs.items():
        client = TagKeysClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                # The channel must remain open while inside the context.
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager delegates close() to its transport."""
    # Renamed from ``transports``: the original local list shadowed the
    # imported ``transports`` module used elsewhere in this file.
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = TagKeysClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (TagKeysClient, transports.TagKeysGrpcTransport),
        (TagKeysAsyncClient, transports.TagKeysGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An ``api_key`` in client options is exchanged for API-key credentials."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            # The transport must be built with the credentials minted from the
            # API key, not with ambient/environment credentials.
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
|
googleapis/python-resource-manager
|
tests/unit/gapic/resourcemanager_v3/test_tag_keys.py
|
Python
|
apache-2.0
| 115,667
|
[
"Octopus"
] |
3a60c2f2da1efb46c362677d7bb47679d82b73823bb61358f8a1118ff1f1d38a
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=80 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`resources` module contains a bunch of resources for OpenLP.
DO NOT REMOVE THIS FILE, IT IS REQUIRED FOR INCLUDING THE RESOURCES ON SOME
PLATFORMS!
"""
|
marmyshev/transitions
|
resources/__init__.py
|
Python
|
gpl-2.0
| 2,272
|
[
"Brian"
] |
2124b5facd659ad24c34345d1fabfb3703acac72789f792f4e4e55db65bea20d
|
import unittest
from __main__ import vtk, qt, ctk, slicer
import SimpleITK as sitk
import sitkUtils
#
# LabelObjectStatistics
#
class LabelObjectStatistics:
  """Slicer module descriptor: registers Label Object Statistics with the app."""
  def __init__(self, parent):
    import string
    parent.title = "Label Object Statistics"
    parent.categories = ["Microscopy"]
    parent.contributors = ["Bradley Lowekamp (MSC/NLM)"]
    # Substitute the wiki URL and Slicer major/minor version into the help text.
    parent.helpText = string.Template("""
Use this module to calculate counts and volumes for different labels of a label map plus statistics on the grayscale background volume. Note: volumes must have same dimensions. See <a href=\"$a/Documentation/$b.$c/Modules/LabelObjectStatistics\">$a/Documentation/$b.$c/Modules/LabelObjectStatistics</a> for more information.
    """).substitute({ 'a':parent.slicerWikiUrl, 'b':slicer.app.majorVersion, 'c':slicer.app.minorVersion })
    parent.acknowledgementText = """
This module is derived from the "Label Statistics" module implemented by Steve Pieper supported by NA-MIC, NAC, BIRN, NCIGT, and the Slicer Community. See http://www.slicer.org for details.
    """
    self.parent = parent
#
# qSlicerPythonModuleExampleWidget
#
class LabelObjectStatisticsWidget:
  """GUI for the module: volume selectors, Apply/Chart/Save buttons, stats table.

  NOTE: this is Python 2 era Slicer code (print statements appear elsewhere in
  the file); indentation below was reconstructed from the control flow.
  """
  def __init__(self, parent=None):
    # When no parent widget is given, build a standalone top-level widget and
    # run setup immediately; otherwise the framework calls setup() later.
    if not parent:
      self.parent = slicer.qMRMLWidget()
      self.parent.setLayout(qt.QVBoxLayout())
      self.parent.setMRMLScene(slicer.mrmlScene)
    else:
      self.parent = parent
    self.logic = None           # LabelObjectStatisticsLogic, created on Apply
    self.grayscaleNode = None   # selected background scalar volume node
    self.labelNode = None       # selected label map volume node
    self.fileName = None
    self.fileDialog = None      # lazily-created CSV save dialog
    if not parent:
      self.setup()
      self.grayscaleSelector.setMRMLScene(slicer.mrmlScene)
      self.labelSelector.setMRMLScene(slicer.mrmlScene)
      self.parent.show()
  def setup(self):
    """Build the widget hierarchy and wire up signal connections."""
    #
    # the grayscale volume selector
    #
    self.grayscaleSelectorFrame = qt.QFrame(self.parent)
    self.grayscaleSelectorFrame.setLayout(qt.QHBoxLayout())
    self.parent.layout().addWidget(self.grayscaleSelectorFrame)
    self.grayscaleSelectorLabel = qt.QLabel("Grayscale Volume: ", self.grayscaleSelectorFrame)
    self.grayscaleSelectorLabel.setToolTip( "Select the grayscale volume (background grayscale scalar volume node) for statistics calculations")
    self.grayscaleSelectorFrame.layout().addWidget(self.grayscaleSelectorLabel)
    self.grayscaleSelector = slicer.qMRMLNodeComboBox(self.grayscaleSelectorFrame)
    self.grayscaleSelector.nodeTypes = ( ("vtkMRMLScalarVolumeNode"), "" )
    self.grayscaleSelector.selectNodeUponCreation = False
    self.grayscaleSelector.addEnabled = False
    self.grayscaleSelector.removeEnabled = False
    self.grayscaleSelector.noneEnabled = True
    self.grayscaleSelector.showHidden = False
    self.grayscaleSelector.showChildNodeTypes = False
    self.grayscaleSelector.setMRMLScene( slicer.mrmlScene )
    # TODO: need to add a QLabel
    # self.grayscaleSelector.SetLabelText( "Master Volume:" )
    self.grayscaleSelectorFrame.layout().addWidget(self.grayscaleSelector)
    #
    # the label volume selector
    #
    self.labelSelectorFrame = qt.QFrame()
    self.labelSelectorFrame.setLayout( qt.QHBoxLayout() )
    self.parent.layout().addWidget( self.labelSelectorFrame )
    self.labelSelectorLabel = qt.QLabel()
    self.labelSelectorLabel.setText( "Label Map: " )
    self.labelSelectorFrame.layout().addWidget( self.labelSelectorLabel )
    self.labelSelector = slicer.qMRMLNodeComboBox()
    self.labelSelector.nodeTypes = ( "vtkMRMLLabelMapVolumeNode", "" )
    # todo addAttribute
    self.labelSelector.selectNodeUponCreation = False
    self.labelSelector.addEnabled = False
    self.labelSelector.noneEnabled = True
    self.labelSelector.removeEnabled = False
    self.labelSelector.showHidden = False
    self.labelSelector.showChildNodeTypes = False
    self.labelSelector.setMRMLScene( slicer.mrmlScene )
    self.labelSelector.setToolTip( "Pick the label map to edit" )
    self.labelSelectorFrame.layout().addWidget( self.labelSelector )
    # Apply button (enabled only once both volumes are selected)
    self.applyButton = qt.QPushButton("Apply")
    self.applyButton.toolTip = "Calculate Statistics."
    self.applyButton.enabled = False
    self.parent.layout().addWidget(self.applyButton)
    # model and view for stats table
    self.view = qt.QTableView()
    self.view.sortingEnabled = True
    self.parent.layout().addWidget(self.view)
    # Chart button
    self.chartFrame = qt.QFrame()
    self.chartFrame.setLayout(qt.QHBoxLayout())
    self.parent.layout().addWidget(self.chartFrame)
    self.chartButton = qt.QPushButton("Chart")
    self.chartButton.toolTip = "Make a chart from the current statistics."
    self.chartFrame.layout().addWidget(self.chartButton)
    self.chartOption = qt.QComboBox()
    self.chartFrame.layout().addWidget(self.chartOption)
    self.chartIgnoreZero = qt.QCheckBox()
    self.chartIgnoreZero.setText('Ignore Zero')
    self.chartIgnoreZero.checked = False
    self.chartIgnoreZero.setToolTip('Do not include the zero index in the chart to avoid dwarfing other bars')
    self.chartFrame.layout().addWidget(self.chartIgnoreZero)
    self.chartFrame.enabled = False
    # Save button
    self.saveButton = qt.QPushButton("Save")
    self.saveButton.toolTip = "Calculate Statistics."
    self.saveButton.enabled = False
    self.parent.layout().addWidget(self.saveButton)
    # Add vertical spacer
    self.parent.layout().addStretch(1)
    # connections
    self.applyButton.connect('clicked()', self.onApply)
    self.chartButton.connect('clicked()', self.onChart)
    self.saveButton.connect('clicked()', self.onSave)
    self.grayscaleSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.onGrayscaleSelect)
    self.labelSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.onLabelSelect)
  def onGrayscaleSelect(self, node):
    # Enable Apply only when both a grayscale and a label volume are chosen.
    self.grayscaleNode = node
    self.applyButton.enabled = bool(self.grayscaleNode) and bool(self.labelNode)
  def onLabelSelect(self, node):
    # Enable Apply only when both a grayscale and a label volume are chosen.
    self.labelNode = node
    self.applyButton.enabled = bool(self.grayscaleNode) and bool(self.labelNode)
  def onApply(self):
    """Calculate the label statistics
    """
    self.applyButton.text = "Working..."
    # TODO: why doesn't processEvents alone make the label text change?
    self.applyButton.repaint()
    slicer.app.processEvents()
    volumesLogic = slicer.modules.volumes.logic()
    warnings = volumesLogic.CheckForLabelVolumeValidity(self.grayscaleNode, self.labelNode)
    resampledLabelNode = None
    if warnings != "":
      # Geometry mismatch can be fixed by resampling the label map onto the
      # grayscale volume; any other validity problem aborts with a dialog.
      if 'mismatch' in warnings:
        resampledLabelNode = volumesLogic.ResampleVolumeToReferenceVolume(self.labelNode, self.grayscaleNode)
        self.logic = LabelObjectStatisticsLogic(self.grayscaleNode, resampledLabelNode)
      else:
        qt.QMessageBox.warning(slicer.util.mainWindow(),
            "Label Statistics", "Volumes do not have the same geometry.\n%s" % warnings)
        return
    else:
      self.logic = LabelObjectStatisticsLogic(self.grayscaleNode, self.labelNode)
    self.populateStats()
    self.populateChartOption()
    if resampledLabelNode:
      # The temporary resampled node is only needed while computing stats.
      slicer.mrmlScene.RemoveNode(resampledLabelNode)
    self.chartFrame.enabled = True
    self.saveButton.enabled = True
    self.applyButton.text = "Apply"
  def onChart(self):
    """chart the label statistics
    """
    valueToPlot = self.chartOption.currentText
    ignoreZero = self.chartIgnoreZero.checked
    if not valueToPlot is None:
      self.logic.createStatsChart(self.labelNode,valueToPlot,ignoreZero)
    else:
      print "Selected item is unexpectedly None!"
  def onSave(self):
    """save the label statistics
    """
    # The dialog is created lazily and reused across saves.
    if not self.fileDialog:
      self.fileDialog = qt.QFileDialog(self.parent)
      self.fileDialog.options = self.fileDialog.DontUseNativeDialog
      self.fileDialog.acceptMode = self.fileDialog.AcceptSave
      self.fileDialog.defaultSuffix = "csv"
      self.fileDialog.setNameFilter("Comma Separated Values (*.csv)")
      self.fileDialog.connect("fileSelected(QString)", self.onFileSelected)
    self.fileDialog.show()
  def onFileSelected(self,fileName):
    self.logic.saveStats(fileName)
  def populateStats(self):
    """Fill the table view: one row per label, one column per statistic key."""
    if not self.logic:
      return
    displayNode = self.labelNode.GetDisplayNode()
    colorNode = displayNode.GetColorNode()
    lut = colorNode.GetLookupTable()
    self.items = []
    self.model = qt.QStandardItemModel()
    self.view.setModel(self.model)
    self.view.verticalHeader().visible = False
    row = 0
    for i in self.logic.labelStats["Labels"]:
      # First column: a color swatch taken from the label map's lookup table.
      color = qt.QColor()
      rgb = lut.GetTableValue(i)
      color.setRgb(rgb[0]*255,rgb[1]*255,rgb[2]*255)
      item = qt.QStandardItem()
      item.setData(color,qt.Qt.DecorationRole)
      item.setToolTip(colorNode.GetColorName(i))
      item.setEditable(False)
      self.model.setItem(row,0,item)
      self.items.append(item)
      col = 1
      for k in self.logic.keys:
        item = qt.QStandardItem()
        # set data as float with Qt::DisplayRole
        try:
          v = float(self.logic.labelStats[i,k])
        except (KeyError, TypeError):
          # Missing or non-numeric statistic: show infinity as a placeholder.
          v = float('inf')
        item.setData(v,qt.Qt.DisplayRole)
        item.setToolTip(colorNode.GetColorName(i))
        item.setEditable(False)
        self.model.setItem(row,col,item)
        self.items.append(item)
        col += 1
      row += 1
    self.view.setColumnWidth(0,30)
    self.model.setHeaderData(0,1," ")
    col = 1
    for k in self.logic.keys:
      self.view.setColumnWidth(col,15*len(k))
      self.model.setHeaderData(col,1,k)
      col += 1
  def populateChartOption(self):
    # Offer every computed statistic key as a chartable quantity.
    self.chartOption.clear()
    self.chartOption.addItems(self.logic.keys)
class LabelObjectStatisticsLogic:
  """Implement the logic to calculate label statistics.
  Nodes are passed in as arguments.
  Results are stored as 'statistics' instance variable.
  """
  def __init__(self, grayscaleNode, labelNode, fileName=None):
    # Column names for the stats table; shape attributes are appended below.
    self.keys = ["Label", "Count", "Volume mm^3", "Volume cc", "Min", "Max", "Mean", "StdDev"]
    cubicMMPerVoxel = reduce(lambda x,y: x*y, labelNode.GetSpacing())
    ccPerCubicMM = 0.001
    # TODO: progress and status updates
    # this->InvokeEvent(vtkLabelStatisticsLogic::StartLabelStats, (void*)"start label stats")
    # labelStats maps (label, key) -> value, plus 'Labels' -> ordered label list.
    self.labelStats = {}
    self.labelStats['Labels'] = []
    # Access both volumes from SimpleITK via Slicer's in-memory ITK bridge.
    labelNodeName = labelNode.GetName()
    labelImage = sitk.ReadImage(sitkUtils.GetSlicerITKReadWriteAddress(labelNodeName))
    grayscaleNodeName = grayscaleNode.GetName();
    grayscaleImage = sitk.ReadImage(sitkUtils.GetSlicerITKReadWriteAddress(grayscaleNodeName))
    # Intensity statistics of the grayscale image per label region.
    sitkStats = sitk.LabelStatisticsImageFilter()
    sitkStats.Execute(grayscaleImage, labelImage)
    for l in sitkStats.GetLabels():
      # add an entry to the LabelStats list
      self.labelStats["Labels"].append(l)
      self.labelStats[l,"Label"] = l
      self.labelStats[l,"Count"] = sitkStats.GetCount(l)
      self.labelStats[l,"Volume mm^3"] = self.labelStats[l,"Count"] * cubicMMPerVoxel
      self.labelStats[l,"Volume cc"] = self.labelStats[l,"Volume mm^3"] * ccPerCubicMM
      self.labelStats[l,"Min"] = sitkStats.GetMinimum(l)
      self.labelStats[l,"Max"] = sitkStats.GetMaximum(l)
      self.labelStats[l,"Mean"] = sitkStats.GetMean(l)
      self.labelStats[l,"StdDev"] = sitkStats.GetSigma(l)
      self.labelStats[l,"Sum"] = sitkStats.GetSum(l)
    del sitkStats
    # Shape statistics computed on the label map alone.
    sitkShapeStats = sitk.LabelShapeStatisticsImageFilter()
    sitkShapeStats.ComputeFeretDiameterOff()
    sitkShapeStats.ComputePerimeterOn()
    sitkShapeStats.Execute( labelImage )
    # Shape attributes to query; commented-out and optional entries are pruned.
    shapeAttributes = [
      # 'Number Of Pixels',
      # 'Physical Size',
      # 'Centroid',
      # 'Bounding Box',
      'Number Of Pixels On Border',
      'Perimeter On Border',
      'Perimeter On Border Ratio',
      # 'Principal Moments',
      'Principal Axes',
      'Elongation',
      'Perimeter',
      'Roundness',
      'Equivalent Spherical Radius',
      'Equivalent Spherical Perimeter',
      # 'Equivalent Ellipsoid Diameter',
      'Flatness',
      'Feret Diameter'
    ]
    # Drop attributes the filter was not configured to compute.
    if not sitkShapeStats.GetComputeFeretDiameter():
      shapeAttributes.remove( 'Feret Diameter' )
    if not sitkShapeStats.GetComputePerimeter():
      shapeAttributes.remove( 'Perimeter' )
    # We don't have a good way to show
    shapeAttributes.remove( 'Principal Axes' )
    self.keys += shapeAttributes
    for l in sitkShapeStats.GetLabels():
      # add attributes form the Shape label object
      for name in shapeAttributes:
        # "Perimeter On Border" -> sitkShapeStats.GetPerimeterOnBorder(l), etc.
        attr = getattr(sitkShapeStats,"Get"+name.replace(' ', '') )(l)
        self.labelStats[l, name] = attr
    for l in sitkShapeStats.GetLabels():
      # Expand the 3-component principal moments vector into scalar columns.
      attr = getattr(sitkShapeStats,"Get"+"PrincipalMoments" )(l)
      for i in range(1,4):
        self.labelStats[l, "Principal Moments "+str(i) ] = attr[i-1]
    self.keys += ["Principal Moments "+str(i) for i in range(1,4)]
    # this.InvokeEvent(vtkLabelStatisticsLogic::LabelStatsInnerLoop, (void*)"1")
    # this.InvokeEvent(vtkLabelStatisticsLogic::EndLabelStats, (void*)"end label stats")
  def createStatsChart(self, labelNode, valueToPlot, ignoreZero=False):
    """Make a MRML chart of the current stats
    """
    # Switch the layout to the quantitative view that includes a chart panel.
    layoutNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLLayoutNode')
    layoutNodes.SetReferenceCount(layoutNodes.GetReferenceCount()-1)
    layoutNodes.InitTraversal()
    layoutNode = layoutNodes.GetNextItemAsObject()
    layoutNode.SetViewArrangement(slicer.vtkMRMLLayoutNode.SlicerLayoutConventionalQuantitativeView)
    chartViewNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLChartViewNode')
    chartViewNodes.SetReferenceCount(chartViewNodes.GetReferenceCount()-1)
    chartViewNodes.InitTraversal()
    chartViewNode = chartViewNodes.GetNextItemAsObject()
    # Build a (label, value, 0) triple per plotted label.
    arrayNode = slicer.mrmlScene.AddNode(slicer.vtkMRMLDoubleArrayNode())
    array = arrayNode.GetArray()
    samples = len(self.labelStats["Labels"])
    tuples = samples
    if ignoreZero and self.labelStats["Labels"].__contains__(0):
      tuples -= 1
    array.SetNumberOfTuples(tuples)
    tuple = 0
    for i in xrange(samples):
      index = self.labelStats["Labels"][i]
      if not (ignoreZero and index == 0):
        array.SetComponent(tuple, 0, index)
        try:
          v = float(self.labelStats[index,valueToPlot])
        except (KeyError, TypeError):
          # Missing or non-numeric statistic plots as zero.
          v = float(0)
        array.SetComponent(tuple, 1, v)
        array.SetComponent(tuple, 2, 0)
        tuple += 1
    chartNode = slicer.mrmlScene.AddNode(slicer.vtkMRMLChartNode())
    state = chartNode.StartModify()
    chartNode.AddArray(valueToPlot, arrayNode.GetID())
    chartViewNode.SetChartNodeID(chartNode.GetID())
    chartNode.SetProperty('default', 'title', 'Label Statistics')
    chartNode.SetProperty('default', 'xAxisLabel', 'Label')
    chartNode.SetProperty('default', 'yAxisLabel', valueToPlot)
    chartNode.SetProperty('default', 'type', 'Bar');
    chartNode.SetProperty('default', 'xAxisType', 'categorical')
    chartNode.SetProperty('default', 'showLegend', 'off')
    # series level properties
    if labelNode.GetDisplayNode() != None and labelNode.GetDisplayNode().GetColorNode() != None:
      # Color the bars with the label map's own color table.
      chartNode.SetProperty(valueToPlot, 'lookupTable', labelNode.GetDisplayNode().GetColorNodeID());
    chartNode.EndModify(state)
  def statsAsCSV(self):
    """
    print comma separated value file with header keys in quotes
    """
    csv = ""
    header = ""
    for k in self.keys[:-1]:
      header += "\"%s\"" % k + ","
    header += "\"%s\"" % self.keys[-1] + "\n"
    csv = header
    for i in self.labelStats["Labels"]:
      # Missing (label, key) entries render as empty cells.
      valuesAsStr = [ str(self.labelStats[i,k]) if (i,k) in self.labelStats else '' for k in self.keys ]
      line = ",".join(valuesAsStr)
      line += "\n"
      csv += line
    return csv
  def saveStats(self,fileName):
    # Write the CSV rendering of the statistics to the given path.
    fp = open(fileName, "w")
    fp.write(self.statsAsCSV())
    fp.close()
class LabelObjectStatisticsTest(unittest.TestCase):
  """
  This is the test case.
  """
  def delayDisplay(self,message,msec=1000):
    """This utility method displays a small dialog and waits.
    This does two things: 1) it lets the event loop catch up
    to the state of the test so that rendering and widget updates
    have all taken place before the test continues and 2) it
    shows the user/developer/tester the state of the test
    so that we'll know when it breaks.
    """
    print(message)
    self.info = qt.QDialog()
    self.infoLayout = qt.QVBoxLayout()
    self.info.setLayout(self.infoLayout)
    self.label = qt.QLabel(message,self.info)
    self.infoLayout.addWidget(self.label)
    # Auto-close the dialog after msec milliseconds.
    qt.QTimer.singleShot(msec, self.info.close)
    self.info.exec_()
  def setUp(self):
    """ Do whatever is needed to reset the state - typically a scene clear will be enough.
    """
    slicer.mrmlScene.Clear(0)
  def runTest(self,scenario=None):
    """Run as few or as many tests as needed here.
    """
    self.setUp()
    self.test_LabelObjectStatisticsBasic()
    self.test_LabelObjectStatisticsWidget()
    self.test_LabelObjectStatisticsLogic()
  def test_LabelObjectStatisticsBasic(self):
    """
    This tests some aspects of the label statistics
    """
    self.delayDisplay("Starting test_LabelObjectStatisticsBasic")
    #
    # first, get some data
    #
    import SampleData
    sampleDataLogic = SampleData.SampleDataLogic()
    mrHead = sampleDataLogic.downloadMRHead()
    ctChest = sampleDataLogic.downloadCTChest()
    self.delayDisplay('Two data sets loaded')
    volumesLogic = slicer.modules.volumes.logic()
    mrHeadLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, mrHead, "mrHead-label" )
    # A label made from MRHead must mismatch CTChest and match MRHead.
    warnings = volumesLogic.CheckForLabelVolumeValidity(ctChest, mrHeadLabel)
    self.delayDisplay("Warnings for mismatch:\n%s" % warnings)
    self.assertTrue( warnings != "" )
    warnings = volumesLogic.CheckForLabelVolumeValidity(mrHead, mrHeadLabel)
    self.delayDisplay("Warnings for match:\n%s" % warnings)
    self.assertTrue( warnings == "" )
    self.delayDisplay('test_LabelObjectStatisticsBasic passed!')
  def test_LabelObjectStatisticsWidget(self):
    # Test disabled: the early return skips the (non-functional) widget lookup below.
    return
    self.delayDisplay("Starting test_LabelObjectStatisticsWidget")
    m = slicer.util.mainWindow()
    m.moduleSelector().selectModule('LabelObjectStatistics')
    print dir(slicer.modules)
    testWidget = slicer.modules.LabelObjectStatisticsWidget
  def test_LabelObjectStatisticsLogic(self):
    """Run the logic end-to-end on an Otsu-thresholded MRHead label map."""
    self.delayDisplay("Starting test_LabelObjectStatisticsLogic")
    import SampleData
    sampleDataLogic = SampleData.SampleDataLogic()
    mrHead = sampleDataLogic.downloadMRHead()
    img = sitkUtils.PullFromSlicer( mrHead.GetName() )
    labelImg = sitk.OtsuMultipleThresholds(img, 3)
    labelNodeName = "OtsuMultipleThresholdLabelMap"
    sitkUtils.PushToSlicer(labelImg, "OtsuMultipleThresholdLabelMap", 2)
    mrHeadLabel = slicer.util.getNode(labelNodeName)
    logic = LabelObjectStatisticsLogic( mrHead, mrHeadLabel )
    print logic.keys
    print logic.labelStats
    logic.saveStats("test_LabelObjectStatisticsLogic.csv")
|
blowekamp/Slicer-IASEM
|
LabelObjectStatistics/LabelObjectStatistics.py
|
Python
|
apache-2.0
| 19,184
|
[
"VTK"
] |
b99b33d197f55a8fc9d49f976990ad0c5902f851a11d6f79fd473d591722be52
|
from django import forms
from django.contrib.localflavor.cz.forms import CZBirthNumberField
from django.utils.translation import ugettext_lazy as _
from djcode.reservations.models import Examination_kind, Patient, Visit_reservation
class Patient_form(forms.ModelForm):
    """Model form for registering a Patient against a visit reservation."""
    # NOTE(review): ``label_suffix`` as a class attribute is not a documented
    # django.forms hook (it is normally an __init__ argument) — confirm intent.
    label_suffix = ":"

    class Meta:
        model = Patient

    # Field overrides for the model-derived fields.
    # NOTE(review): original indentation was lost; fields are placed at class
    # level (the standard ModelForm override position) — confirm against history.
    ident_hash = CZBirthNumberField(label=_("Birth number"))
    phone_number = forms.RegexField(
        label=_("Phone number"),
        min_length=5,
        max_length=100,
        regex = r"\d+",
        error_messages={"invalid": _(u"Enter a valid 'phone number' consisting of numbers only.")}
    )
    # Hidden: the reservation is chosen elsewhere in the UI and posted back.
    reservation = forms.ModelChoiceField(
        queryset=Visit_reservation.objects.all(),
        widget=forms.HiddenInput(),
        error_messages={"required": _("Please select time of visit reservation")}
    )
    exam_kind = forms.ModelChoiceField(
        empty_label=None,
        queryset=Examination_kind.objects.all(),
        widget=forms.RadioSelect(),
        label=_("Examination kind")
    )

    def clean_ident_hash(self):
        # Normalize the Czech birth number: drop the optional '/' between the
        # six-digit date part and the suffix.
        data = self.cleaned_data["ident_hash"]
        if data[6] == "/":
            data = data[:6] + data[7:]
        return data
class Patient_detail_form(forms.Form):
    """Lookup form: a single Czech birth number identifying a patient."""

    ident_hash = CZBirthNumberField()

    def clean_ident_hash(self):
        """Drop the optional '/' separating the date part from the suffix."""
        value = self.cleaned_data["ident_hash"]
        if value[6] == "/":
            value = value[:6] + value[7:]
        return value
|
mmincikova/medobs
|
djcode/reservations/forms.py
|
Python
|
gpl-3.0
| 1,292
|
[
"VisIt"
] |
635d5f48b4da9e023d70d26c349a8abdff0dff76a8c21e827f66e9ea15822940
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Spellings accepted as boolean values when parsing module arguments.
BOOLEANS_TRUE = ['y', 'yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['n', 'no', 'off', '0', 'false', 0, False]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE

# Multipliers for human-readable size suffixes (binary powers of two).
SIZE_RANGES = { 'Y': 1<<80, 'Z': 1<<70, 'E': 1<<60, 'P': 1<<50, 'T': 1<<40, 'G': 1<<30, 'M': 1<<20, 'K': 1<<10, 'B': 1 }

# Single-letter file attribute flags mapped to human-readable names
# (presumably the chattr/lsattr flag set — confirm against callers).
FILE_ATTRIBUTES = {
    'A': 'noatime',
    'a': 'append',
    'c': 'compressed',
    'C': 'nocow',
    'd': 'nodump',
    'D': 'dirsync',
    'e': 'extents',
    'E': 'encrypted',
    'h': 'blocksize',
    'i': 'immutable',
    'I': 'indexed',
    'j': 'journalled',
    'N': 'inline',
    's': 'zero',
    'S': 'synchronous',
    't': 'notail',
    'T': 'blockroot',
    'u': 'undelete',
    'X': 'compressedraw',
    'Z': 'compresseddirty',
}
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here can
# be used to do many common tasks
import locale
import os
import re
import pipes
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from itertools import repeat, chain
try:
import syslog
HAS_SYSLOG=True
except ImportError:
HAS_SYSLOG=False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX=False
try:
import selinux
HAVE_SELINUX=True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
try:
from collections import Sequence, Mapping
except ImportError:
# python2.5
Sequence = (list, tuple)
Mapping = (dict,)
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
try:
from collections.abc import KeysView
SEQUENCETYPE = (Sequence, KeysView)
except:
SEQUENCETYPE = Sequence
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (PY2, PY3, b, binary_type, integer_types,
iteritems, text_type, string_types)
from ansible.module_utils.six.moves import map, reduce
from ansible.module_utils._text import to_native, to_bytes, to_text
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names Using
# ansible.module_utils.six is preferred
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2.6+
bytes
except NameError:
# Python 2.4
bytes = binary_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
# Argument-spec fragment shared by modules that manipulate files; entries that
# a given module does not consume must still be accepted and ignored.
FILE_COMMON_ARGUMENTS=dict(
    src = dict(),
    mode = dict(type='raw'),
    owner = dict(),
    group = dict(),
    seuser = dict(),
    serole = dict(),
    selevel = dict(),
    setype = dict(),
    follow = dict(type='bool', default=False),
    # not taken by the file module, but other modules call file so it must ignore them.
    content = dict(no_log=True),
    backup = dict(),
    force = dict(),
    remote_src = dict(), # used by assemble
    regexp = dict(), # used by assemble
    delimiter = dict(), # used by assemble
    directory_mode = dict(), # used by copy
    unsafe_writes = dict(type='bool'), # should be available to any module using atomic_move
    attributes = dict(aliases=['attr']),
)

# Matches argument names that look like they carry a password (e.g. --password).
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')

# Can't use 07777 on Python 3, can't use 0o7777 on Python 2.4
PERM_BITS = int('07777', 8) # file mode permission bits
EXEC_PERM_BITS = int('00111', 8) # execute permission bits
DEFAULT_PERM = int('0666', 8) # default file permission bits
def get_platform():
    """Return the OS/platform name, e.g. ``'Linux'`` or ``'Darwin'``."""
    return platform.system()
def get_distribution():
    ''' return the distribution name '''
    # NOTE(review): platform._supported_dists and platform.linux_distribution()
    # are Python-2-era APIs (removed in Python 3.8); this function assumes an
    # older interpreter — confirm the supported runtime.
    if platform.system() == 'Linux':
        try:
            supported_dists = platform._supported_dists + ('arch','alpine')
            distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
            if not distribution and os.path.isfile('/etc/system-release'):
                # Fall back to /etc/system-release style distributions.
                distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
                if 'Amazon' in distribution:
                    distribution = 'Amazon'
                else:
                    distribution = 'OtherLinux'
        except:
            # FIXME: MethodMissing, I assume?
            distribution = platform.dist()[0].capitalize()
    else:
        # Non-Linux platforms report no distribution.
        distribution = None
    return distribution
def get_distribution_version():
    ''' return the distribution version '''
    # NOTE(review): relies on platform.linux_distribution(), removed in
    # Python 3.8 — same caveat as get_distribution().
    if platform.system() == 'Linux':
        try:
            distribution_version = platform.linux_distribution()[1]
            if not distribution_version and os.path.isfile('/etc/system-release'):
                # Fall back to /etc/system-release style distributions.
                distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
        except:
            # FIXME: MethodMissing, I assume?
            distribution_version = platform.dist()[1]
    else:
        # Non-Linux platforms report no distribution version.
        distribution_version = None
    return distribution_version
def get_all_subclasses(cls):
    '''
    Return every direct and indirect subclass of *cls*.

    ``cls.__subclasses__()`` only reports direct subclasses, so walk the whole
    inheritance tree.  Used by modules like Hardware or Network fact classes to
    retrieve all subclasses of a given class.

    Fixes two defects of the previous implementation: it mutated the worklist
    while iterating over it (skipping entries within a pass), and it could
    record the same class twice in diamond-shaped hierarchies.
    '''
    subclasses = []
    # Worklist of classes whose direct subclasses still need expanding.
    to_visit = [cls]
    while to_visit:
        parent = to_visit.pop()
        for child in parent.__subclasses__():
            if child not in subclasses:
                subclasses.append(child)
                to_visit.append(child)
    return subclasses
def load_platform_subclass(cls, *args, **kwargs):
    '''
    used by modules like User to have different implementations based on detected platform. See User
    module for an example.

    Selection order: a subclass matching both the current distribution and
    platform wins; otherwise a subclass matching the platform with no
    distribution; otherwise *cls* itself.
    '''
    this_platform = get_platform()
    distribution = get_distribution()
    subclass = None
    # get the most specific superclass for this platform
    if distribution is not None:
        for sc in get_all_subclasses(cls):
            if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
                subclass = sc
    if subclass is None:
        # Fall back to a platform-only match (distribution-agnostic subclass).
        for sc in get_all_subclasses(cls):
            if sc.platform == this_platform and sc.distribution is None:
                subclass = sc
    if subclass is None:
        subclass = cls
    # Allocate an instance of the chosen subclass; the caller's __init__ runs
    # afterwards as part of normal construction.
    return super(cls, subclass).__new__(subclass)
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to byte str

    Specialized for json return because this only handles, lists, tuples,
    and dict container types (the containers that the json module returns)
    '''
    if isinstance(d, text_type):
        return to_bytes(d, encoding=encoding, errors=errors)
    elif isinstance(d, dict):
        # convert key and value of every item, threading the fixed
        # encoding/errors through via repeat()
        return dict(map(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding), repeat(errors)))
    elif isinstance(d, list):
        return list(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
    elif isinstance(d, tuple):
        return tuple(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
    else:
        # non-container, non-text values (int, float, bool, None) pass through
        return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to unicode str

    Specialized for json return because this only handles, lists, tuples,
    and dict container types (the containers that the json module returns)
    '''
    if isinstance(d, binary_type):
        # Warning, can traceback
        return to_text(d, encoding=encoding, errors=errors)
    elif isinstance(d, dict):
        # convert key and value of every item, threading the fixed
        # encoding/errors through via repeat()
        return dict(map(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding), repeat(errors)))
    elif isinstance(d, list):
        return list(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
    elif isinstance(d, tuple):
        return tuple(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
    else:
        # non-container, non-bytes values pass through unchanged
        return d
def return_values(obj):
    """ Return native stringified values from datastructures.

    For use with removing sensitive values pre-jsonification."""
    if isinstance(obj, (text_type, binary_type)):
        # only yield non-empty strings; empty ones carry nothing to redact
        if obj:
            yield to_native(obj, errors='surrogate_or_strict')
        return
    elif isinstance(obj, SEQUENCETYPE):
        for element in obj:
            for subelement in return_values(element):
                yield subelement
    elif isinstance(obj, Mapping):
        # only the mapping's values are candidates, not its keys
        for element in obj.items():
            for subelement in return_values(element[1]):
                yield subelement
    elif isinstance(obj, (bool, NoneType)):
        # This must come before int because bools are also ints
        return
    elif isinstance(obj, NUMBERTYPES):
        yield to_native(obj, nonstring='simplerepr')
    else:
        raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def remove_values(value, no_log_strings):
    """ Remove strings in no_log_strings from value. If value is a container
    type, then remove a lot more"""
    if isinstance(value, (text_type, binary_type)):
        # Need native str type
        native_str_value = value
        if isinstance(value, text_type):
            value_is_text = True
            if PY2:
                native_str_value = to_bytes(value, encoding='utf-8', errors='surrogate_or_strict')
        elif isinstance(value, binary_type):
            value_is_text = False
            if PY3:
                native_str_value = to_text(value, encoding='utf-8', errors='surrogate_or_strict')
        # exact match: replace the whole value with the placeholder
        if native_str_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        # substring matches: mask each occurrence with eight asterisks
        for omit_me in no_log_strings:
            native_str_value = native_str_value.replace(omit_me, '*' * 8)
        # convert back to the caller's original str flavor (text vs bytes)
        if value_is_text and isinstance(native_str_value, binary_type):
            value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
        elif not value_is_text and isinstance(native_str_value, text_type):
            value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
        else:
            value = native_str_value
    elif isinstance(value, SEQUENCETYPE):
        return [remove_values(elem, no_log_strings) for elem in value]
    elif isinstance(value, Mapping):
        # keys are kept as-is; only values are sanitized
        return dict((k, remove_values(v, no_log_strings)) for k, v in value.items())
    elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
        # numbers/bools/None: compare their string form against the secrets
        stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
        if stringy_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        for omit_me in no_log_strings:
            if omit_me in stringy_value:
                return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
    elif isinstance(value, datetime.datetime):
        value = value.isoformat()
    else:
        raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
    return value
def heuristic_log_sanitize(data, no_log_values=None):
    ''' Remove strings that look like passwords from log messages '''
    # Currently filters:
    # user:pass@foo/whatever and http://username:pass@wherever/foo
    # This code has false positives and consumes parts of logs that are
    # not passwds

    # begin: start of a passwd containing string
    # end: end of a passwd containing string
    # sep: char between user and passwd
    # prev_begin: where in the overall string to start a search for
    #     a passwd
    # sep_search_end: where in the string to end a search for the sep
    data = to_native(data)
    output = []
    begin = len(data)
    prev_begin = begin
    sep = 1
    while sep:
        # Find the potential end of a passwd
        try:
            end = data.rindex('@', 0, begin)
        except ValueError:
            # No passwd in the rest of the data
            output.insert(0, data[0:begin])
            break
        # Search for the beginning of a passwd
        sep = None
        sep_search_end = end
        while not sep:
            # URL-style username+password
            try:
                begin = data.rindex('://', 0, sep_search_end)
            except ValueError:
                # No url style in the data, check for ssh style in the
                # rest of the string
                begin = 0
            # Search for separator
            try:
                sep = data.index(':', begin + 3, end)
            except ValueError:
                # No separator; choices:
                if begin == 0:
                    # Searched the whole string so there's no password here.
                    # Emit everything up to the previously-emitted chunk.
                    # (Bug fix: this used data[0:begin], which is always the
                    # empty string when begin == 0, silently dropping the
                    # remaining log text.)
                    output.insert(0, data[0:prev_begin])
                    break
                # Search for a different beginning of the password field.
                sep_search_end = begin
                continue
        if sep:
            # Password was found; remove it.
            output.insert(0, data[end:prev_begin])
            output.insert(0, '********')
            output.insert(0, data[begin:sep + 1])
            prev_begin = begin
    output = ''.join(output)
    if no_log_values:
        output = remove_values(output, no_log_values)
    return output
def bytes_to_human(size, isbits=False, unit=None):
    '''Format a byte (or bit) count as a human-readable string, e.g. 2048 -> '2.00 KB'.

    :arg size: numeric size to format
    :kwarg isbits: when True, label with bit units instead of byte units
    :kwarg unit: force a specific unit prefix (e.g. 'M'); when None the
        largest fitting prefix from SIZE_RANGES is chosen automatically
    '''
    base = 'Bytes'
    if isbits:
        base = 'bits'
    suffix = ''
    # walk SIZE_RANGES from largest to smallest limit; stop at the first
    # prefix that fits (or matches the explicitly requested unit)
    for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
        if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
            break
    if limit != 1:
        suffix += base[0]
    else:
        # sizes below the smallest prefix are labeled with the full base word
        suffix = base
    return '%.2f %s' % (float(size)/ limit, suffix)
def human_to_bytes(number, default_unit=None, isbits=False):
    '''
    Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument
    ex:
      human_to_bytes('10M') <=> human_to_bytes(10, 'M')

    :arg number: number with optional unit suffix, as str or numeric
    :kwarg default_unit: unit assumed when `number` carries none
    :kwarg isbits: interpret/validate the suffix as bits ('Kb') not bytes ('KB')
    :returns: integer number of bytes (or bits)
    :raises ValueError: when the number or unit suffix cannot be interpreted
    '''
    # raw string: avoids invalid-escape-sequence warnings for \s and \d
    m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
    if m is None:
        raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
    try:
        num = float(m.group(1))
    except ValueError:
        # narrowed from a bare except: only float() conversion can fail here
        raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
    unit = m.group(2)
    if unit is None:
        unit = default_unit
    if unit is None:
        # No unit given, returning raw number
        return int(round(num))
    range_key = unit[0].upper()
    try:
        limit = SIZE_RANGES[range_key]
    except KeyError:
        # narrowed from a bare except: unknown prefix -> KeyError
        raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
    # default value
    unit_class = 'B'
    unit_class_name = 'byte'
    # handling bits case
    if isbits:
        unit_class = 'b'
        unit_class_name = 'bit'
    # check unit value if more than one character (KB, MB)
    if len(unit) > 1:
        expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
        if range_key == 'B':
            expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
        if unit_class_name in unit.lower():
            # full word forms like 'kilobyte' are accepted
            pass
        elif unit[1] != unit_class:
            raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
    return int(round(num * limit))
def is_executable(path):
    '''is the given path executable?

    Limitations:

      * Does not account for FSACLs.
      * Most times we really want to know "Can the current user execute this
        file" This function does not tell us that, only if an execute bit is set.
    '''
    # OR together the three execute bits, then AND against the file's mode;
    # a non-zero (truthy) result means at least one execute bit is set.
    execute_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    file_mode = os.stat(path)[stat.ST_MODE]
    return execute_bits & file_mode
def _load_params():
    ''' read the modules parameters and store them globally.

    This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed the module. Since
    this is so closely tied to the implementation of modules we cannot
    guarantee API stability for it (it may change between versions) however we
    will try not to break it gratuitously. It is certainly more future-proof
    to call this function and consume its outputs than to implement the logic
    inside it as a copy in your own code.
    '''
    global _ANSIBLE_ARGS
    # reuse the raw args if they were already read once (this function may
    # be called more than once per process)
    if _ANSIBLE_ARGS is not None:
        buffer = _ANSIBLE_ARGS
    else:
        # debug overrides to read args from file or cmdline
        # Avoid tracebacks when locale is non-utf8
        # We control the args and we pass them as utf8
        if len(sys.argv) > 1:
            if os.path.isfile(sys.argv[1]):
                fd = open(sys.argv[1], 'rb')
                buffer = fd.read()
                fd.close()
            else:
                buffer = sys.argv[1]
                if PY3:
                    buffer = buffer.encode('utf-8', errors='surrogateescape')
        # default case, read from stdin
        else:
            if PY2:
                buffer = sys.stdin.read()
            else:
                # py3 stdin is text mode; use the underlying binary buffer
                buffer = sys.stdin.buffer.read()
        _ANSIBLE_ARGS = buffer
    try:
        params = json.loads(buffer.decode('utf-8'))
    except ValueError:
        # This helper used too early for fail_json to work.
        print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
        sys.exit(1)
    if PY2:
        params = json_dict_unicode_to_bytes(params)
    try:
        return params['ANSIBLE_MODULE_ARGS']
    except KeyError:
        # This helper does not have access to fail_json so we have to print
        # json output on our own.
        print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
              '"failed": true}')
        sys.exit(1)
def env_fallback(*args, **kwargs):
    ''' Load value from environment '''
    # Return the value of the first environment variable in *args that is
    # set; raise AnsibleFallbackNotFound when none of them exist.
    for name in args:
        try:
            return os.environ[name]
        except KeyError:
            continue
    raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
def format_attributes(attributes):
    # Translate each flag character into its human-readable name, silently
    # skipping characters with no entry in the FILE_ATTRIBUTES map.
    return [FILE_ATTRIBUTES[flag] for flag in attributes if flag in FILE_ATTRIBUTES]
def get_flags_from_attributes(attributes):
    # Inverse of format_attributes(): collect the flag character of every
    # attribute name present in `attributes`, joined into a single string.
    return ''.join(flag for flag, name in FILE_ATTRIBUTES.items() if name in attributes)
class AnsibleFallbackNotFound(Exception):
    # Raised by fallback callables (e.g. env_fallback) when none of the
    # requested fallback sources yields a value.
    pass
class AnsibleModule(object):
    def __init__(self, argument_spec, bypass_checks=False, no_log=False,
        check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
        required_one_of=None, add_file_common_args=False, supports_check_mode=False,
        required_if=None):

        '''
        common code for quickly building an ansible module in Python
        (although you can write modules in anything that can return JSON)
        see library/* for examples
        '''
        self._name = os.path.basename(__file__) #initialize name until we can parse from options
        self.argument_spec = argument_spec
        self.supports_check_mode = supports_check_mode
        self.check_mode = False
        self.no_log = no_log
        self.cleanup_files = []
        self._debug = False
        self._diff = False
        self._socket_path = None
        self._verbosity = 0
        # May be used to set modifications to the environment for any
        # run_command invocation
        self.run_command_environ_update = {}
        self._warnings = []
        self._deprecations = []
        self.aliases = {}
        # internal '_ansible_*' control params are always legal inputs
        self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity',
            '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility',
            '_ansible_socket']

        if add_file_common_args:
            # merge the shared file options (path/mode/owner/...) into the
            # module's own spec without overriding explicit entries
            for k, v in FILE_COMMON_ARGUMENTS.items():
                if k not in self.argument_spec:
                    self.argument_spec[k] = v

        self._load_params()
        self._set_fallbacks()

        # append to legal_inputs and then possibly check against them
        try:
            self.aliases = self._handle_aliases()
        except Exception:
            e = get_exception()
            # Use exceptions here because it isn't safe to call fail_json until no_log is processed
            print('\n{"failed": true, "msg": "Module alias error: %s"}' % str(e))
            sys.exit(1)

        # Save parameter values that should never be logged
        self.no_log_values = set()
        # Use the argspec to determine which args are no_log
        for arg_name, arg_opts in self.argument_spec.items():
            if arg_opts.get('no_log', False):
                # Find the value for the no_log'd param
                no_log_object = self.params.get(arg_name, None)
                if no_log_object:
                    self.no_log_values.update(return_values(no_log_object))

            if arg_opts.get('removed_in_version') is not None and arg_name in self.params:
                self._deprecations.append({
                    'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
                    'version': arg_opts.get('removed_in_version')
                })

        # check the locale as set by the current environment, and reset to
        # a known valid (LANG=C) if it's an invalid/unavailable locale
        self._check_locale()

        self._check_arguments(check_invalid_arguments)

        # check exclusive early
        if not bypass_checks:
            self._check_mutually_exclusive(mutually_exclusive)

        self._set_defaults(pre=True)

        # dispatch table mapping argument_spec 'type' names to checkers
        self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
            'str': self._check_type_str,
            'list': self._check_type_list,
            'dict': self._check_type_dict,
            'bool': self._check_type_bool,
            'int': self._check_type_int,
            'float': self._check_type_float,
            'path': self._check_type_path,
            'raw': self._check_type_raw,
            'jsonarg': self._check_type_jsonarg,
            'json': self._check_type_jsonarg,
            'bytes': self._check_type_bytes,
            'bits': self._check_type_bits,
        }
        if not bypass_checks:
            self._check_required_arguments()
            self._check_argument_types()
            self._check_argument_values()
            self._check_required_together(required_together)
            self._check_required_one_of(required_one_of)
            self._check_required_if(required_if)

        self._set_defaults(pre=False)

        if not self.no_log:
            self._log_invocation()

        # finally, make sure we're in a sane working dir
        self._set_cwd()
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
    def load_file_common_arguments(self, params):
        '''
        many modules deal with files, this encapsulates common
        options that the file module accepts such that it is directly
        available to all modules and they can share code.
        '''
        # 'path' wins over 'dest' when both are present
        path = params.get('path', params.get('dest', None))
        if path is None:
            return {}
        else:
            path = os.path.expanduser(os.path.expandvars(path))

        b_path = to_bytes(path, errors='surrogate_or_strict')
        # if the path is a symlink, and we're following links, get
        # the target of the link instead for testing
        if params.get('follow', False) and os.path.islink(b_path):
            b_path = os.path.realpath(b_path)
            path = to_native(b_path)

        mode = params.get('mode', None)
        owner = params.get('owner', None)
        group = params.get('group', None)

        # selinux related options
        seuser = params.get('seuser', None)
        serole = params.get('serole', None)
        setype = params.get('setype', None)
        selevel = params.get('selevel', None)
        secontext = [seuser, serole, setype]

        if self.selinux_mls_enabled():
            secontext.append(selevel)

        default_secontext = self.selinux_default_context(path)
        # NOTE(review): 'i is not None' is always true for a range() index;
        # the effective condition is just the '_default' comparison.
        for i in range(len(default_secontext)):
            if i is not None and secontext[i] == '_default':
                secontext[i] = default_secontext[i]

        attributes = params.get('attributes', None)
        return dict(
            path=path, mode=mode, owner=owner, group=group,
            seuser=seuser, serole=serole, setype=setype,
            selevel=selevel, secontext=secontext, attributes=attributes,
        )
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
    def selinux_enabled(self):
        # Without python bindings we cannot manage SELinux; if the target is
        # actually enforcing (selinuxenabled exits 0) abort loudly instead of
        # silently skipping context management.
        if not HAVE_SELINUX:
            seenabled = self.get_bin_path('selinuxenabled')
            if seenabled is not None:
                (rc,out,err) = self.run_command(seenabled)
                if rc == 0:
                    self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
            return False
        if selinux.is_selinux_enabled() == 1:
            return True
        else:
            return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
    def selinux_default_context(self, path, mode=0):
        # Ask the policy (matchpathcon) what context `path` should have;
        # fall back to the all-None placeholder on any failure.
        context = self.selinux_initial_context()
        if not HAVE_SELINUX or not self.selinux_enabled():
            return context
        try:
            ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
        except OSError:
            return context
        if ret[0] == -1:
            return context
        # Limit split to 4 because the selevel, the last in the list,
        # may contain ':' characters
        context = ret[1].split(':', 3)
        return context
    def selinux_context(self, path):
        # Read the current context of `path` (without following symlinks);
        # returns the placeholder context when SELinux is unavailable.
        context = self.selinux_initial_context()
        if not HAVE_SELINUX or not self.selinux_enabled():
            return context
        try:
            ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
        except OSError:
            e = get_exception()
            if e.errno == errno.ENOENT:
                self.fail_json(path=path, msg='path %s does not exist' % path)
            else:
                self.fail_json(path=path, msg='failed to retrieve selinux context')
        if ret[0] == -1:
            return context
        # Limit split to 4 because the selevel, the last in the list,
        # may contain ':' characters
        context = ret[1].split(':', 3)
        return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_then_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path = os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
    def is_special_selinux_path(self, path):
        """
        Returns a tuple containing (True, selinux_context) if the given path is on a
        NFS or other 'special' fs  mount point, otherwise the return will be (False, None).
        """
        try:
            f = open('/proc/mounts', 'r')
            mount_data = f.readlines()
            f.close()
        except:
            # no /proc/mounts (or unreadable): treat nothing as special
            return (False, None)
        path_mount_point = self.find_mount_point(path)
        for line in mount_data:
            (device, mount_point, fstype, options, rest) = line.split(' ', 4)

            if path_mount_point == mount_point:
                # substring match against the configured special filesystems
                for fs in self._selinux_special_fs:
                    if fs in fstype:
                        special_context = self.selinux_context(path_mount_point)
                        return (True, special_context)

        return (False, None)
    def set_default_selinux_context(self, path, changed):
        # Reset `path` to the context the policy considers default; a no-op
        # unless SELinux bindings are available and SELinux is enabled.
        if not HAVE_SELINUX or not self.selinux_enabled():
            return changed
        context = self.selinux_default_context(path)
        return self.set_context_if_different(path, context, False)
    def set_context_if_different(self, path, context, changed, diff=None):
        # Apply the requested SELinux context to `path` if it differs from
        # the current one; returns the (possibly updated) changed flag and
        # records before/after in `diff` when supplied.
        if not HAVE_SELINUX or not self.selinux_enabled():
            return changed

        cur_context = self.selinux_context(path)
        new_context = list(cur_context)
        # Iterate over the current context instead of the
        # argument context, which may have selevel.
        (is_special_se, sp_context) = self.is_special_selinux_path(path)
        if is_special_se:
            # special filesystems (e.g. NFS) force their mount-point context
            new_context = sp_context
        else:
            for i in range(len(cur_context)):
                if len(context) > i:
                    if context[i] is not None and context[i] != cur_context[i]:
                        new_context[i] = context[i]
                    elif context[i] is None:
                        new_context[i] = cur_context[i]

        if cur_context != new_context:
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['secontext'] = cur_context
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['secontext'] = new_context

            try:
                if self.check_mode:
                    return True
                rc = selinux.lsetfilecon(to_native(path),
                    str(':'.join(new_context)))
            except OSError:
                e = get_exception()
                self.fail_json(path=path, msg='invalid selinux context: %s' % str(e), new_context=new_context, cur_context=cur_context, input_was=context)
            if rc != 0:
                self.fail_json(path=path, msg='set selinux context failed')
            changed = True
        return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_then_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path = to_text(b_path, errors='surrogate_then_strict')
if owner is None:
return changed
orig_uid, orig_gid = self.user_and_group(path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except OSError:
self.fail_json(path=path, msg='chown failed')
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_then_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path = to_text(b_path, errors='surrogate_then_strict')
if group is None:
return changed
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
    def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
        # Ensure `path` has permission bits `mode` (int, octal string such as
        # '0644', or chmod-style symbolic string such as 'u+rwx').  Returns
        # the updated changed flag; honors check mode and fills `diff`.
        # NOTE(review): 'surrogate_then_strict' does not look like a valid
        # error-strategy name (cf. surrogate_or_strict) — confirm.
        b_path = to_bytes(path, errors='surrogate_then_strict')
        if expand:
            b_path = os.path.expanduser(os.path.expandvars(b_path))
        path = to_text(b_path, errors='surrogate_then_strict')
        path_stat = os.lstat(b_path)

        if mode is None:
            return changed

        if not isinstance(mode, int):
            try:
                # octal string form first ('644', '0644', ...)
                mode = int(mode, 8)
            except Exception:
                try:
                    # then symbolic form ('u+x,g-w', ...)
                    mode = self._symbolic_mode_to_octal(path_stat, mode)
                except Exception:
                    e = get_exception()
                    self.fail_json(path=path,
                        msg="mode must be in octal or symbolic form",
                        details=str(e))

            if mode != stat.S_IMODE(mode):
                # prevent mode from having extra info orbeing invalid long number
                self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)

        prev_mode = stat.S_IMODE(path_stat.st_mode)

        if prev_mode != mode:

            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['mode'] = '0%03o' % prev_mode
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['mode'] = '0%03o' % mode

            if self.check_mode:
                return True
            # FIXME: comparison against string above will cause this to be executed
            # every time
            try:
                if hasattr(os, 'lchmod'):
                    os.lchmod(b_path, mode)
                else:
                    if not os.path.islink(b_path):
                        os.chmod(b_path, mode)
                    else:
                        # Attempt to set the perms of the symlink but be
                        # careful not to change the perms of the underlying
                        # file while trying
                        underlying_stat = os.stat(b_path)
                        os.chmod(b_path, mode)
                        new_underlying_stat = os.stat(b_path)
                        if underlying_stat.st_mode != new_underlying_stat.st_mode:
                            os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
            except OSError:
                e = get_exception()
                if os.path.islink(b_path) and e.errno == errno.EPERM:  # Can't set mode on symbolic links
                    pass
                elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
                    pass
                else:
                    raise e
            except Exception:
                e = get_exception()
                self.fail_json(path=path, msg='chmod failed', details=str(e))

            # re-stat to report only what actually took effect
            path_stat = os.lstat(b_path)
            new_mode = stat.S_IMODE(path_stat.st_mode)

            if new_mode != prev_mode:
                changed = True
        return changed
    def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
        # Ensure `path` has the ext2/3/4 file attribute flags `attributes`
        # (as accepted by chattr, e.g. 'i', 'a'); returns the updated
        # changed flag, honoring check mode and filling `diff`.
        if attributes is None:
            return changed

        # NOTE(review): 'surrogate_then_strict' does not look like a valid
        # error-strategy name (cf. surrogate_or_strict) — confirm.
        b_path = to_bytes(path, errors='surrogate_then_strict')
        if expand:
            b_path = os.path.expanduser(os.path.expandvars(b_path))
        path = to_text(b_path, errors='surrogate_then_strict')

        existing = self.get_file_attributes(b_path)

        if existing.get('attr_flags','') != attributes:
            attrcmd = self.get_bin_path('chattr')
            if attrcmd:
                # '=' sets the flags exactly, clearing any others
                attrcmd = [attrcmd, '=%s' % attributes, b_path]
                changed = True

                if diff is not None:
                    if 'before' not in diff:
                        diff['before'] = {}
                    diff['before']['attributes'] = existing.get('attr_flags')
                    if 'after' not in diff:
                        diff['after'] = {}
                    diff['after']['attributes'] = attributes

                if not self.check_mode:
                    try:
                        rc, out, err = self.run_command(attrcmd)
                        if rc != 0 or err:
                            raise Exception("Error while setting attributes: %s" % (out + err))
                    except:
                        e = get_exception()
                        self.fail_json(path=path, msg='chattr failed', details=str(e))
        return changed
    def get_file_attributes(self, path):
        # Query lsattr for `path`; returns a dict with 'attr_flags' (e.g.
        # 'ia'), 'version', and 'attributes' (human-readable names), or an
        # empty dict when lsattr is unavailable or fails (best-effort).
        output = {}
        attrcmd = self.get_bin_path('lsattr', False)
        if attrcmd:
            # -v prints the version, -d lists directories themselves
            attrcmd = [attrcmd, '-vd', path]
            try:
                rc, out, err = self.run_command(attrcmd)
                if rc == 0:
                    # lsattr -vd output: "<version> <flags> <path>"
                    res = out.split(' ')[0:2]
                    output['attr_flags'] = res[1].replace('-','').strip()
                    output['version'] = res[0].strip()
                    output['attributes'] = format_attributes(output['attr_flags'])
            except:
                # deliberate best-effort: attribute info is optional
                pass
        return output
def _symbolic_mode_to_octal(self, path_stat, symbolic_mode):
new_mode = stat.S_IMODE(path_stat.st_mode)
mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst-]*|[ugo])$')
for mode in symbolic_mode.split(','):
match = mode_re.match(mode)
if match:
users = match.group('users')
operator = match.group('operator')
perms = match.group('perms')
if users == 'a':
users = 'ugo'
for user in users:
mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms)
new_mode = self._apply_operation_to_mode(user, operator, mode_to_apply, new_mode)
else:
raise ValueError("bad symbolic permission for mode: %s" % mode)
return new_mode
def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
    def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms):
        # Translate the perms part of one symbolic-mode clause (e.g. 'rwX'
        # or a copy-reference 'u'/'g'/'o') into an octal mask for the given
        # user class ('u', 'g' or 'o').
        prev_mode = stat.S_IMODE(path_stat.st_mode)

        is_directory = stat.S_ISDIR(path_stat.st_mode)
        has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
        # 'X' applies execute only to directories or files already executable
        apply_X_permission = is_directory or has_x_permissions

        # Permission bits constants documented at:
        # http://docs.python.org/2/library/stat.html#stat.S_ISUID
        if apply_X_permission:
            X_perms = {
                'u': {'X': stat.S_IXUSR},
                'g': {'X': stat.S_IXGRP},
                'o': {'X': stat.S_IXOTH}
            }
        else:
            X_perms = {
                'u': {'X': 0},
                'g': {'X': 0},
                'o': {'X': 0}
            }

        # 'u'/'g'/'o' entries copy the referenced class's current bits,
        # shifted into this class's bit positions
        user_perms_to_modes = {
            'u': {
                'r': stat.S_IRUSR,
                'w': stat.S_IWUSR,
                'x': stat.S_IXUSR,
                's': stat.S_ISUID,
                't': 0,
                'u': prev_mode & stat.S_IRWXU,
                'g': (prev_mode & stat.S_IRWXG) << 3,
                'o': (prev_mode & stat.S_IRWXO) << 6 },
            'g': {
                'r': stat.S_IRGRP,
                'w': stat.S_IWGRP,
                'x': stat.S_IXGRP,
                's': stat.S_ISGID,
                't': 0,
                'u': (prev_mode & stat.S_IRWXU) >> 3,
                'g': prev_mode & stat.S_IRWXG,
                'o': (prev_mode & stat.S_IRWXO) << 3 },
            'o': {
                'r': stat.S_IROTH,
                'w': stat.S_IWOTH,
                'x': stat.S_IXOTH,
                's': 0,
                't': stat.S_ISVTX,
                'u': (prev_mode & stat.S_IRWXU) >> 6,
                'g': (prev_mode & stat.S_IRWXG) >> 3,
                'o': prev_mode & stat.S_IRWXO }
        }

        # Insert X_perms into user_perms_to_modes
        for key, value in X_perms.items():
            user_perms_to_modes[key].update(value)

        # OR together the mask of every perm character in the clause
        or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm]
        return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
    def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
        # Backwards-compatible alias for set_fs_attributes_if_different.
        return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
    def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
        # Backwards-compatible alias for set_fs_attributes_if_different.
        return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
    def add_path_info(self, kwargs):
        '''
        for results that are files, supplement the info about the file
        in the return path with stats about the file path.
        '''
        path = kwargs.get('path', kwargs.get('dest', None))
        if path is None:
            return kwargs
        b_path = to_bytes(path, errors='surrogate_or_strict')
        if os.path.exists(b_path):
            (uid, gid) = self.user_and_group(path)
            kwargs['uid'] = uid
            kwargs['gid'] = gid
            # resolve numeric ids to names; fall back to the number as a string
            try:
                user = pwd.getpwuid(uid)[0]
            except KeyError:
                user = str(uid)
            try:
                group = grp.getgrgid(gid)[0]
            except KeyError:
                group = str(gid)
            kwargs['owner'] = user
            kwargs['group'] = group
            st = os.lstat(b_path)
            kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
            # secontext not yet supported
            if os.path.islink(b_path):
                kwargs['state'] = 'link'
            elif os.path.isdir(b_path):
                kwargs['state'] = 'directory'
            elif os.stat(b_path).st_nlink > 1:
                kwargs['state'] = 'hard'
            else:
                kwargs['state'] = 'file'
            if HAVE_SELINUX and self.selinux_enabled():
                kwargs['secontext'] = ':'.join(self.selinux_context(path))
            kwargs['size'] = st[stat.ST_SIZE]
        else:
            kwargs['state'] = 'absent'
        return kwargs
    def _check_locale(self):
        '''
        Uses the locale module to test the currently set locale
        (per the LANG and LC_CTYPE environment settings)
        '''
        try:
            # setting the locale to '' uses the default locale
            # as it would be returned by locale.getdefaultlocale()
            locale.setlocale(locale.LC_ALL, '')
        except locale.Error:
            # fallback to the 'C' locale, which may cause unicode
            # issues but is preferable to simply failing because
            # of an unknown locale
            locale.setlocale(locale.LC_ALL, 'C')
            # also export to the environment so child processes
            # (run_command) see a valid locale
            os.environ['LANG'] = 'C'
            os.environ['LC_ALL'] = 'C'
            os.environ['LC_MESSAGES'] = 'C'
        except Exception:
            e = get_exception()
            self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
    def _handle_aliases(self, spec=None):
        # this uses exceptions as it happens before we can safely call fail_json
        # Registers every spec key and alias as a legal input, copies alias
        # values onto their canonical parameter name, and returns the
        # alias -> canonical-name mapping.
        aliases_results = {} #alias:canon
        if spec is None:
            spec = self.argument_spec
        for (k,v) in spec.items():
            self._legal_inputs.append(k)
            aliases = v.get('aliases', None)
            default = v.get('default', None)
            required = v.get('required', False)
            if default is not None and required:
                # not alias specific but this is a good place to check this
                raise Exception("internal error: required and default are mutually exclusive for %s" % k)
            if aliases is None:
                continue
            if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
                raise Exception('internal error: aliases must be a list or tuple')
            for alias in aliases:
                self._legal_inputs.append(alias)
                aliases_results[alias] = k
                if alias in self.params:
                    # alias supplied: mirror its value under the canonical key
                    self.params[k] = self.params[alias]

        return aliases_results
def _check_arguments(self, check_invalid_arguments):
    """Consume the internal ``_ansible_*`` control parameters (check
    mode, no_log, debug, diff, ...) and optionally reject any supplied
    parameter that was not declared in the argument spec."""
    self._syslog_facility = 'LOG_USER'
    unsupported_parameters = set()
    for (k,v) in list(self.params.items()):
        if k == '_ansible_check_mode' and v:
            self.check_mode = True
        elif k == '_ansible_no_log':
            self.no_log = self.boolean(v)
        elif k == '_ansible_debug':
            self._debug = self.boolean(v)
        elif k == '_ansible_diff':
            self._diff = self.boolean(v)
        elif k == '_ansible_verbosity':
            self._verbosity = v
        elif k == '_ansible_selinux_special_fs':
            self._selinux_special_fs = v
        elif k == '_ansible_syslog_facility':
            self._syslog_facility = v
        elif k == '_ansible_version':
            self.ansible_version = v
        elif k == '_ansible_module_name':
            self._name = v
        elif k == '_ansible_socket':
            self._socket_path = v
        elif check_invalid_arguments and k not in self._legal_inputs:
            unsupported_parameters.add(k)
        # clean up internal params so module code never sees them
        if k.startswith('_ansible_'):
            del self.params[k]
    if unsupported_parameters:
        self.fail_json(msg="Unsupported parameters for (%s) module: %s. Supported parameters include: %s" % (self._name,
                                                                                                             ','.join(sorted(list(unsupported_parameters))),
                                                                                                             ','.join(sorted(self.argument_spec.keys()))))
    if self.check_mode and not self.supports_check_mode:
        self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check):
count = 0
for term in check:
if term in self.params:
count += 1
return count
def _check_mutually_exclusive(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count > 1:
self.fail_json(msg="parameters are mutually exclusive: %s" % (check,))
def _check_required_one_of(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count == 0:
self.fail_json(msg="one of the following is required: %s" % ','.join(check))
def _check_required_together(self, spec):
if spec is None:
return
for check in spec:
counts = [ self._count_terms([field]) for field in check ]
non_zero = [ c for c in counts if c > 0 ]
if len(non_zero) > 0:
if 0 in counts:
self.fail_json(msg="parameters are required together: %s" % (check,))
def _check_required_arguments(self, spec=None, param=None ):
''' ensure all required arguments are present '''
missing = []
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k,v) in spec.items():
required = v.get('required', False)
if required and k not in param:
missing.append(k)
if len(missing) > 0:
self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
def _check_required_if(self, spec):
    ''' ensure that parameters which conditionally required are present

    Each entry of *spec* is either a 3-tuple (key, value, requirements)
    meaning "when params[key] == value, all of *requirements* must be
    present", or a 4-tuple (key, value, requirements, is_one_of) where
    a true *is_one_of* relaxes that to "at least one must be present".
    '''
    if spec is None:
        return
    for sp in spec:
        missing = []
        max_missing_count = 0
        is_one_of = False
        if len(sp) == 4:
            key, val, requirements, is_one_of = sp
        else:
            key, val, requirements = sp
        # is_one_of is True at least one requirement should be
        # present, else all requirements should be present.
        if is_one_of:
            max_missing_count = len(requirements)
        if key in self.params and self.params[key] == val:
            for check in requirements:
                count = self._count_terms((check,))
                if count == 0:
                    missing.append(check)
        # all-required mode: any missing entry trips (max_missing_count 0);
        # one-of mode: only trips when every requirement is missing
        if len(missing) and len(missing) >= max_missing_count:
            self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)))
def _check_argument_values(self, spec=None, param=None):
    ''' ensure all arguments have the requested values, and there are no stray arguments '''
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    for (k,v) in spec.items():
        choices = v.get('choices',None)
        if choices is None:
            # no 'choices' restriction declared for this parameter
            continue
        if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
            if k in param:
                if param[k] not in choices:
                    # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
                    # the value. If we can't figure this out, module author is responsible.
                    lowered_choices = None
                    if param[k] == 'False':
                        # YAML turned e.g. 'no' into False and the repr
                        # back into 'False'; recover the original spelling
                        # when exactly one falsy spelling is a valid choice
                        lowered_choices = _lenient_lowercase(choices)
                        FALSEY = frozenset(BOOLEANS_FALSE)
                        overlap = FALSEY.intersection(choices)
                        if len(overlap) == 1:
                            # Extract from a set
                            (param[k],) = overlap
                    if param[k] == 'True':
                        if lowered_choices is None:
                            lowered_choices = _lenient_lowercase(choices)
                        TRUTHY = frozenset(BOOLEANS_TRUE)
                        overlap = TRUTHY.intersection(choices)
                        if len(overlap) == 1:
                            (param[k],) = overlap
                    if param[k] not in choices:
                        choices_str=",".join([to_native(c) for c in choices])
                        msg="value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
                        self.fail_json(msg=msg)
        else:
            self.fail_json(msg="internal error: choices for argument %s are not iterable: %s" % (k, choices))
def safe_eval(self, value, locals=None, include_exceptions=False):
    """Evaluate *value* as a Python literal while refusing method calls
    and imports.

    Non-strings, and strings that look like attribute calls or imports,
    are returned unchanged.  When *include_exceptions* is true, returns
    a ``(result, exception_or_None)`` tuple instead of the bare result.
    """
    # do not allow method calls to modules
    if not isinstance(value, string_types):
        # already templated to a datavaluestructure, perhaps?
        if include_exceptions:
            return (value, None)
        return value
    if re.search(r'\w\.\w+\(', value):
        # looks like an attribute/method call: refuse to evaluate
        if include_exceptions:
            return (value, None)
        return value
    # do not allow imports
    if re.search(r'import \w+', value):
        if include_exceptions:
            return (value, None)
        return value
    try:
        # literal_eval only accepts literals, so this cannot execute code
        result = literal_eval(value)
        if include_exceptions:
            return (result, None)
        else:
            return result
    except Exception:
        e = get_exception()
        if include_exceptions:
            return (value, e)
        return value
def _check_type_str(self, value):
    # Coerce *value* to a string; real strings pass through untouched.
    if isinstance(value, string_types):
        return value
    # Note: This could throw a unicode error if value's __str__() method
    # returns non-ascii. Have to port utils.to_bytes() if that happens
    return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, string_types):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [ str(value) ]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
    """Coerce *value* to a dict.

    Accepts real dicts, JSON object strings ('{...}', with a fallback
    to a restricted literal eval), and 'k=v' strings separated by
    unquoted commas or spaces (quotes and backslash escapes honoured).
    Raises TypeError otherwise.
    """
    if isinstance(value, dict):
        return value
    if isinstance(value, string_types):
        if value.startswith("{"):
            try:
                return json.loads(value)
            except:
                # not valid JSON; try a safe python-literal parse instead
                (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
                if exc is not None:
                    raise TypeError('unable to evaluate string as dictionary')
                return result
        elif '=' in value:
            # hand-rolled tokenizer: accumulate chars into field_buffer,
            # splitting on unquoted ',' or ' ', with '\' escaping the
            # next character and matching '\''/'"' delimiting quotes
            fields = []
            field_buffer = []
            in_quote = False
            in_escape = False
            for c in value.strip():
                if in_escape:
                    field_buffer.append(c)
                    in_escape = False
                elif c == '\\':
                    in_escape = True
                elif not in_quote and c in ('\'', '"'):
                    # remember which quote char opened the region
                    in_quote = c
                elif in_quote and in_quote == c:
                    in_quote = False
                elif not in_quote and c in (',', ' '):
                    field = ''.join(field_buffer)
                    if field:
                        fields.append(field)
                    field_buffer = []
                else:
                    field_buffer.append(c)
            # flush the trailing field
            field = ''.join(field_buffer)
            if field:
                fields.append(field)
            # split each token on the FIRST '=' only
            return dict(x.split("=", 1) for x in fields)
        else:
            raise TypeError("dictionary requested, could not parse JSON or key=value")
    raise TypeError('%s cannot be converted to a dict' % type(value))
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, string_types) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, string_types):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, (binary_type, text_type, int)):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
    """Coerce to a string, then expand '~' and environment variables."""
    expanded = os.path.expandvars(self._check_type_str(value))
    return os.path.expanduser(expanded)
def _check_type_jsonarg(self, value):
    """Return a JSON string for *value*.

    The controller sometimes turns a JSON string into a dict/list, so
    containers are serialized back to JSON here; strings are assumed to
    already be JSON and are merely stripped.
    """
    if isinstance(value, (text_type, binary_type)):
        return value.strip()
    if isinstance(value, (list, tuple, dict)):
        return json.dumps(value)
    raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_type_bytes(self, value):
    """Coerce a human size string ('1K', '2.5M', ...) to a byte count.

    Raises TypeError when *value* cannot be parsed.
    """
    try:
        # BUG FIX: the original computed the conversion but never
        # returned it, so a 'bytes'-typed parameter became None after
        # _check_argument_types assigned the checker's result.
        return self.human_to_bytes(value)
    except ValueError:
        raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
    """Coerce a human size string ('1Mb', ...) to a bit count.

    Raises TypeError when *value* cannot be parsed.
    """
    try:
        # BUG FIX: same missing-return defect as _check_type_bytes; the
        # converted value must be handed back to the caller.
        return self.human_to_bytes(value, isbits=True)
    except ValueError:
        raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _check_argument_types(self, spec=None, param=None):
    ''' ensure all arguments have the requested type '''
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param= self.params
    for (k, v) in spec.items():
        wanted = v.get('type', None)
        if k not in param:
            continue
        if wanted is None:
            # Mostly we want to default to str.
            # For values set to None explicitly, return None instead as
            # that allows a user to unset a parameter
            if self.params[k] is None:
                continue
            wanted = 'str'
        value = self.params[k]
        if value is None:
            continue
        try:
            # dispatcher maps type name -> bound _check_type_* method
            type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
        except KeyError:
            self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
        try:
            self.params[k] = type_checker(value)
        except (TypeError, ValueError):
            e = get_exception()
            self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" % (k, type(value), wanted, e))
        # deal with subspecs
        # NOTE(review): this rebinds the local name `spec` inside the
        # loop; spec.items() above was already materialized for this
        # iteration, but the original outer spec is lost afterwards.
        spec = None
        if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
            spec = v.get('spec', None)
        if spec:
            # recursively validate the nested dict parameter
            self._check_required_arguments(spec, param[k])
            self._check_argument_types(spec, param[k])
            self._check_argument_values(spec, param[k])
def _set_defaults(self, pre=True):
for (k,v) in self.argument_spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in self.params:
self.params[k] = default
else:
# make sure things without a default still get set None
if k not in self.params:
self.params[k] = default
def _set_fallbacks(self):
    """For parameters still unset, apply their declared fallback.

    A spec's 'fallback' is a tuple ``(callable, args..., kwargs?)``
    (e.g. ``(env_fallback, ['FOO'])``); a trailing dict is treated as
    keyword arguments.  AnsibleFallbackNotFound from the strategy means
    'no fallback value available' and leaves the parameter unset.
    """
    for k,v in self.argument_spec.items():
        fallback = v.get('fallback', (None,))
        fallback_strategy = fallback[0]
        fallback_args = []
        fallback_kwargs = {}
        if k not in self.params and fallback_strategy is not None:
            for item in fallback[1:]:
                if isinstance(item, dict):
                    fallback_kwargs = item
                else:
                    fallback_args = item
            try:
                self.params[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
            except AnsibleFallbackNotFound:
                continue
def _load_params(self):
    ''' read the input and set the params attribute.

    This method is for backwards compatibility.  The guts of the function
    were moved out in 2.1 so that custom modules could read the parameters.
    '''
    # debug overrides to read args from file or cmdline
    # (delegates to the module-level _load_params helper)
    self.params = _load_params()
def _log_to_syslog(self, msg):
    """Write *msg* to syslog tagged 'ansible-<module>'; silently a no-op
    when the syslog module is unavailable (non-unix platforms)."""
    if HAS_SYSLOG:
        module = 'ansible-%s' % self._name
        # fall back to LOG_USER if the configured facility name is unknown
        facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
        syslog.openlog(str(module), 0, facility)
        syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
    """Emit *msg* through self.log(), but only when debug mode is on."""
    if not self._debug:
        return
    self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
    """Log *msg* for this module run, preferring systemd-journal and
    falling back to syslog; honours self.no_log and scrubs any
    registered no_log values out of the message first."""
    if not self.no_log:
        if log_args is None:
            log_args = dict()
        module = 'ansible-%s' % self._name
        if isinstance(module, binary_type):
            module = module.decode('utf-8', 'replace')
        # 6655 - allow for accented characters
        if not isinstance(msg, (binary_type, text_type)):
            raise TypeError("msg should be a string (got %s)" % type(msg))
        # We want journal to always take text type
        # syslog takes bytes on py2, text type on py3
        if isinstance(msg, binary_type):
            journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
        else:
            # TODO: surrogateescape is a danger here on Py3
            journal_msg = remove_values(msg, self.no_log_values)
        if PY3:
            syslog_msg = journal_msg
        else:
            syslog_msg = journal_msg.encode('utf-8', 'replace')
        if has_journal:
            # journald takes structured KEY=value pairs alongside the message
            journal_args = [("MODULE", os.path.basename(__file__))]
            for arg in log_args:
                journal_args.append((arg.upper(), str(log_args[arg])))
            try:
                journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
            except IOError:
                # fall back to syslog since logging to journal failed
                self._log_to_syslog(syslog_msg)
        else:
            self._log_to_syslog(syslog_msg)
def _log_invocation(self):
    ''' log that ansible ran the module '''
    # TODO: generalize a separate log function and make log_invocation use it
    # Sanitize possible password argument when logging.
    log_args = dict()
    for param in self.params:
        # resolve aliases so the no_log flag of the canonical entry applies
        canon = self.aliases.get(param, param)
        arg_opts = self.argument_spec.get(canon, {})
        no_log = arg_opts.get('no_log', False)
        if self.boolean(no_log):
            log_args[param] = 'NOT_LOGGING_PARAMETER'
        # try to capture all passwords/passphrase named fields missed by no_log
        elif PASSWORD_MATCH.search(param) and \
             arg_opts.get('type', 'str') != 'bool' and \
             not arg_opts.get('choices', False):
            # skip boolean and enums as they are about 'password' state
            log_args[param] = 'NOT_LOGGING_PASSWORD'
            self.warn('Module did not set no_log for %s' % param)
        else:
            param_val = self.params[param]
            if not isinstance(param_val, (text_type, binary_type)):
                param_val = str(param_val)
            elif isinstance(param_val, text_type):
                param_val = param_val.encode('utf-8')
            # best-effort scrub of things that look like secrets
            log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
    msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
    if msg:
        msg = 'Invoked with %s' % ' '.join(msg)
    else:
        msg = 'Invoked'
    self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK|os.R_OK):
raise
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK|os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
    '''
    find system executable in PATH.
    Optional arguments:
       - required:  if executable is not found and required is true, fail_json
       - opt_dirs:  optional list of directories to search in addition to PATH
    if found return full path; otherwise return None
    '''
    # BUG FIX: the default was the mutable literal [] (shared across
    # calls); use None and build a fresh list per call.
    if opt_dirs is None:
        opt_dirs = []
    sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
    paths = []
    for d in opt_dirs:
        if d is not None and os.path.exists(d):
            paths.append(d)
    paths += os.environ.get('PATH', '').split(os.pathsep)
    bin_path = None
    # mangle PATH to include /sbin dirs
    for p in sbin_paths:
        if p not in paths and os.path.exists(p):
            paths.append(p)
    for d in paths:
        if not d:
            continue
        path = os.path.join(d, arg)
        # must exist, not be a directory, and be executable by us
        if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
            bin_path = path
            break
    if required and bin_path is None:
        self.fail_json(msg='Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
    return bin_path
def boolean(self, arg):
    """Interpret *arg* as a bool.

    None and real bools pass straight through; strings are lowercased
    and matched against the accepted truthy/falsy spellings; anything
    unrecognized triggers fail_json.
    """
    if arg is None or isinstance(arg, bool):
        return arg
    if isinstance(arg, string_types):
        arg = arg.lower()
    if arg in BOOLEANS_TRUE:
        return True
    if arg in BOOLEANS_FALSE:
        return False
    self.fail_json(msg='%s is not a valid boolean. Valid booleans include: %s' % (to_text(arg), ','.join(['%s' % x for x in BOOLEANS])))
def jsonify(self, data):
    """Serialize *data* to JSON, tolerating py2 byte strings.

    Tries utf-8 then latin-1; on json libraries without the ``encoding``
    keyword (py3 / old simplejson) the data's byte strings are decoded
    explicitly first.  fail_json when no encoding works.
    """
    for encoding in ("utf-8", "latin-1"):
        try:
            return json.dumps(data, encoding=encoding)
        # Old systems using old simplejson module does not support encoding keyword.
        except TypeError:
            try:
                new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
            except UnicodeDecodeError:
                # bytes not valid in this encoding: try the next one
                continue
            return json.dumps(new_data)
        except UnicodeDecodeError:
            continue
    self.fail_json(msg='Invalid unicode encoding encountered')
def from_json(self, data):
    """Deserialize the JSON document *data* into Python objects."""
    return json.loads(data)
def add_cleanup_file(self, path):
    """Register *path* for removal at module exit; duplicates are ignored."""
    if path not in self.cleanup_files:
        self.cleanup_files.append(path)
def do_cleanup_files(self):
    """Remove every file previously registered via add_cleanup_file()."""
    for registered in self.cleanup_files:
        self.cleanup(registered)
def _return_formatted(self, kwargs):
    """Finalize and print the module result dict as JSON.

    Adds path info and invocation details, folds accumulated warnings
    and deprecations into *kwargs*, strips registered no_log values,
    then prints the JSON payload for the controller to read.
    """
    self.add_path_info(kwargs)
    if 'invocation' not in kwargs:
        kwargs['invocation'] = {'module_args': self.params}
    if 'warnings' in kwargs:
        if isinstance(kwargs['warnings'], list):
            for w in kwargs['warnings']:
                self.warn(w)
        else:
            self.warn(kwargs['warnings'])
    if self._warnings:
        kwargs['warnings'] = self._warnings
    if 'deprecations' in kwargs:
        if isinstance(kwargs['deprecations'], list):
            for d in kwargs['deprecations']:
                if isinstance(d, SEQUENCETYPE) and len(d) == 2:
                    self.deprecate(d[0], version=d[1])
                else:
                    self.deprecate(d)
        else:
            # BUG FIX: the original passed the loop variable `d` here,
            # which is unbound when 'deprecations' is not a list
            # (NameError); pass the scalar value itself.
            self.deprecate(kwargs['deprecations'])
    if self._deprecations:
        kwargs['deprecations'] = self._deprecations
    kwargs = remove_values(kwargs, self.no_log_values)
    print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
    ''' return from the module, without error '''
    kwargs.setdefault('changed', False)
    self.do_cleanup_files()
    self._return_formatted(kwargs)
    sys.exit(0)
def fail_json(self, **kwargs):
    ''' return from the module, with an error message '''
    # explicit check instead of `assert`, which is stripped under
    # `python -O` and would let a message-less failure slip through;
    # AssertionError is kept so existing callers/tests see the same type
    if 'msg' not in kwargs:
        raise AssertionError("implementation error -- msg to explain the error is required")
    kwargs['failed'] = True
    self.do_cleanup_files()
    self._return_formatted(kwargs)
    sys.exit(1)
def fail_on_missing_params(self, required_params=None):
    ''' This is for checking for required params when we can not check via argspec because we
    need more information than is simply given in the argspec.
    '''
    if not required_params:
        return
    missing_params = [p for p in required_params if not self.params.get(p)]
    if missing_params:
        self.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
def digest_from_file(self, filename, algorithm):
    ''' Return hex digest of local file for a digest_method specified by name, or None if file is not present.

    *algorithm* may be a name from AVAILABLE_HASH_ALGORITHMS or, for
    backwards compatibility, a hash object (anything with .hexdigest()).
    '''
    if not os.path.exists(filename):
        return None
    if os.path.isdir(filename):
        self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
    # preserve old behaviour where the third parameter was a hash algorithm object
    if hasattr(algorithm, 'hexdigest'):
        digest_method = algorithm
    else:
        try:
            digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
        except KeyError:
            self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
                               (filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
    blocksize = 64 * 1024
    # RESOURCE FIX: with-statement guarantees the handle is closed even
    # if a read raises (the original leaked the handle on error)
    with open(filename, 'rb') as infile:
        block = infile.read(blocksize)
        while block:
            digest_method.update(block)
            block = infile.read(blocksize)
    return digest_method.hexdigest()
def md5(self, filename):
    ''' Return MD5 hex digest of local file using digest_from_file().

    Do not use this function unless you have no other choice for:
        1) Optional backwards compatibility
        2) Compatibility with a third party protocol

    This function will not work on systems complying with FIPS-140-2.

    Most uses of this function can use the module.sha1 function instead.
    '''
    if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
        raise ValueError('MD5 not available. Possibly running in FIPS mode')
    return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
    ''' Return SHA1 hex digest of local file using digest_from_file(), or None if the file is absent. '''
    return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
    ''' Return SHA-256 hex digest of local file using digest_from_file(), or None if the file is absent. '''
    return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
    '''make a date-marked backup of the specified file; returns the
    backup path on success, or the empty string when *fn* does not
    exist (fail_json on copy failure).'''
    backupdest = ''
    if os.path.exists(fn):
        # backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
        stamp = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
        backupdest = '%s.%s.%s' % (fn, os.getpid(), stamp)
        try:
            shutil.copy2(fn, backupdest)
        except (shutil.Error, IOError):
            e = get_exception()
            self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
    return backupdest
def cleanup(self, tmpfile):
    """Best-effort removal of *tmpfile*: missing files are ignored and
    OSError is reported on stderr rather than raised."""
    if not os.path.exists(tmpfile):
        return
    try:
        os.unlink(tmpfile)
    except OSError:
        e = get_exception()
        sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e))
def atomic_move(self, src, dest, unsafe_writes=False):
    '''atomically move src to dest, copying attributes from dest, returns true on success
    it uses os.rename to ensure this as it is an atomic operation, rest of the function is
    to work around limitations, corner cases and ensure selinux context is saved if possible'''
    context = None
    dest_stat = None
    b_src = to_bytes(src, errors='surrogate_or_strict')
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    if os.path.exists(b_dest):
        try:
            dest_stat = os.stat(b_dest)
            # copy mode and ownership from the existing destination onto
            # the source, so the rename preserves them
            os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
            os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
            # try to copy flags if possible
            if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
                try:
                    os.chflags(b_src, dest_stat.st_flags)
                except OSError:
                    e = get_exception()
                    # tolerate filesystems that don't support chflags
                    for err in 'EOPNOTSUPP', 'ENOTSUP':
                        if hasattr(errno, err) and e.errno == getattr(errno, err):
                            break
                    else:
                        raise
        except OSError:
            e = get_exception()
            # EPERM just means we aren't allowed to copy ownership; ignore
            if e.errno != errno.EPERM:
                raise
        if self.selinux_enabled():
            context = self.selinux_context(dest)
    else:
        if self.selinux_enabled():
            context = self.selinux_default_context(dest)
    creating = not os.path.exists(b_dest)
    try:
        # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
        os.rename(b_src, b_dest)
    except (IOError, OSError):
        e = get_exception()
        if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
            # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
            # and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
            self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
        else:
            # fallback: stage a temp file in dest's directory, then
            # rename it into place (rename within one fs is atomic)
            b_dest_dir = os.path.dirname(b_dest)
            # Use bytes here. In the shippable CI, this fails with
            # a UnicodeError with surrogateescape'd strings for an unknown
            # reason (doesn't happen in a local Ubuntu16.04 VM)
            native_dest_dir = b_dest_dir
            native_suffix = os.path.basename(b_dest)
            native_prefix = b('.ansible_tmp')
            try:
                tmp_dest_fd, tmp_dest_name = tempfile.mkstemp( prefix=native_prefix, dir=native_dest_dir, suffix=native_suffix)
            except (OSError, IOError):
                e = get_exception()
                self.fail_json(msg='The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), e))
            except TypeError:
                # We expect that this is happening because python3.4.x and
                # below can't handle byte strings in mkstemp(). Traceback
                # would end in something like:
                #     file = _os.path.join(dir, pre + name + suf)
                #     TypeError: can't concat bytes to str
                self.fail_json(msg='Failed creating temp file for atomic move. This usually happens when using Python3 less than Python3.5. '
                               'Please use Python2.x or Python3.5 or greater.', exception=traceback.format_exc())
            b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
            try:
                try:
                    # close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
                    os.close(tmp_dest_fd)
                    # leaves tmp file behind when sudo and not root
                    try:
                        shutil.move(b_src, b_tmp_dest_name)
                    except OSError:
                        # cleanup will happen by 'rm' of tempdir
                        # copy2 will preserve some metadata
                        shutil.copy2(b_src, b_tmp_dest_name)
                    if self.selinux_enabled():
                        self.set_context_if_different(
                            b_tmp_dest_name, context, False)
                    try:
                        tmp_stat = os.stat(b_tmp_dest_name)
                        if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
                            os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
                    except OSError:
                        e = get_exception()
                        if e.errno != errno.EPERM:
                            raise
                    try:
                        os.rename(b_tmp_dest_name, b_dest)
                    except (shutil.Error, OSError, IOError):
                        e = get_exception()
                        if unsafe_writes and e.errno == errno.EBUSY:
                            # caller opted in: overwrite dest in place
                            self._unsafe_writes(b_tmp_dest_name, b_dest)
                        else:
                            self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
                except (shutil.Error, OSError, IOError):
                    e = get_exception()
                    self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
            finally:
                # always remove the staging temp file if it still exists
                self.cleanup(b_tmp_dest_name)
    if creating:
        # make sure the file has the correct permissions
        # based on the current value of umask
        umask = os.umask(0)
        os.umask(umask)
        os.chmod(b_dest, DEFAULT_PERM & ~umask)
        try:
            os.chown(b_dest, os.geteuid(), os.getegid())
        except OSError:
            # We're okay with trying our best here. If the user is not
            # root (or old Unices) they won't be able to chown.
            pass
    if self.selinux_enabled():
        # rename might not preserve context
        self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError):
e = get_exception()
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e), exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
    """Read up to 9000 bytes from *file_descriptor* when select() marked
    it readable (i.e. it appears in *rfds*).

    On EOF the descriptor is removed from *rpipes* so run_command's
    poll loop stops watching it.  Returns the bytes read (may be empty).
    """
    data = b('')
    if file_descriptor in rfds:
        data = os.read(file_descriptor.fileno(), 9000)
        if data == b(''):
            # empty read means EOF: stop polling this pipe
            rpipes.remove(file_descriptor)
    return data
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
                use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
    '''
    Execute a command, returns rc, stdout, and stderr.

    :arg args: is the command to run
        * If args is a list, the command will be run with shell=False.
        * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
        * If args is a string and use_unsafe_shell=True it runs with shell=True.
    :kw check_rc: Whether to call fail_json in case of non zero RC.
        Default False
    :kw close_fds: See documentation for subprocess.Popen(). Default True
    :kw executable: See documentation for subprocess.Popen(). Default None
    :kw data: If given, information to write to the stdin of the command
    :kw binary_data: If False, append a newline to the data. Default False
    :kw path_prefix: If given, additional path to find the command in.
        This adds to the PATH environment vairable so helper commands in
        the same directory can also be found
    :kw cwd: If given, working directory to run the command inside
    :kw use_unsafe_shell: See `args` parameter. Default False
    :kw prompt_regex: Regex string (not a compiled regex) which can be
        used to detect prompts in the stdout which would otherwise cause
        the execution to hang (especially if no input data is specified)
    :kw environ_update: dictionary to *update* os.environ with
    :kw umask: Umask to be used when running the command. Default None
    :kw encoding: Since we return native strings, on python3 we need to
        know the encoding to use to transform from bytes to text. If you
        want to always get bytes back, use encoding=None. The default is
        "utf-8". This does not affect transformation of strings given as
        args.
    :kw errors: Since we return native strings, on python3 we need to
        transform stdout and stderr from bytes to text. If the bytes are
        undecodable in the ``encoding`` specified, then use this error
        handler to deal with them. The default is ``surrogate_or_strict``
        which means that the bytes will be decoded using the
        surrogateescape error handler if available (available on all
        python3 versions we support) otherwise a UnicodeError traceback
        will be raised. This does not affect transformations of strings
        given as args.
    :returns: A 3-tuple of return code (integer), stdout (native string),
        and stderr (native string). On python2, stdout and stderr are both
        byte strings. On python3, stdout and stderr are text strings converted
        according to the encoding and errors parameters. If you want byte
        strings on python3, use encoding=None to turn decoding to text off.
    '''
    # --- normalize args and decide whether to use a shell -------------
    shell = False
    if isinstance(args, list):
        if use_unsafe_shell:
            args = " ".join([pipes.quote(x) for x in args])
            shell = True
    elif isinstance(args, (binary_type, text_type)) and use_unsafe_shell:
        shell = True
    elif isinstance(args, (binary_type, text_type)):
        # On python2.6 and below, shlex has problems with text type
        # On python3, shlex needs a text type.
        if PY2:
            args = to_bytes(args, errors='surrogate_or_strict')
        elif PY3:
            args = to_text(args, errors='surrogateescape')
        args = shlex.split(args)
    else:
        msg = "Argument 'args' to run_command must be list or string"
        self.fail_json(rc=257, cmd=args, msg=msg)
    prompt_re = None
    if prompt_regex:
        if isinstance(prompt_regex, text_type):
            if PY3:
                prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
            elif PY2:
                prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
        try:
            prompt_re = re.compile(prompt_regex, re.MULTILINE)
        except re.error:
            self.fail_json(msg="invalid prompt regular expression given to run_command")
    # expand things like $HOME and ~
    if not shell:
        args = [ os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None ]
    rc = 0
    msg = None
    st_in = None
    # --- manipulate the environ we'll send to the new process ---------
    # (old values are remembered so they can be restored afterwards)
    old_env_vals = {}
    # We can set this from both an attribute and per call
    for key, val in self.run_command_environ_update.items():
        old_env_vals[key] = os.environ.get(key, None)
        os.environ[key] = val
    if environ_update:
        for key, val in environ_update.items():
            old_env_vals[key] = os.environ.get(key, None)
            os.environ[key] = val
    if path_prefix:
        old_env_vals['PATH'] = os.environ['PATH']
        os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
    # If using test-module and explode, the remote lib path will resemble ...
    #   /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
    # If using ansible or ansible-playbook with a remote system ...
    #   /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
    # Clean out python paths set by ansiballz
    if 'PYTHONPATH' in os.environ:
        pypaths = os.environ['PYTHONPATH'].split(':')
        pypaths = [x for x in pypaths \
                   if not x.endswith('/ansible_modlib.zip') \
                   and not x.endswith('/debug_dir')]
        os.environ['PYTHONPATH'] = ':'.join(pypaths)
        if not os.environ['PYTHONPATH']:
            del os.environ['PYTHONPATH']
    # create a printable version of the command for use
    # in reporting later, which strips out things like
    # passwords from the args list
    to_clean_args = args
    if PY2:
        if isinstance(args, text_type):
            to_clean_args = to_bytes(args)
    else:
        if isinstance(args, binary_type):
            to_clean_args = to_text(args)
    if isinstance(args, (text_type, binary_type)):
        to_clean_args = shlex.split(to_clean_args)
    clean_args = []
    is_passwd = False
    for arg in to_clean_args:
        if is_passwd:
            # previous arg was a --password style flag: mask this value
            is_passwd = False
            clean_args.append('********')
            continue
        if PASSWD_ARG_RE.match(arg):
            sep_idx = arg.find('=')
            if sep_idx > -1:
                clean_args.append('%s=********' % arg[:sep_idx])
                continue
            else:
                is_passwd = True
        arg = heuristic_log_sanitize(arg, self.no_log_values)
        clean_args.append(arg)
    clean_args = ' '.join(pipes.quote(arg) for arg in clean_args)
    if data:
        st_in = subprocess.PIPE
    kwargs = dict(
        executable=executable,
        shell=shell,
        close_fds=close_fds,
        stdin=st_in,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # store the pwd
    prev_dir = os.getcwd()
    # make sure we're in the right working directory
    if cwd and os.path.isdir(cwd):
        cwd = os.path.abspath(os.path.expanduser(cwd))
        kwargs['cwd'] = cwd
        try:
            os.chdir(cwd)
        except (OSError, IOError):
            e = get_exception()
            self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))
    old_umask = None
    if umask:
        old_umask = os.umask(umask)
    try:
        if self._debug:
            self.log('Executing: ' + clean_args)
        cmd = subprocess.Popen(args, **kwargs)
        # the communication logic here is essentially taken from that
        # of the _communicate() function in ssh.py
        stdout = b('')
        stderr = b('')
        rpipes = [cmd.stdout, cmd.stderr]
        if data:
            if not binary_data:
                data += '\n'
            if isinstance(data, text_type):
                data = to_bytes(data)
            cmd.stdin.write(data)
            cmd.stdin.close()
        # select/poll loop: accumulate stdout/stderr until both pipes
        # hit EOF and the child has terminated
        while True:
            rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
            stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
            stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
            # if we're checking for prompts, do it now
            if prompt_re:
                if prompt_re.search(stdout) and not data:
                    if encoding:
                        stdout = to_native(stdout, encoding=encoding, errors=errors)
                    else:
                        stdout = stdout
                    return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
            # only break out if no pipes are left to read or
            # the pipes are completely read and
            # the process is terminated
            if (not rpipes or not rfds) and cmd.poll() is not None:
                break
            # No pipes are left to read but process is not yet terminated
            # Only then it is safe to wait for the process to be finished
            # NOTE: Actually cmd.poll() is always None here if rpipes is empty
            elif not rpipes and cmd.poll() is None:
                cmd.wait()
                # The process is terminated. Since no pipes to read from are
                # left, there is no need to call select() again.
                break
        cmd.stdout.close()
        cmd.stderr.close()
        rc = cmd.returncode
    except (OSError, IOError):
        e = get_exception()
        self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(e)))
        self.fail_json(rc=e.errno, msg=to_native(e), cmd=clean_args)
    except Exception:
        e = get_exception()
        self.log("Error Executing CMD:%s Exception:%s" % (clean_args,to_native(traceback.format_exc())))
        self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)
    # Restore env settings
    for key, val in old_env_vals.items():
        if val is None:
            del os.environ[key]
        else:
            os.environ[key] = val
    if old_umask:
        os.umask(old_umask)
    if rc != 0 and check_rc:
        msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
        self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
    # reset the pwd
    os.chdir(prev_dir)
    if encoding is not None:
        return (rc, to_native(stdout, encoding=encoding, errors=errors),
                to_native(stderr, encoding=encoding, errors=errors))
    return (rc, stdout, stderr)
def append_to_file(self, filename, str):
    """Append *str* to the file at *filename*.

    Environment variables and a leading '~' in *filename* are expanded
    before opening. The file is created if it does not exist.

    Note: the third parameter shadows the builtin ``str``; the name is
    kept unchanged for backward compatibility with existing callers.
    """
    filename = os.path.expandvars(os.path.expanduser(filename))
    # Use a context manager so the handle is closed even if write() raises
    # (the original open()/close() pair leaked the handle on failure).
    with open(filename, 'a') as fh:
        fh.write(str)
def bytes_to_human(self, size):
    # Delegate to the module-level bytes_to_human() helper (defined elsewhere
    # in this file). *size* is a byte count; the result is presumably a
    # human-readable string -- confirm against the module-level helper.
    return bytes_to_human(size)
# for backwards compatibility
# Old alias kept so external callers that still use pretty_bytes() keep
# working; it points at the bytes_to_human method above.
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
    # Delegate to the module-level human_to_bytes() helper (defined elsewhere
    # in this file): converts a human-readable size (e.g. "1K") back into a
    # count of bytes, or bits when isbits=True.
    return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
# Re-exported here as an attribute so pre-2.0 callers that accessed it
# through this class keep working.
is_executable = is_executable
def get_module_path():
    """Return the directory containing this module, with symlinks resolved."""
    resolved_path = os.path.realpath(__file__)
    return os.path.dirname(resolved_path)
|
prakritish/ansible
|
lib/ansible/module_utils/basic.py
|
Python
|
gpl-3.0
| 100,223
|
[
"VisIt"
] |
0de0f3299cf14c3716687614eb1ef904475ae6fbfc63a77f4ab337c0389f6497
|
# $Id$
#
# Copyright (C) 2004-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import RDConfig
from rdkit import Chem
import sys,csv
def Convert(suppl, outFile, keyCol='', stopAfter=-1, includeChirality=0, smilesFrom=''):
    """Write the molecules from *suppl* to *outFile* as CSV.

    The header row is built from the properties of the first molecule in the
    supplier: an optional key column (keyCol), a SMILES column, then every
    remaining property. Molecules that fail to parse are skipped. Processing
    stops after *stopAfter* molecules (-1 means no limit). If *smilesFrom*
    names a property present on a molecule, its value is canonicalized via an
    RDKit round-trip and used instead of generating the SMILES directly.
    """
    writer = csv.writer(outFile)

    # Column layout comes from the first molecule's property list.
    firstMol = suppl[0]
    propNames = list(firstMol.GetPropNames())
    if keyCol and keyCol in propNames:
        propNames.remove(keyCol)

    header = ([keyCol] if keyCol else []) + ['SMILES'] + propNames
    writer.writerow(header)

    count = 0
    for mol in suppl:
        if not mol:
            continue
        if smilesFrom and mol.HasProp(smilesFrom):
            # Round-trip through RDKit so the stored SMILES is canonicalized.
            stored = mol.GetProp(smilesFrom)
            smi = Chem.MolToSmiles(Chem.MolFromSmiles(stored), includeChirality)
        else:
            smi = Chem.MolToSmiles(mol, includeChirality)
        row = []
        if keyCol:
            row.append(str(mol.GetProp(keyCol)))
        row.append(smi)
        for prop in propNames:
            row.append(str(mol.GetProp(prop)) if mol.HasProp(prop) else '')
        writer.writerow(row)
        count += 1
        if count == stopAfter:
            break
    return
#-------------------
# Testing:
import unittest
class TestCase(unittest.TestCase):
    """Regression tests for Convert() using the NCI sample SDF shipped with RDKit."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test1(self):
        # Default conversion: 200 molecules -> header + 200 data lines,
        # SMILES as the first column.
        import os
        from rdkit.six.moves import cStringIO as StringIO  # @UnresolvedImport #pylint: disable=F0401
        fName = os.path.join(RDConfig.RDDataDir, 'NCI', 'first_200.props.sdf')
        suppl = Chem.SDMolSupplier(fName)
        io = StringIO()
        try:
            Convert(suppl, io)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed; test failures still report the traceback.
            import traceback
            traceback.print_exc()
            self.fail('conversion failed')
        txt = io.getvalue()
        lines = txt.split('\n')
        if not lines[-1]:
            del lines[-1]
        self.assertEqual(len(lines), 201, 'bad num lines: %d' % len(lines))
        line0 = lines[0].split(',')
        self.assertEqual(len(line0), 20)
        self.assertEqual(line0[0], 'SMILES')

    def test2(self):
        # Keyed conversion limited to 5 molecules: header + 5 data lines,
        # key column first, SMILES second.
        import os
        from rdkit.six.moves import cStringIO as StringIO  # @UnresolvedImport #pylint: disable=F0401
        fName = os.path.join(RDConfig.RDDataDir, 'NCI', 'first_200.props.sdf')
        suppl = Chem.SDMolSupplier(fName)
        io = StringIO()
        try:
            Convert(suppl, io, keyCol='AMW', stopAfter=5)
        except Exception:
            import traceback
            traceback.print_exc()
            self.fail('conversion failed')
        txt = io.getvalue()
        lines = txt.split('\n')
        if not lines[-1]:
            del lines[-1]
        self.assertEqual(len(lines), 6, 'bad num lines: %d' % len(lines))
        line0 = lines[0].split(',')
        self.assertEqual(len(line0), 20)
        self.assertEqual(line0[0], 'AMW')
        self.assertEqual(line0[1], 'SMILES')
#-------------------
# CLI Stuff:
def Usage():
    """Write the command-line usage message to stderr and exit with status -1."""
    sys.stderr.write("""
Usage: SDFToCSV [-k keyCol] inFile.sdf [outFile.csv]
""")
    sys.exit(-1)
if __name__=='__main__':
  # CLI entry point: parse options, then either run the unit tests
  # (--test) or convert the given SDF file to CSV.
  import getopt
  try:
    args,extras = getopt.getopt(sys.argv[1:],'hk:',
                                ['test',
                                 'chiral',
                                 'smilesCol=',
                                 ])
  except:
    # malformed command line: show the traceback, then the usage text
    import traceback
    traceback.print_exc()
    Usage()

  # option defaults
  keyCol = ''       # property used as the leading key column
  testIt = 0        # run the test suite instead of converting
  useChirality=0    # include stereochemistry in the output SMILES
  smilesCol=''      # property to take the SMILES from, if present
  for arg,val in args:
    if arg=='-k':
      keyCol = val
    elif arg=='--chiral':
      useChirality=1
    elif arg=='--smilesCol':
      smilesCol=val
    elif arg=='--test':
      testIt=1
    elif arg=='-h':
      Usage()
  # an input file is required unless we're just running the tests
  if not testIt and len(extras)<1:
    Usage()

  if not testIt:
    inFilename = extras[0]
    if len(extras)>1:
      outFilename = extras[1]
      outF = open(outFilename,'w+')
    else:
      # no output file given: write the CSV to stdout
      outF = sys.stdout
    suppl = Chem.SDMolSupplier(inFilename)
    Convert(suppl,outF,keyCol=keyCol,includeChirality=useChirality,smilesFrom=smilesCol)
  else:
    # strip our own options so unittest doesn't try to parse them
    sys.argv = [sys.argv[0]]
    unittest.main()
|
soerendip42/rdkit
|
rdkit/Chem/ChemUtils/SDFToCSV.py
|
Python
|
bsd-3-clause
| 4,077
|
[
"RDKit"
] |
81d4a1878fddab427da4a172049cb8882fe824bbf3f7acca98b4aafe2c2192fb
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_HTTPError,
)
from ..utils import (
ExtractorError,
find_xpath_attr,
lowercase_escape,
smuggle_url,
unescapeHTML,
)
class NBCIE(InfoExtractor):
    # Extractor for nbc.com show/segment pages. The media itself is hosted
    # on theplatform.com, so extraction just locates the embedded
    # ThePlatform URL and delegates to that extractor.
    _VALID_URL = r'https?://www\.nbc\.com/(?:[^/]+/)+(?P<id>n?\d+)'

    _TESTS = [
        {
            'url': 'http://www.nbc.com/the-tonight-show/segments/112966',
            # md5 checksum is not stable
            'info_dict': {
                'id': 'c9xnCo0YPOPH',
                'ext': 'flv',
                'title': 'Jimmy Fallon Surprises Fans at Ben & Jerry\'s',
                'description': 'Jimmy gives out free scoops of his new "Tonight Dough" ice cream flavor by surprising customers at the Ben & Jerry\'s scoop shop.',
            },
        },
        {
            'url': 'http://www.nbc.com/the-tonight-show/episodes/176',
            'info_dict': {
                'id': 'XwU9KZkp98TH',
                'ext': 'flv',
                'title': 'Ricky Gervais, Steven Van Zandt, ILoveMakonnen',
                'description': 'A brand new episode of The Tonight Show welcomes Ricky Gervais, Steven Van Zandt and ILoveMakonnen.',
            },
            'skip': 'Only works from US',
        },
        {
            'url': 'http://www.nbc.com/saturday-night-live/video/star-wars-teaser/2832821',
            'info_dict': {
                'id': '8iUuyzWDdYUZ',
                'ext': 'flv',
                'title': 'Star Wars Teaser',
                'description': 'md5:0b40f9cbde5b671a7ff62fceccc4f442',
            },
            'skip': 'Only works from US',
        },
        {
            # This video has expired but with an escaped embedURL
            'url': 'http://www.nbc.com/parenthood/episode-guide/season-5/just-like-at-home/515',
            'skip': 'Expired'
        }
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The player URL appears in several markup variants (data attribute,
        # iframe src, or an escaped "embedURL" JSON field);
        # lowercase_escape/unescapeHTML undo the escaping seen in the wild,
        # and '\\/' sequences are normalized to plain slashes.
        theplatform_url = unescapeHTML(lowercase_escape(self._html_search_regex(
            [
                r'(?:class="video-player video-player-full" data-mpx-url|class="player" src)="(.*?)"',
                r'<iframe[^>]+src="((?:https?:)?//player\.theplatform\.com/[^"]+)"',
                r'"embedURL"\s*:\s*"([^"]+)"'
            ],
            webpage, 'theplatform url').replace('_no_endcard', '').replace('\\/', '/')))
        if theplatform_url.startswith('//'):
            # protocol-relative URL: force http
            theplatform_url = 'http:' + theplatform_url
        # Smuggle the original page URL along for the ThePlatform extractor
        # (presumably used as a referer/source hint -- confirm there).
        return self.url_result(smuggle_url(theplatform_url, {'source_url': url}))
class NBCSportsVPlayerIE(InfoExtractor):
    # Handles the vplayer.nbcsports.com embed player; the actual media is
    # served through theplatform.com.
    _VALID_URL = r'https?://vplayer\.nbcsports\.com/(?:[^/]+/)+(?P<id>[0-9a-zA-Z_]+)'
    _TESTS = [{
        'url': 'https://vplayer.nbcsports.com/p/BxmELC/nbcsports_share/select/9CsDKds0kvHI',
        'info_dict': {
            'id': '9CsDKds0kvHI',
            'ext': 'flv',
            'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d',
            'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson',
        }
    }, {
        'url': 'http://vplayer.nbcsports.com/p/BxmELC/nbc_embedshare/select/_hqLjQ95yx8Z',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_url(webpage):
        # Used by sibling extractors (e.g. NBCSportsIE) to find an embedded
        # vplayer iframe in an arbitrary page; returns None when absent.
        iframe_m = re.search(
            r'<iframe[^>]+src="(?P<url>https?://vplayer\.nbcsports\.com/[^"]+)"', webpage)
        if iframe_m:
            return iframe_m.group('url')

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The og:video URL points at ThePlatform; delegate to that extractor.
        theplatform_url = self._og_search_video_url(webpage)
        return self.url_result(theplatform_url, 'ThePlatform')
class NBCSportsIE(InfoExtractor):
    # Extractor for nbcsports.com article pages that embed the vplayer.
    # Does not include https because its certificate is invalid
    _VALID_URL = r'http://www\.nbcsports\.com//?(?:[^/]+/)+(?P<id>[0-9a-z-]+)'

    _TEST = {
        'url': 'http://www.nbcsports.com//college-basketball/ncaab/tom-izzo-michigan-st-has-so-much-respect-duke',
        'info_dict': {
            'id': 'PHJSaFWbrTY9',
            'ext': 'flv',
            'title': 'Tom Izzo, Michigan St. has \'so much respect\' for Duke',
            'description': 'md5:ecb459c9d59e0766ac9c7d5d0eda8113',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Find the embedded vplayer iframe and hand off to the embed extractor.
        return self.url_result(
            NBCSportsVPlayerIE._extract_url(webpage), 'NBCSportsVPlayer')
class NBCNewsIE(InfoExtractor):
    # Two page families are handled: legacy /video/<numeric id> pages served
    # from an XML API, and newer watch/feature/nightly-news pages that embed
    # ThePlatform playlist data in a bootstrap JSON blob.
    _VALID_URL = r'''(?x)https?://(?:www\.)?nbcnews\.com/
        (?:video/.+?/(?P<id>\d+)|
            (?:watch|feature|nightly-news)/[^/]+/(?P<title>.+))
        '''

    _TESTS = [
        {
            'url': 'http://www.nbcnews.com/video/nbc-news/52753292',
            'md5': '47abaac93c6eaf9ad37ee6c4463a5179',
            'info_dict': {
                'id': '52753292',
                'ext': 'flv',
                'title': 'Crew emerges after four-month Mars food study',
                'description': 'md5:24e632ffac72b35f8b67a12d1b6ddfc1',
            },
        },
        {
            'url': 'http://www.nbcnews.com/feature/edward-snowden-interview/how-twitter-reacted-snowden-interview-n117236',
            'md5': 'b2421750c9f260783721d898f4c42063',
            'info_dict': {
                'id': 'I1wpAI_zmhsQ',
                'ext': 'mp4',
                'title': 'How Twitter Reacted To The Snowden Interview',
                'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64',
            },
            'add_ie': ['ThePlatform'],
        },
        {
            'url': 'http://www.nbcnews.com/feature/dateline-full-episodes/full-episode-family-business-n285156',
            'md5': 'fdbf39ab73a72df5896b6234ff98518a',
            'info_dict': {
                'id': 'Wjf9EDR3A_60',
                'ext': 'mp4',
                'title': 'FULL EPISODE: Family Business',
                'description': 'md5:757988edbaae9d7be1d585eb5d55cc04',
            },
        },
        {
            'url': 'http://www.nbcnews.com/nightly-news/video/nightly-news-with-brian-williams-full-broadcast-february-4-394064451844',
            'md5': 'b5dda8cddd8650baa0dcb616dd2cf60d',
            'info_dict': {
                'id': 'sekXqyTVnmN3',
                'ext': 'mp4',
                'title': 'Nightly News with Brian Williams Full Broadcast (February 4)',
                'description': 'md5:1c10c1eccbe84a26e5debb4381e2d3c5',
            },
        },
        {
            'url': 'http://www.nbcnews.com/watch/dateline/full-episode--deadly-betrayal-386250819952',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        if video_id is not None:
            # Legacy numeric-id pages: all metadata comes from an XML API.
            all_info = self._download_xml('http://www.nbcnews.com/id/%s/displaymode/1219' % video_id, video_id)
            info = all_info.find('video')

            return {
                'id': video_id,
                'title': info.find('headline').text,
                'ext': 'flv',
                'url': find_xpath_attr(info, 'media', 'type', 'flashVideo').text,
                'description': compat_str(info.find('caption').text),
                'thumbnail': find_xpath_attr(info, 'media', 'type', 'thumbnail').text,
            }
        else:
            # "feature" and "nightly-news" pages use theplatform.com
            title = mobj.group('title')
            webpage = self._download_webpage(url, title)
            bootstrap_json = self._search_regex(
                r'var\s+(?:bootstrapJson|playlistData)\s*=\s*({.+});?\s*$',
                webpage, 'bootstrap json', flags=re.MULTILINE)
            # NOTE(review): video_id is None in this branch; _parse_json
            # appears to use it only for error reporting -- confirm upstream.
            bootstrap = self._parse_json(bootstrap_json, video_id)
            info = bootstrap['results'][0]['video']
            mpxid = info['mpxId']

            # Try each candidate playlist URL until one yields the entry
            # whose mpxId matches this video.
            base_urls = [
                info['fallbackPlaylistUrl'],
                info['associatedPlaylistUrl'],
            ]

            for base_url in base_urls:
                if not base_url:
                    continue
                playlist_url = base_url + '?form=MPXNBCNewsAPI'

                try:
                    all_videos = self._download_json(playlist_url, title)
                except ExtractorError as ee:
                    # HTTP errors mean this playlist is unavailable; try the
                    # next candidate instead of failing outright.
                    if isinstance(ee.cause, compat_HTTPError):
                        continue
                    raise

                if not all_videos or 'videos' not in all_videos:
                    continue

                try:
                    info = next(v for v in all_videos['videos'] if v['mpxId'] == mpxid)
                    break
                except StopIteration:
                    continue

            if info is None:
                raise ExtractorError('Could not find video in playlists')

            return {
                '_type': 'url',
                # We get the best quality video
                'url': info['videoAssets'][-1]['publicUrl'],
                'ie_key': 'ThePlatform',
            }
class MSNBCIE(InfoExtractor):
    # Extractor for msnbc.com watch pages; resolves the embedURL meta tag
    # and lets the generic URL resolver pick the right downstream extractor.
    # https URLs redirect to corresponding http ones
    _VALID_URL = r'http://www\.msnbc\.com/[^/]+/watch/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://www.msnbc.com/all-in-with-chris-hayes/watch/the-chaotic-gop-immigration-vote-314487875924',
        'md5': '6d236bf4f3dddc226633ce6e2c3f814d',
        'info_dict': {
            'id': 'n_hayes_Aimm_140801_272214',
            'ext': 'mp4',
            'title': 'The chaotic GOP immigration vote',
            'description': 'The Republican House votes on a border bill that has no chance of getting through the Senate or signed by the President and is drawing criticism from all sides.',
            'thumbnail': 're:^https?://.*\.jpg$',
            'timestamp': 1406937606,
            'upload_date': '20140802',
            'categories': ['MSNBC/Topics/Franchise/Best of last night', 'MSNBC/Topics/General/Congress'],
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The embedURL meta tag points at the actual player page.
        embed_url = self._html_search_meta('embedURL', webpage)
        return self.url_result(embed_url)
|
Oteng/youtube-dl
|
youtube_dl/extractor/nbc.py
|
Python
|
unlicense
| 10,298
|
[
"Brian"
] |
7209fad7effb1883c09d95e6dd8f6054a7fd346d8470349d69094544d13d53b4
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# eclipses.py
# Waqas Bhatti and Luke Bouma - Feb 2017
# (wbhatti@astro.princeton.edu and luke@astro.princeton.edu)
'''Light curve fitting routines for eclipsing binaries:
- :py:func:`astrobase.lcfit.eclipses.gaussianeb_fit_magseries`: fit a double
inverted gaussian eclipsing binary model to the magnitude/flux time series
'''
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
# Flip to True for module-level debug logging.
DEBUG = False
if DEBUG:
    level = logging.DEBUG
else:
    level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
    level=level,
    style=log_sub,
    format=log_fmt,
    datefmt=log_date_fmt,
)

# Short aliases used throughout this module.
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
from functools import partial
from numpy import (
nan as npnan, sum as npsum, sqrt as npsqrt,
nonzero as npnonzero, diag as npdiag, median as npmedian,
inf as npinf, array as nparray
)
from scipy.optimize import curve_fit
from ..lcmath import sigclip_magseries
from ..lcmodels import eclipses
from .utils import make_fit_plot
from .nonphysical import spline_fit_magseries, savgol_fit_magseries
############################################
## DOUBLE INVERTED GAUSSIAN ECLIPSE MODEL ##
############################################
def gaussianeb_fit_magseries(
        times, mags, errs,
        ebparams,
        param_bounds=None,
        scale_errs_redchisq_unity=True,
        sigclip=10.0,
        plotfit=False,
        magsarefluxes=False,
        verbose=True,
        curve_fit_kwargs=None,
):
    '''This fits a double inverted gaussian EB model to a magnitude time series.

    Parameters
    ----------

    times,mags,errs : np.array
        The input mag/flux time-series to fit the EB model to.

    ebparams : list of float
        This is a list containing the eclipsing binary parameters::

            ebparams = [period (time),
                        epoch (time),
                        pdepth (mags),
                        pduration (phase),
                        psdepthratio,
                        secondaryphase]

        `period` is the period in days.

        `epoch` is the time of primary minimum in JD.

        `pdepth` is the depth of the primary eclipse:

        - for magnitudes -> `pdepth` should be < 0
        - for fluxes     -> `pdepth` should be > 0

        `pduration` is the length of the primary eclipse in phase.

        `psdepthratio` is the ratio of the secondary eclipse depth to that of
        the primary eclipse.

        `secondaryphase` is the phase at which the minimum of the secondary
        eclipse is located. This effectively parameterizes eccentricity.

        If `epoch` is None, this function will do an initial spline fit to find
        an approximate minimum of the phased light curve using the given period.

        The `pdepth` provided is checked against the value of
        `magsarefluxes`. if `magsarefluxes = True`, the `ebdepth` is forced to
        be > 0; if `magsarefluxes = False`, the `ebdepth` is forced to be < 0.

    param_bounds : dict or None
        This is a dict of the upper and lower bounds on each fit
        parameter. Should be of the form::

            {'period': (lower_bound_period, upper_bound_period),
             'epoch': (lower_bound_epoch, upper_bound_epoch),
             'pdepth': (lower_bound_pdepth, upper_bound_pdepth),
             'pduration': (lower_bound_pduration, upper_bound_pduration),
             'psdepthratio': (lower_bound_psdepthratio,
                              upper_bound_psdepthratio),
             'secondaryphase': (lower_bound_secondaryphase,
                                upper_bound_secondaryphase)}

        - To indicate that a parameter is fixed, use 'fixed' instead of a tuple
          providing its lower and upper bounds as tuple.

        - To indicate that a parameter has no bounds, don't include it in the
          param_bounds dict.

        If this is None, the default value of this kwarg will be::

            {'period':(0.0,np.inf),       # period is between 0 and inf
             'epoch':(0.0, np.inf),       # epoch is between 0 and inf
             'pdepth':(-np.inf,np.inf),   # pdepth is between -np.inf and np.inf
             'pduration':(0.0,1.0),       # pduration is between 0.0 and 1.0
             'psdepthratio':(0.0,1.0),    # psdepthratio is between 0.0 and 1.0
             'secondaryphase':(0.0,1.0)}  # secondaryphase is between 0.0 and 1.0

    scale_errs_redchisq_unity : bool
        If True, the standard errors on the fit parameters will be scaled to
        make the reduced chi-sq = 1.0. This sets the ``absolute_sigma`` kwarg
        for the ``scipy.optimize.curve_fit`` function to False.

    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed using
        the number provided as the sigma-multiplier to cut out from the input
        time-series.

        If a list of two ints/floats is provided, the function will perform an
        'asymmetric' sigma-clip. The first element in this list is the sigma
        value to use for fainter flux/mag values; the second element in this
        list is the sigma value to use for brighter flux/mag values. For
        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
        dimmings and greater than 3-sigma brightenings. Here the meaning of
        "dimming" and "brightening" is set by *physics* (not the magnitude
        system), which is why the `magsarefluxes` kwarg must be correctly set.

        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through to
        the output.

    magsarefluxes : bool
        If True, will treat the input values of `mags` as fluxes for purposes of
        plotting the fit and sig-clipping.

    plotfit : str or False
        If this is a string, this function will make a plot for the fit to the
        mag/flux time-series and writes the plot to the path specified here.

    verbose : bool
        If True, will indicate progress and warn of any problems.

    curve_fit_kwargs : dict or None
        If not None, this should be a dict containing extra kwargs to pass to
        the scipy.optimize.curve_fit function.

    Returns
    -------

    dict
        This function returns a dict containing the model fit parameters, the
        minimized chi-sq value and the reduced chi-sq value. The form of this
        dict is mostly standardized across all functions in this module::

            {
                'fittype':'gaussianeb',
                'fitinfo':{
                    'initialparams':the initial EB params provided,
                    'finalparams':the final model fit EB params,
                    'finalparamerrs':formal errors in the params,
                    'fitmags': the model fit mags,
                    'fitepoch': the epoch of minimum light for the fit,
                },
                'fitchisq': the minimized value of the fit's chi-sq,
                'fitredchisq':the reduced chi-sq value,
                'fitplotfile': the output fit plot if fitplot is not None,
                'magseries':{
                    'times':input times in phase order of the model,
                    'phase':the phases of the model mags,
                    'mags':input mags/fluxes in the phase order of the model,
                    'errs':errs in the phase order of the model,
                    'magsarefluxes':input value of magsarefluxes kwarg
                }
            }

    '''

    stimes, smags, serrs = sigclip_magseries(times, mags, errs,
                                             sigclip=sigclip,
                                             magsarefluxes=magsarefluxes)

    # get rid of zero errs
    nzind = npnonzero(serrs)
    stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]

    # check the ebparams
    ebperiod, ebepoch, ebdepth = ebparams[0:3]

    # check if we have a ebepoch to use
    if ebepoch is None:

        if verbose:
            LOGWARNING('no ebepoch given in ebparams, '
                       'trying to figure it out automatically...')

        # do a spline fit to figure out the approximate min of the LC
        try:
            spfit = spline_fit_magseries(times, mags, errs, ebperiod,
                                         sigclip=sigclip,
                                         magsarefluxes=magsarefluxes,
                                         verbose=verbose)
            ebepoch = spfit['fitinfo']['fitepoch']

        # if the spline-fit fails, try a savgol fit instead
        except Exception:
            sgfit = savgol_fit_magseries(times, mags, errs, ebperiod,
                                         sigclip=sigclip,
                                         magsarefluxes=magsarefluxes,
                                         verbose=verbose)
            ebepoch = sgfit['fitinfo']['fitepoch']

        # if everything failed, then bail out and ask for the ebepoch
        finally:

            if ebepoch is None:
                LOGERROR("couldn't automatically figure out the eb epoch, "
                         "can't continue. please provide it in ebparams.")

                # assemble the returndict
                returndict = {
                    'fittype':'gaussianeb',
                    'fitinfo':{
                        'initialparams':ebparams,
                        'finalparams':None,
                        'finalparamerrs':None,
                        'fitmags':None,
                        'fitepoch':None,
                    },
                    'fitchisq':npnan,
                    'fitredchisq':npnan,
                    'fitplotfile':None,
                    'magseries':{
                        'phase':None,
                        'times':None,
                        'mags':None,
                        'errs':None,
                        'magsarefluxes':magsarefluxes,
                    },
                }

                return returndict

            else:

                if ebepoch.size > 1:
                    if verbose:
                        LOGWARNING('could not auto-find a single minimum '
                                   'for ebepoch, using the first one returned')
                    ebparams[1] = ebepoch[0]

                else:
                    if verbose:
                        LOGWARNING(
                            'using automatically determined ebepoch = %.5f'
                            % ebepoch
                        )
                    ebparams[1] = ebepoch.item()

    # next, check the ebdepth and fix it to the form required: positive for
    # fluxes, negative for magnitudes.
    # FIX: this used to be `ebparams[2] = -ebdepth[2]`, which indexes into a
    # scalar float and raises a TypeError whenever the sign needs flipping.
    if magsarefluxes:
        if ebdepth < 0.0:
            ebparams[2] = -ebdepth

    else:
        if ebdepth > 0.0:
            ebparams[2] = -ebdepth

    # finally, do the fit
    try:

        # set up the fit parameter bounds
        if param_bounds is None:

            curvefit_bounds = (
                nparray([0.0, 0.0, -npinf, 0.0, 0.0, 0.0]),
                nparray([npinf, npinf, npinf, 1.0, 1.0, 1.0])
            )
            fitfunc_fixed = {}

        else:

            # figure out the bounds
            lower_bounds = []
            upper_bounds = []
            fitfunc_fixed = {}

            for ind, key in enumerate(('period',
                                       'epoch',
                                       'pdepth',
                                       'pduration',
                                       'psdepthratio',
                                       'secondaryphase')):

                # handle fixed parameters: pin them to a tiny interval around
                # the initial value and record them for the model function
                if (key in param_bounds and
                    isinstance(param_bounds[key], str) and
                    param_bounds[key] == 'fixed'):

                    lower_bounds.append(ebparams[ind]-1.0e-7)
                    upper_bounds.append(ebparams[ind]+1.0e-7)
                    fitfunc_fixed[key] = ebparams[ind]

                # handle parameters with lower and upper bounds
                elif key in param_bounds and isinstance(param_bounds[key],
                                                        (tuple,list)):

                    lower_bounds.append(param_bounds[key][0])
                    upper_bounds.append(param_bounds[key][1])

                # handle no parameter bounds
                else:

                    lower_bounds.append(-npinf)
                    upper_bounds.append(npinf)

            # generate the bounds sequence in the required format
            curvefit_bounds = (
                nparray(lower_bounds),
                nparray(upper_bounds)
            )

        #
        # set up the curve fit function
        #
        curvefit_func = partial(eclipses.invgauss_eclipses_curvefit_func,
                                zerolevel=npmedian(smags),
                                fixed_params=fitfunc_fixed)

        #
        # run the fit
        #
        if curve_fit_kwargs is not None:

            finalparams, covmatrix = curve_fit(
                curvefit_func,
                stimes, smags,
                p0=ebparams,
                sigma=serrs,
                bounds=curvefit_bounds,
                absolute_sigma=(not scale_errs_redchisq_unity),
                **curve_fit_kwargs
            )

        else:

            finalparams, covmatrix = curve_fit(
                curvefit_func,
                stimes, smags,
                p0=ebparams,
                sigma=serrs,
                bounds=curvefit_bounds,
                absolute_sigma=(not scale_errs_redchisq_unity),
            )

    except Exception:
        LOGEXCEPTION("curve_fit returned an exception")
        finalparams, covmatrix = None, None

    # if the fit succeeded, then we can return the final parameters
    if finalparams is not None and covmatrix is not None:

        # calculate the chisq and reduced chisq
        fitmags, phase, ptimes, pmags, perrs = eclipses.invgauss_eclipses_func(
            finalparams,
            stimes, smags, serrs
        )
        fitchisq = npsum(
            ((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
        )
        # degrees of freedom: data points minus free (non-fixed) parameters
        fitredchisq = fitchisq/(len(pmags) -
                                len(finalparams) -
                                len(fitfunc_fixed))

        # formal 1-sigma errors from the diagonal of the covariance matrix
        stderrs = npsqrt(npdiag(covmatrix))

        if verbose:
            LOGINFO(
                'final fit done. chisq = %.5f, reduced chisq = %.5f' %
                (fitchisq, fitredchisq)
            )

        # get the fit epoch
        fperiod, fepoch = finalparams[:2]

        # assemble the returndict
        returndict = {
            'fittype':'gaussianeb',
            'fitinfo':{
                'initialparams':ebparams,
                'finalparams':finalparams,
                'finalparamerrs':stderrs,
                'fitmags':fitmags,
                'fitepoch':fepoch,
            },
            'fitchisq':fitchisq,
            'fitredchisq':fitredchisq,
            'fitplotfile':None,
            'magseries':{
                'phase':phase,
                'times':ptimes,
                'mags':pmags,
                'errs':perrs,
                'magsarefluxes':magsarefluxes,
            },
        }

        # make the fit plot if required
        if plotfit and isinstance(plotfit, str):

            make_fit_plot(phase, pmags, perrs, fitmags,
                          fperiod, ptimes.min(), fepoch,
                          plotfit,
                          magsarefluxes=magsarefluxes)

            returndict['fitplotfile'] = plotfit

        return returndict

    # if the leastsq fit failed, return nothing
    else:

        LOGERROR('eb-fit: least-squared fit to the light curve failed!')

        # assemble the returndict
        returndict = {
            'fittype':'gaussianeb',
            'fitinfo':{
                'initialparams':ebparams,
                'finalparams':None,
                'finalparamerrs':None,
                'fitmags':None,
                'fitepoch':None,
            },
            'fitchisq':npnan,
            'fitredchisq':npnan,
            'fitplotfile':None,
            'magseries':{
                'phase':None,
                'times':None,
                'mags':None,
                'errs':None,
                'magsarefluxes':magsarefluxes,
            },
        }

        return returndict
|
lgbouma/astrobase
|
astrobase/lcfit/eclipses.py
|
Python
|
mit
| 17,004
|
[
"Gaussian"
] |
7024c2d3f344a090e1fae27374830924347b228be6d376f8601d9841e700b08a
|
#!/usr/bin/env python
#JSON {"lot": "RKS/6-31G(d)",
#JSON "scf": "CDIISSCFSolver",
#JSON "er": "cholesky",
#JSON "difficulty": 7,
#JSON "description": "Basic RKS DFT example with hybrid MGGA exchange-correlation functional (M05)"}


import numpy as np
from horton import *  # pylint: disable=wildcard-import,unused-wildcard-import


# Load the coordinates from file.
# Use the XYZ file from HORTON's test data directory.
fn_xyz = context.get_fn('test/water.xyz')
mol = IOData.from_file(fn_xyz)

# Create a Gaussian basis set
obasis = get_gobasis(mol.coordinates, mol.numbers, '6-31g(d)')

# Compute Gaussian integrals
olp = obasis.compute_overlap()
kin = obasis.compute_kinetic()
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers)
# Cholesky-decomposed electron repulsion integrals (the "er": "cholesky" case)
er_vecs = obasis.compute_electron_repulsion_cholesky()

# Define a numerical integration grid needed the XC functionals
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers)

# Create alpha orbitals
orb_alpha = Orbitals(obasis.nbasis)

# Initial guess
guess_core_hamiltonian(olp, kin + na, orb_alpha)

# Construct the restricted HF effective Hamiltonian
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
# M05 hybrid meta-GGA exchange plus M05 meta-GGA correlation from LibXC;
# the exact-exchange fraction is taken from the hybrid functional below.
libxc_term_x = RLibXCHybridMGGA('x_m05')
libxc_term_c = RLibXCMGGA('c_m05')
terms = [
    RTwoIndexTerm(kin, 'kin'),
    RDirectTerm(er_vecs, 'hartree'),
    RGridGroup(obasis, grid, [libxc_term_x, libxc_term_c]),
    RExchangeTerm(er_vecs, 'x_hf', libxc_term_x.get_exx_fraction()),
    RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms, external)

# Decide how to occupy the orbitals (5 alpha electrons)
occ_model = AufbauOccModel(5)

# Converge WFN with CDIIS SCF
# - Construct the initial density matrix (needed for CDIIS).
occ_model.assign(orb_alpha)
dm_alpha = orb_alpha.to_dm()
# - SCF solver
scf_solver = CDIISSCFSolver(1e-6)
scf_solver(ham, olp, occ_model, dm_alpha)

# Derive orbitals (coeffs, energies and occupations) from the Fock and density
# matrices. The energy is also computed to store it in the output file below.
fock_alpha = np.zeros(olp.shape)
ham.reset(dm_alpha)
ham.compute_energy()
ham.compute_fock(fock_alpha)
orb_alpha.from_fock_and_dm(fock_alpha, dm_alpha, olp)

# Assign results to the molecule object and write it to a file, e.g. for
# later analysis. Note that the CDIIS algorithm can only really construct an
# optimized density matrix and no orbitals.
mol.title = 'RKS computation on water'
mol.energy = ham.cache['energy']
mol.obasis = obasis
mol.orb_alpha = orb_alpha
mol.dm_alpha = dm_alpha

# useful for post-processing (results stored in double precision):
mol.to_file('water.h5')


# CODE BELOW IS FOR horton-regression-test.py ONLY. IT IS NOT PART OF THE EXAMPLE.
rt_results = {
    'energy': ham.cache['energy'],
    'orb_alpha': orb_alpha.energies,
    'nn': ham.cache["energy_nn"],
    'kin': ham.cache["energy_kin"],
    'ne': ham.cache["energy_ne"],
    'grid': ham.cache["energy_grid_group"],
    'hartree': ham.cache["energy_hartree"],
    'x_hf': ham.cache["energy_x_hf"],
}
# BEGIN AUTOGENERATED CODE. DO NOT CHANGE MANUALLY.
rt_previous = {
    'energy': -76.372223106410885,
    'orb_alpha': np.array([
        -19.174675917533499, -1.0216889289766689, -0.54324149010045464,
        -0.37631403914157158, -0.30196183487620326, 0.079896573985756419,
        0.16296304612701332, 0.81419059490960388, 0.86377461055569127, 0.9243929453024935,
        0.95050094195149326, 1.1033737076332981, 1.4108569929549999, 1.7561523962868733,
        1.761532111350379, 1.8055689722633752, 2.3348442517458823, 2.6275437456471868
    ]),
    'grid': -6.821114560989138,
    'hartree': 46.93245844915478,
    'kin': 76.05549816546615,
    'ne': -199.18635862588496,
    'nn': 9.1571750364299866,
    'x_hf': -2.50988157058769,
}
|
theochem/horton
|
data/examples/hf_dft/rks_water_hybmgga.py
|
Python
|
gpl-3.0
| 3,761
|
[
"Gaussian"
] |
9adba042d7e460c70dd63bde21d0c0b492eb5c923cd71d174b61f64da268cc17
|
#!/usr/bin/env python
# Python 2 setup script for the RNAseq pipeline (uses print statements).

from setuptools import setup, find_packages

version = '0.1dev'

# Banner shown before installation starts.
print '''------------------------------
Installing RNAseq version {}
------------------------------
'''.format(version)

setup(
    name='rnaseq',
    version=version,
    author='lx Gui',
    author_email='guilixuan@gmail.com',
    keywords=['bioinformatics', 'NGS', 'RNAseq'],
    license='GPLv3',
    packages=find_packages(),
    include_package_data=True,
    # Command-line entry points shipped as plain scripts.
    scripts=['scripts/mrna',
             'scripts/simple_qc',
             'scripts/_qc_wrapper',
             'scripts/get_fq_cfg',
             'scripts/merge_files',
             'scripts/fake_qc'],
    install_requires=[
        'luigi',
        'pyyaml',
        'envoy',
        'xlsxwriter',
        'pandas',
        'rpy2<=2.8.6',  # pinned; presumably for Python 2 compatibility -- confirm
        'packaging',
        'docopt',
        'HTSeq',
        'click',
        'Pillow',
        'biopython',
        'pathlib'],
)

# Banner shown after installation finishes.
print '''------------------------------
RNAseq installation complete!
------------------------------
'''
|
bioShaun/OMrnaseq
|
setup.py
|
Python
|
gpl-3.0
| 1,031
|
[
"Biopython",
"HTSeq"
] |
3106d783ef72895961475ed9447b36edc48971bb3cdc7332b775936b8c5562ee
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
******************************************
espressopp.interaction.LennardJonesGromacs
******************************************
if :math:`d^2 > r_1^2`
.. math::
    U = 4 \varepsilon (\frac{\sigma^{12}}{d^{12}} - \frac{\sigma^6}{d^6}) + (d-r_1)^3 (ljsw3 + ljsw4 (d-r_1) + ljsw5)
else
.. math::
U = 4 \varepsilon (\frac{\sigma^{12}}{d^{12}} - \frac{\sigma^6}{d^6})
.. function:: espressopp.interaction.LennardJonesGromacs(epsilon, sigma, r1, cutoff, shift)
:param epsilon: (default: 1.0)
:param sigma: (default: 1.0)
:param r1: (default: 0.0)
:param cutoff: (default: infinity)
:param shift: (default: "auto")
:type epsilon: real
:type sigma: real
:type r1: real
:type cutoff:
:type shift:
.. function:: espressopp.interaction.VerletListLennardJonesGromacs(vl)
:param vl:
:type vl:
.. function:: espressopp.interaction.VerletListLennardJonesGromacs.getPotential(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListLennardJonesGromacs.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.CellListLennardJonesGromacs(stor)
:param stor:
:type stor:
.. function:: espressopp.interaction.CellListLennardJonesGromacs.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.FixedPairListLennardJonesGromacs(system, vl, potential)
:param system:
:param vl:
:param potential:
:type system:
:type vl:
:type potential:
.. function:: espressopp.interaction.FixedPairListLennardJonesGromacs.setPotential(potential)
:param potential:
:type potential:
"""
"""
real sig2 = sigma * sigma;
real sig6 = sig2 * sig2 * sig2;
ff1 = 48 \varepsilon \sigma^{12}
ff2 = 24 \varepsilon \sigma^6
ef1 = 4 \varepsilon \sigma^{12}
ef2 = 4 \varepsilon \sigma^6
r1sq = r_^2
real t = r_c - r_1
real r6inv = \frac{1}{r_c^6}
real r8inv = \frac{1}{r_c^8}
real t2inv = \frac{1}{(r_c - r_1)^2}
real t3inv = \frac{1}{(r_c - r_1)^3}
real t3 = (r_c - r_1)^3
real a6 = \frac{7 r_1 - 10 r_c}{(r_c - r_1)^2 r_c^8}
real b6 = \frac{9 r_c - 7 r_1}{(r_c - r_1)^3 r_c^8};
real a12 = \frac{13 r_1 - 16 r_c}{(r_c - r_1)^2 r_c^{14}}
real b12 = \frac{15 r_c - 13 r_1}{(r_c - r_1)^3 r_c^{14}}
real c6 = \frac{1}{r_c^6} - (r_c - r_1)^3(\frac{42 r_1 - 60 r_c}{3(r_c - r_1)^2 r_c^8} + \frac{(54 r_c - 42 r_1)(r_c - r_1)}{4(r_c - r_1)^3 r_c^8});
real c12 = \frac{1}{r_c^{12}} - (r_c - r_1)^3(\frac{156 r_1 - 192 r_c}{3(r_c - r_1)^2 r_c^{14}} + \frac{(180 r_c - 156 r_1)(r_c - r_1}{4(r_c - r_1)^3 r_c^{14}});
ljsw3 = -4 \varepsilon \sigma^{12} (\frac{156 r_1 - 192 r_c}{3(r_c - r_1)^2 r_c^{14}}) + 4 \varepsilon \sigma^6 \frac{42 r_1 - 60 r_c}{3(r_c - r_1)^2 r_c^8}
ljsw4 = -4 \varepsilon \sigma^{12} (\frac{180 r_c - 156 r_1}{4(r_c - r_1)^3 r_c^{14}}) + 4 \varepsilon \sigma^6 \frac{54 r_c - 42 r_1}{4(r_c - r_1)^3 r_c^8}
ljsw5 = -4 \varepsilon \sigma^{12} (\frac{1}{r_c^{12}} - (r_c - r_1)^3(\frac{156 r_1 - 192 r_c}{3(r_c - r_1)^2 r_c^{14}} + \frac{(180 r_c - 156 r_1)(r_c - r_1}{4(r_c - r_1)^3 r_c^{14}})) + 4 \varepsilon \sigma^6 \frac{1}{r_c^6} - (r_c - r_1)^3(\frac{42 r_1 - 60 r_c}{3(r_c - r_1)^2 r_c^8} + \frac{(54 r_c - 42 r_1)(r_c - r_1)}{4(r_c - r_1)^3 r_c^8})
U = 4 \varepsilon (\frac{\sigma^{12}}{d^{12}} - \frac{\sigma^6}{d^6}) + (d-r_1)^3 ((((-4 \varepsilon \sigma^{12} (\frac{156 r_1 - 192 r_c}{3(r_c - r_1)^2 r_c^{14}}) + 4 \varepsilon \sigma^6 \frac{42 r_1 - 60 r_c}{3(r_c - r_1)^2 r_c^8}
) + (-4 \varepsilon \sigma^{12} (\frac{180 r_c - 156 r_1}{4(r_c - r_1)^3 r_c^{14}}) + 4 \varepsilon \sigma^6 \frac{54 r_c - 42 r_1}{4(r_c - r_1)^3 r_c^8}
) (d-r_1) + (-4 \varepsilon \sigma^{12} (\frac{1}{r_c^{12}} - (r_c - r_1)^3(\frac{156 r_1 - 192 r_c}{3(r_c - r_1)^2 r_c^{14}} + \frac{(180 r_c - 156 r_1)(r_c - r_1}{4(r_c - r_1)^3 r_c^{14}}))) + 4 \varepsilon \sigma^6 \frac{1}{r_c^6} - (r_c - r_1)^3(\frac{42 r_1 - 60 r_c}{3(r_c - r_1)^2 r_c^8} + \frac{(54 r_c - 42 r_1)(r_c - r_1)}{4(r_c - r_1)^3 r_c^8})
)))
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_LennardJonesGromacs, \
interaction_VerletListLennardJonesGromacs, \
interaction_CellListLennardJonesGromacs, \
interaction_FixedPairListLennardJonesGromacs
class LennardJonesGromacsLocal(PotentialLocal, interaction_LennardJonesGromacs):
    """Worker-side wrapper around the C++ LennardJonesGromacs potential."""

    def __init__(self, epsilon=1.0, sigma=1.0, r1=0.0,
                 cutoff=infinity, shift="auto"):
        """Initialize the local LennardJonesGromacs object.

        The underlying C++ object is constructed only when no PMI
        communicator is active, or when this MPI rank belongs to the
        active PMI CPU group.
        """
        on_this_rank = (not (pmi._PMIComm and pmi._PMIComm.isActive())
                        or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup())
        if on_this_rank:
            ctor_args = [epsilon, sigma, r1, cutoff]
            if shift != "auto":
                # an explicit shift value is forwarded to the C++ constructor;
                # "auto" lets the C++ side compute the shift itself
                ctor_args.append(shift)
            cxxinit(self, interaction_LennardJonesGromacs, *ctor_args)
class VerletListLennardJonesGromacsLocal(InteractionLocal, interaction_VerletListLennardJonesGromacs):
    """Worker-side Verlet-list interaction for the LennardJonesGromacs potential."""

    @staticmethod
    def _worker_active():
        # act only when PMI is inactive or this rank is in the CPU group
        return (not (pmi._PMIComm and pmi._PMIComm.isActive())
                or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup())

    def __init__(self, vl):
        if self._worker_active():
            cxxinit(self, interaction_VerletListLennardJonesGromacs, vl)

    def setPotential(self, type1, type2, potential):
        """Assign *potential* to the (type1, type2) particle-type pair."""
        if self._worker_active():
            self.cxxclass.setPotential(self, type1, type2, potential)

    def getPotential(self, type1, type2):
        """Return the potential assigned to the (type1, type2) pair."""
        if self._worker_active():
            return self.cxxclass.getPotential(self, type1, type2)
class CellListLennardJonesGromacsLocal(InteractionLocal, interaction_CellListLennardJonesGromacs):
    """Worker-side cell-list interaction for the LennardJonesGromacs potential."""

    @staticmethod
    def _worker_active():
        # act only when PMI is inactive or this rank is in the CPU group
        return (not (pmi._PMIComm and pmi._PMIComm.isActive())
                or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup())

    def __init__(self, stor):
        if self._worker_active():
            cxxinit(self, interaction_CellListLennardJonesGromacs, stor)

    def setPotential(self, type1, type2, potential):
        """Assign *potential* to the (type1, type2) particle-type pair."""
        if self._worker_active():
            self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListLennardJonesGromacsLocal(InteractionLocal, interaction_FixedPairListLennardJonesGromacs):
    """Worker-side fixed-pair-list interaction for the LennardJonesGromacs potential."""

    @staticmethod
    def _worker_active():
        # act only when PMI is inactive or this rank is in the CPU group
        return (not (pmi._PMIComm and pmi._PMIComm.isActive())
                or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup())

    def __init__(self, system, vl, potential):
        if self._worker_active():
            cxxinit(self, interaction_FixedPairListLennardJonesGromacs, system, vl, potential)

    def setPotential(self, potential):
        """Assign *potential* to every pair in the fixed pair list."""
        if self._worker_active():
            self.cxxclass.setPotential(self, potential)
# Controller-side PMI proxies: each `pmiproxydefs` names the worker-side
# class (`cls`) plus the attributes/methods that are transparently forwarded
# to the workers (`pmiproperty` / `pmicall`).
if pmi.isController:
    class LennardJonesGromacs(Potential):
        'The LennardJonesGromacs potential.'
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.LennardJonesGromacsLocal',
            pmiproperty = ['epsilon', 'sigma', 'r1']
        )
    class VerletListLennardJonesGromacs(Interaction):
        # NOTE(review): Python-2 style metaclass declaration; presumably the
        # Potential base already installs the proxy metaclass for the class
        # above — confirm against espressopp.pmi.
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListLennardJonesGromacsLocal',
            pmicall = ['setPotential','getPotential']
        )
    class CellListLennardJonesGromacs(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.CellListLennardJonesGromacsLocal',
            pmicall = ['setPotential']
        )
    class FixedPairListLennardJonesGromacs(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedPairListLennardJonesGromacsLocal',
            pmicall = ['setPotential']
        )
|
kkreis/espressopp
|
src/interaction/LennardJonesGromacs.py
|
Python
|
gpl-3.0
| 9,198
|
[
"ESPResSo"
] |
eff898a3d95c8386971a9608c87b24188c95ddc4f98b6b55136f8a584145110b
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import numpy
def rotmatz(ang):
    """Return the 3x3 rotation matrix about the z axis for angle *ang* (radians)."""
    cos_a = numpy.cos(ang)
    sin_a = numpy.sin(ang)
    return numpy.array([
        [ cos_a, sin_a, 0.0],
        [-sin_a, cos_a, 0.0],
        [   0.0,   0.0, 1.0],
    ])
def rotmaty(ang):
    """Return the 3x3 rotation matrix about the y axis for angle *ang* (radians)."""
    cos_a = numpy.cos(ang)
    sin_a = numpy.sin(ang)
    return numpy.array([
        [ cos_a, 0.0, sin_a],
        [   0.0, 1.0,   0.0],
        [-sin_a, 0.0, cos_a],
    ])
def r2edge(ang, r):
    """Chord length subtended by central angle *ang* on a circle of radius *r*."""
    return 2*r*numpy.sin(ang/2)
def make60(b5, b6):
    """Return the 60 vertex coordinates of a C60 (truncated icosahedron) cage.

    Args:
        b5: bond length shared between a pentagon and a hexagon.
        b6: bond length shared between two hexagons.

    Returns:
        numpy array of shape (60, 3).

    Fix: removed the unused locals ``s1``/``c1`` that were computed but
    never read.
    """
    theta1 = numpy.arccos(1/numpy.sqrt(5))   # angle between adjacent 5-fold axes
    theta2 = (numpy.pi - theta1) * .5
    # circumradius giving the requested edge lengths
    r = (b5*2+b6)/2/numpy.sin(theta1/2)
    rot72 = rotmatz(numpy.pi*2/5)            # 72-degree rotation about z
    s2 = numpy.sin(theta2)
    c2 = numpy.cos(theta2)
    p1 = numpy.array(( s2*b5, 0, r-c2*b5))
    p9 = numpy.array((-s2*b5, 0,-r+c2*b5))
    p2 = numpy.array(( s2*(b5+b6), 0, r-c2*(b5+b6)))
    # 72-degree rotation about a tilted five-fold axis
    rot1 = reduce(numpy.dot, (rotmaty(theta1), rot72, rotmaty(-theta1)))
    p2s = []
    for i in range(5):
        p2s.append(p2)
        p2 = numpy.dot(p2, rot1)
    coord = []
    # top pentagon
    for i in range(5):
        coord.append(p1)
        p1 = numpy.dot(p1, rot72)
    # upper ring of 25 vertices
    for pj in p2s:
        pi = pj
        for i in range(5):
            coord.append(pi)
            pi = numpy.dot(pi, rot72)
    # lower ring: inversion image of the upper ring
    for pj in p2s:
        pi = pj
        for i in range(5):
            coord.append(-pi)
            pi = numpy.dot(pi, rot72)
    # bottom pentagon
    for i in range(5):
        coord.append(p9)
        p9 = numpy.dot(p9, rot72)
    return numpy.array(coord)
def make12(b):
    """Return the 12 vertex coordinates of an icosahedron with edge length *b*.

    Fix: removed the unused local ``theta2`` that was computed but never read.
    """
    theta1 = numpy.arccos(1/numpy.sqrt(5))   # polar angle of the vertex rings
    r = b/2/numpy.sin(theta1/2)              # circumradius for edge length b
    rot72 = rotmatz(numpy.pi*2/5)            # 72-degree rotation about z
    s1 = numpy.sin(theta1)
    c1 = numpy.cos(theta1)
    p1 = numpy.array(( s1*r, 0, c1*r))
    p2 = numpy.array((-s1*r, 0, -c1*r))
    coord = [( 0, 0, r)]                     # north pole
    for i in range(5):
        coord.append(p1)
        p1 = numpy.dot(p1, rot72)
    for i in range(5):
        coord.append(p2)
        p2 = numpy.dot(p2, rot72)
    coord.append(( 0, 0, -r))                # south pole
    return numpy.array(coord)
def make20(b):
    """Return the 20 vertex coordinates of a dodecahedron with edge length *b*."""
    theta1 = numpy.arccos(numpy.sqrt(5)/3)
    theta2 = numpy.arcsin(r2edge(theta1, 1)/2/numpy.sin(numpy.pi/5))
    r = b/2/numpy.sin(theta1/2)              # circumradius for edge length b
    rot72 = rotmatz(numpy.pi*2/5)            # 72-degree rotation about z
    s2, c2 = numpy.sin(theta2), numpy.cos(theta2)
    s3, c3 = numpy.sin(theta1+theta2), numpy.cos(theta1+theta2)
    # seed vertex of each of the four latitude rings; the lower two rings
    # are the inversion images of the upper two
    ring_seeds = (
        numpy.array(( s2*r, 0, c2*r)),
        numpy.array(( s3*r, 0, c3*r)),
        numpy.array((-s3*r, 0, -c3*r)),
        numpy.array((-s2*r, 0, -c2*r)),
    )
    coord = []
    for seed in ring_seeds:
        vertex = seed
        for _ in range(5):
            coord.append(vertex)
            vertex = numpy.dot(vertex, rot72)
    return numpy.array(coord)
if __name__ == '__main__':
    # Typical C60 bond lengths (Angstrom): pentagon-hexagon and hexagon-hexagon.
    b5 = 1.46
    b6 = 1.38
    for c in make60(b5, b6):
        print(c)
    # Icosahedron and dodecahedron with a uniform edge length.
    b = 1.4
    for c in make12(b):
        print(c)
    for c in make20(b):
        print(c)
|
gkc1000/pyscf
|
pyscf/tools/c60struct.py
|
Python
|
apache-2.0
| 3,727
|
[
"PySCF"
] |
6beecba7e60d344837ce4aed5335628084c4bdca47cdb56b7afafc1b530b3847
|
import re
import json
import typing as t
from dataclasses import dataclass
from functools import partial
from collections import OrderedDict
from ..graph import Graph, Root, Node, Link, Option, Field, Nothing
from ..graph import GraphVisitor, GraphTransformer
from ..types import (
TypeRef,
String,
Sequence,
Boolean,
Optional,
TypeVisitor,
)
from ..types import Any, RecordMeta, AbstractTypeVisitor
from ..utils import (
listify,
cached_property,
)
from .types import (
SCALAR,
NON_NULL,
LIST,
INPUT_OBJECT,
OBJECT,
DIRECTIVE,
FieldIdent,
FieldArgIdent,
InputObjectFieldIdent,
DirectiveArgIdent,
)
@dataclass
class Directive:
    """Description of a GraphQL directive exposed through introspection."""

    @dataclass
    class Argument:
        """A single named argument accepted by the directive."""
        name: str
        type_ident: t.Any
        description: str
        default_value: t.Any

    name: str
    locations: t.List[str]
    description: str
    args: t.List[Argument]

    @property
    def args_map(self):
        """Arguments keyed by name, preserving declaration order."""
        mapping = OrderedDict()
        for argument in self.args:
            mapping[argument.name] = argument
        return mapping
# GraphQL's built-in @skip and @include directives, always advertised
# through introspection.
_BUILTIN_DIRECTIVES = (
    Directive(
        name='skip',
        locations=['FIELD', 'FRAGMENT_SPREAD', 'INLINE_FRAGMENT'],
        description=(
            'Directs the executor to skip this field or fragment '
            'when the `if` argument is true.'
        ),
        args=[
            Directive.Argument(
                name='if',
                type_ident=NON_NULL(SCALAR('Boolean')),
                description='Skipped when true.',
                default_value=None,
            ),
        ],
    ),
    Directive(
        name='include',
        locations=['FIELD', 'FRAGMENT_SPREAD', 'INLINE_FRAGMENT'],
        description=(
            'Directs the executor to include this field or fragment '
            'only when the `if` argument is true.'
        ),
        args=[
            Directive.Argument(
                name='if',
                type_ident=NON_NULL(SCALAR('Boolean')),
                description='Included when true.',
                default_value=None,
            ),
        ],
    ),
)
def _async_wrapper(func):
async def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
# Names under which the query/mutation root nodes are exposed in the
# introspected GraphQL schema.
QUERY_ROOT_NAME = 'Query'
MUTATION_ROOT_NAME = 'Mutation'
class SchemaInfo:
    """Bundle of everything the introspection resolvers need to know."""
    def __init__(
        self,
        query_graph: Graph,
        mutation_graph: t.Optional[Graph] = None,
        directives: t.Optional[t.Sequence[Directive]] = None,
    ):
        self.query_graph = query_graph
        self.data_types = query_graph.data_types
        self.mutation_graph = mutation_graph
        self.directives = directives or ()
    @cached_property
    def directives_map(self):
        # directives keyed by name, preserving declaration order
        return OrderedDict((d.name, d) for d in self.directives)
class TypeIdent(AbstractTypeVisitor):
    """Map hiku types to GraphQL type idents (SCALAR/OBJECT/LIST/NON_NULL...).

    With ``input_mode=True``, type references resolve to INPUT_OBJECT idents
    (used for option/argument types) instead of OBJECT idents.
    """
    def __init__(self, graph, input_mode=False):
        self._graph = graph
        self._input_mode = input_mode
    def visit_any(self, obj):
        return SCALAR('Any')
    def visit_mapping(self, obj):
        return SCALAR('Any')
    def visit_record(self, obj):
        return SCALAR('Any')
    def visit_callable(self, obj):
        raise TypeError('Not expected here: {!r}'.format(obj))
    def visit_sequence(self, obj):
        # sequences map to a non-null list of the item type
        return NON_NULL(LIST(self.visit(obj.__item_type__)))
    def visit_optional(self, obj):
        # Optional strips the NON_NULL wrapper, if any
        ident = self.visit(obj.__type__)
        return ident.of_type if isinstance(ident, NON_NULL) else ident
    def visit_typeref(self, obj):
        if self._input_mode:
            # in input mode only data types may be referenced
            assert obj.__type_name__ in self._graph.data_types, \
                obj.__type_name__
            return NON_NULL(INPUT_OBJECT(obj.__type_name__))
        else:
            return NON_NULL(OBJECT(obj.__type_name__))
    def visit_string(self, obj):
        return NON_NULL(SCALAR('String'))
    def visit_integer(self, obj):
        return NON_NULL(SCALAR('Int'))
    def visit_float(self, obj):
        return NON_NULL(SCALAR('Float'))
    def visit_boolean(self, obj):
        return NON_NULL(SCALAR('Boolean'))
class UnsupportedGraphQLType(TypeError):
    """Raised when a hiku type has no GraphQL representation."""
    pass
class TypeValidator(TypeVisitor):
    """Checks whether a hiku type can be expressed in the GraphQL type system."""
    @classmethod
    def is_valid(cls, type_):
        try:
            cls().visit(type_)
        except UnsupportedGraphQLType:
            return False
        else:
            return True
    def visit_any(self, obj):
        raise UnsupportedGraphQLType()
    def visit_record(self, obj):
        # inline Record type can't be directly matched to GraphQL type system
        raise UnsupportedGraphQLType()
def not_implemented(*args, **kwargs):
    """Placeholder resolver for fields that are never actually queried."""
    raise NotImplementedError(args, kwargs)
def na_maybe(schema):
    """Not-available resolver for 'maybe' links: always absent."""
    return Nothing
def na_many(schema, ids=None, options=None):
    """Not-available resolver for 'many' links: an empty result per requested id."""
    if ids is None:
        return []
    return [[] for _ in ids]
def _nodes_map(schema: SchemaInfo):
    """All graph nodes keyed by name, plus the Query/Mutation root nodes."""
    nodes = [(n.name, n) for n in schema.query_graph.nodes]
    nodes.append((QUERY_ROOT_NAME, schema.query_graph.root))
    if schema.mutation_graph is not None:
        nodes.append((MUTATION_ROOT_NAME, schema.mutation_graph.root))
    return OrderedDict(nodes)
def schema_link(schema):
    """Link resolver for `__schema`; the target requires no id."""
    return None
def type_link(schema, options):
    """Link resolver for `__type(name:)`; absent when the name is unknown."""
    name = options['name']
    if name in _nodes_map(schema):
        return OBJECT(name)
    else:
        return Nothing
@listify
def root_schema_types(schema: SchemaInfo):
    """Yield the type idents advertised in `__schema { types }`."""
    # built-in scalars
    yield SCALAR('String')
    yield SCALAR('Int')
    yield SCALAR('Boolean')
    yield SCALAR('Float')
    yield SCALAR('Any')
    # every graph node (including the root operation types)
    for name in _nodes_map(schema):
        yield OBJECT(name)
    # record data types appear both as output and input objects
    for name, type_ in schema.data_types.items():
        if isinstance(type_, RecordMeta):
            yield OBJECT(name)
            yield INPUT_OBJECT(name)
def root_schema_query_type(schema):
    """Link resolver for `__schema { queryType }`."""
    return OBJECT(QUERY_ROOT_NAME)
def root_schema_mutation_type(schema):
    """Link resolver for `__schema { mutationType }`; absent without mutations."""
    if schema.mutation_graph is not None:
        return OBJECT(MUTATION_ROOT_NAME)
    else:
        return Nothing
def root_schema_directives(schema):
    """Link resolver for `__schema { directives }`."""
    return [
        DIRECTIVE(directive.name) for directive in schema.directives
    ]
@listify
def type_info(schema, fields, ids):
    """Field resolver for the __Type node: one row of field values per ident."""
    nodes_map = _nodes_map(schema)
    for ident in ids:
        if isinstance(ident, OBJECT):
            if ident.name in nodes_map:
                description = nodes_map[ident.name].description
            else:
                # a record data type exposed as an object; no description
                description = None
            info = {'id': ident,
                    'kind': 'OBJECT',
                    'name': ident.name,
                    'description': description}
        elif isinstance(ident, INPUT_OBJECT):
            # input objects get an "IO" name prefix to avoid clashing with
            # the output object of the same record type
            info = {'id': ident,
                    'kind': 'INPUT_OBJECT',
                    'name': 'IO{}'.format(ident.name),
                    'description': None}
        elif isinstance(ident, NON_NULL):
            info = {'id': ident,
                    'kind': 'NON_NULL'}
        elif isinstance(ident, LIST):
            info = {'id': ident,
                    'kind': 'LIST'}
        elif isinstance(ident, SCALAR):
            info = {'id': ident,
                    'name': ident.name,
                    'kind': 'SCALAR'}
        else:
            raise TypeError(repr(ident))
        # .get(): wrapper kinds have no name/description
        yield [info.get(f.name) for f in fields]
@listify
def type_fields_link(schema, ids, options):
    """Link resolver for `__Type { fields }`."""
    nodes_map = _nodes_map(schema)
    for ident in ids:
        if isinstance(ident, OBJECT):
            if ident.name in nodes_map:
                node = nodes_map[ident.name]
                # fields with a leading underscore are considered internal
                field_idents = [
                    FieldIdent(ident.name, f.name)
                    for f in node.fields if not f.name.startswith('_')
                ]
            else:
                type_ = schema.data_types[ident.name]
                field_idents = [
                    FieldIdent(ident.name, f_name)
                    for f_name, f_type in type_.__field_types__.items()
                ]
            if not field_idents:
                raise TypeError('Object type "{}" does not contain fields, '
                                'which is not acceptable for GraphQL in order '
                                'to define schema type'.format(ident.name))
            yield field_idents
        else:
            yield []
@listify
def type_of_type_link(schema, ids):
    """Link resolver for `__Type { ofType }` (NON_NULL/LIST wrappers only)."""
    for ident in ids:
        if isinstance(ident, (NON_NULL, LIST)):
            yield ident.of_type
        else:
            yield Nothing
@listify
def field_info(schema, fields, ids):
    """Field resolver for the __Field node: one row of field values per ident."""
    nodes_map = _nodes_map(schema)
    for ident in ids:
        if ident.node in nodes_map:
            node = nodes_map[ident.node]
            field = node.fields_map[ident.name]
            info = {'id': ident,
                    'name': field.name,
                    'description': field.description,
                    'isDeprecated': False,
                    'deprecationReason': None}
        else:
            # field of a record data type; no description available
            info = {'id': ident,
                    'name': ident.name,
                    'description': None,
                    'isDeprecated': False,
                    'deprecationReason': None}
        yield [info[f.name] for f in fields]
@listify
def field_type_link(schema, ids):
    """Link resolver for `__Field { type }`."""
    nodes_map = _nodes_map(schema)
    type_ident = TypeIdent(schema.query_graph)
    for ident in ids:
        if ident.node in nodes_map:
            node = nodes_map[ident.node]
            field = node.fields_map[ident.name]
            # untyped fields fall back to the Any scalar
            yield type_ident.visit(field.type or Any)
        else:
            data_type = schema.data_types[ident.node]
            field_type = data_type.__field_types__[ident.name]
            yield type_ident.visit(field_type)
@listify
def field_args_link(schema, ids):
    """Link resolver for `__Field { args }` (field options)."""
    nodes_map = _nodes_map(schema)
    for ident in ids:
        if ident.node in nodes_map:
            node = nodes_map[ident.node]
            field = node.fields_map[ident.name]
            yield [FieldArgIdent(ident.node, field.name, option.name)
                   for option in field.options]
        else:
            # record data type fields take no arguments
            yield []
@listify
def type_input_object_input_fields_link(schema, ids):
    """Link resolver for `__Type { inputFields }` (INPUT_OBJECT only)."""
    for ident in ids:
        if isinstance(ident, INPUT_OBJECT):
            data_type = schema.data_types[ident.name]
            yield [InputObjectFieldIdent(ident.name, key)
                   for key in data_type.__field_types__.keys()]
        else:
            yield []
@listify
def input_value_info(schema, fields, ids):
    """Field resolver for the __InputValue node.

    Handles three ident flavors: field options, input-object fields and
    directive arguments.
    """
    nodes_map = _nodes_map(schema)
    for ident in ids:
        if isinstance(ident, FieldArgIdent):
            node = nodes_map[ident.node]
            field = node.fields_map[ident.field]
            option = field.options_map[ident.name]
            if option.default is Nothing:
                default = None
            else:
                # GraphQL expects the default rendered as a literal string
                default = json.dumps(option.default)
            info = {'id': ident,
                    'name': option.name,
                    'description': option.description,
                    'defaultValue': default}
            yield [info[f.name] for f in fields]
        elif isinstance(ident, InputObjectFieldIdent):
            info = {'id': ident,
                    'name': ident.key,
                    'description': None,
                    'defaultValue': None}
            yield [info[f.name] for f in fields]
        elif isinstance(ident, DirectiveArgIdent):
            directive = schema.directives_map[ident.name]
            arg = directive.args_map[ident.arg]
            info = {'id': ident,
                    'name': arg.name,
                    'description': arg.description,
                    'defaultValue': arg.default_value}
            yield [info[f.name] for f in fields]
        else:
            raise TypeError(repr(ident))
@listify
def input_value_type_link(schema, ids):
    """Link resolver for `__InputValue { type }`.

    Handles field options, input-object fields and directive arguments.

    Fix: for ``DirectiveArgIdent`` the previous code yielded one type ident
    per argument of the directive instead of one for the requested argument,
    which misaligned the result list with ``ids`` whenever a directive had
    more (or fewer) than one argument.
    """
    nodes_map = _nodes_map(schema)
    type_ident = TypeIdent(schema.query_graph, input_mode=True)
    for ident in ids:
        if isinstance(ident, FieldArgIdent):
            node = nodes_map[ident.node]
            field = node.fields_map[ident.field]
            option = field.options_map[ident.name]
            yield type_ident.visit(option.type)
        elif isinstance(ident, InputObjectFieldIdent):
            data_type = schema.data_types[ident.name]
            field_type = data_type.__field_types__[ident.key]
            yield type_ident.visit(field_type)
        elif isinstance(ident, DirectiveArgIdent):
            # exactly one result per requested (directive, argument) pair
            directive = schema.directives_map[ident.name]
            yield directive.args_map[ident.arg].type_ident
        else:
            raise TypeError(repr(ident))
@listify
def directive_value_info(schema, fields, ids):
    """Field resolver for the __Directive node.

    Unknown directive names are silently skipped, which shortens the result
    relative to ``ids`` — acceptable only because idents come from
    ``root_schema_directives`` and are always known.
    """
    for ident in ids:
        if ident.name in schema.directives_map:
            directive = schema.directives_map[ident.name]
            info = {'name': directive.name,
                    'description': directive.description,
                    'locations': directive.locations}
            yield [info[f.name] for f in fields]
def directive_args_link(schema, ids):
    """Link resolver for `__Directive { args }`: argument idents per directive name."""
    return [
        [DirectiveArgIdent(name, arg.name)
         for arg in schema.directives_map[name].args]
        for name in ids
    ]
# Introspection sub-graph mirroring the GraphQL __Schema/__Type/__Field/...
# machinery; resolvers above expect a SchemaInfo to be partially applied
# via BindToSchema before execution.
GRAPH = Graph([
    Node('__Type', [
        Field('id', None, type_info),
        Field('kind', String, type_info),
        Field('name', String, type_info),
        Field('description', String, type_info),
        # OBJECT and INTERFACE only
        Link('fields', Sequence[TypeRef['__Field']], type_fields_link,
             requires='id',
             options=[Option('includeDeprecated', Boolean, default=False)]),
        # OBJECT only
        Link('interfaces', Sequence[TypeRef['__Type']], na_many,
             requires='id'),
        # INTERFACE and UNION only
        Link('possibleTypes', Sequence[TypeRef['__Type']], na_many,
             requires='id'),
        # ENUM only
        Link('enumValues', Sequence[TypeRef['__EnumValue']], na_many,
             requires='id',
             options=[Option('includeDeprecated', Boolean, default=False)]),
        # INPUT_OBJECT only
        Link('inputFields', Sequence[TypeRef['__InputValue']],
             type_input_object_input_fields_link, requires='id'),
        # NON_NULL and LIST only
        Link('ofType', Optional[TypeRef['__Type']], type_of_type_link,
             requires='id'),
    ]),
    Node('__Field', [
        Field('id', None, field_info),
        Field('name', String, field_info),
        Field('description', String, field_info),
        Link('args', Sequence[TypeRef['__InputValue']], field_args_link,
             requires='id'),
        Link('type', TypeRef['__Type'], field_type_link, requires='id'),
        Field('isDeprecated', Boolean, field_info),
        Field('deprecationReason', String, field_info),
    ]),
    Node('__InputValue', [
        Field('id', None, input_value_info),
        Field('name', String, input_value_info),
        Field('description', String, input_value_info),
        Link('type', TypeRef['__Type'], input_value_type_link, requires='id'),
        Field('defaultValue', String, input_value_info),
    ]),
    Node('__Directive', [
        Field('name', String, directive_value_info),
        Field('description', String, directive_value_info),
        Field('locations', Sequence[String], directive_value_info),
        Link('args', Sequence[TypeRef['__InputValue']], directive_args_link,
             requires='name'),
    ]),
    # enums are not supported yet, so these resolvers are never reached
    Node('__EnumValue', [
        Field('name', String, not_implemented),
        Field('description', String, not_implemented),
        Field('isDeprecated', Boolean, not_implemented),
        Field('deprecationReason', String, not_implemented),
    ]),
    Node('__Schema', [
        Link('types', Sequence[TypeRef['__Type']], root_schema_types,
             requires=None),
        Link('queryType', TypeRef['__Type'],
             root_schema_query_type, requires=None),
        Link('mutationType', Optional[TypeRef['__Type']],
             root_schema_mutation_type, requires=None),
        Link('subscriptionType', Optional[TypeRef['__Type']], na_maybe,
             requires=None),
        Link('directives', Sequence[TypeRef['__Directive']],
             root_schema_directives, requires=None),
    ]),
    Root([
        Link('__schema', TypeRef['__Schema'], schema_link, requires=None),
        Link('__type', Optional[TypeRef['__Type']], type_link, requires=None,
             options=[Option('name', String)]),
    ]),
])
class ValidateGraph(GraphVisitor):
    """Checks that every node/field/link/option name is a valid GraphQL name
    and that every node has at least one field; collects all errors and
    raises a single ValueError."""
    # GraphQL name: leading letter/underscore, then word characters
    _name_re = re.compile(r'^[_a-zA-Z]\w*$', re.ASCII)
    def __init__(self):
        self._path = []      # current position in the graph, for error messages
        self._errors = []    # accumulated error strings
    def _add_error(self, name, description):
        path = '.'.join(self._path + [name])
        self._errors.append('{}: {}'.format(path, description))
    @classmethod
    def validate(cls, graph):
        """Visit *graph* and raise ValueError listing every problem found."""
        self = cls()
        self.visit(graph)
        if self._errors:
            raise ValueError('Invalid GraphQL graph:\n{}'
                             .format('\n'.join('- {}'.format(err)
                                               for err in self._errors)))
    def visit_node(self, obj):
        if not self._name_re.match(obj.name):
            self._add_error(obj.name,
                            'Invalid node name: {}'.format(obj.name))
        if obj.fields:
            self._path.append(obj.name)
            super(ValidateGraph, self).visit_node(obj)
            self._path.pop()
        else:
            self._add_error(obj.name,
                            'No fields in the {} node'.format(obj.name))
    def visit_root(self, obj):
        if obj.fields:
            self._path.append('Root')
            super(ValidateGraph, self).visit_root(obj)
            self._path.pop()
        else:
            self._add_error('Root', 'No fields in the Root node')
    def visit_field(self, obj):
        if not self._name_re.match(obj.name):
            self._add_error(obj.name,
                            'Invalid field name: {}'.format(obj.name))
        super(ValidateGraph, self).visit_field(obj)
    def visit_link(self, obj):
        if not self._name_re.match(obj.name):
            self._add_error(obj.name,
                            'Invalid link name: {}'.format(obj.name))
        super(ValidateGraph, self).visit_link(obj)
    def visit_option(self, obj):
        if not self._name_re.match(obj.name):
            self._add_error(obj.name,
                            'Invalid option name: {}'.format(obj.name))
        super(ValidateGraph, self).visit_option(obj)
class BindToSchema(GraphTransformer):
    """Graph transformer that partially applies a SchemaInfo to every resolver.

    Field resolvers are cached so that the same function object is bound
    only once (the same resolver is shared by many fields above).
    """

    def __init__(self, schema):
        self.schema = schema
        self._processed = {}

    def visit_field(self, obj):
        field = super(BindToSchema, self).visit_field(obj)
        try:
            bound = self._processed[obj.func]
        except KeyError:
            bound = self._processed[obj.func] = partial(obj.func, self.schema)
        field.func = bound
        return field

    def visit_link(self, obj):
        link = super(BindToSchema, self).visit_link(obj)
        link.func = partial(link.func, self.schema)
        return link
class MakeAsync(GraphTransformer):
    """Graph transformer that wraps every resolver into a coroutine function."""

    def __init__(self):
        self._processed = {}

    def visit_field(self, obj):
        field = super(MakeAsync, self).visit_field(obj)
        try:
            wrapped = self._processed[obj.func]
        except KeyError:
            wrapped = self._processed[obj.func] = _async_wrapper(obj.func)
        field.func = wrapped
        return field

    def visit_link(self, obj):
        link = super(MakeAsync, self).visit_link(obj)
        link.func = _async_wrapper(link.func)
        return link
def type_name_field_func(node_name, fields, ids=None):
    """Resolver for the __typename field: the node's name, replicated per id."""
    if ids is None:
        return [node_name]
    return [[node_name] for _ in ids]
class AddIntrospection(GraphTransformer):
    """Graph transformer that appends a __typename field to every node and
    merges the introspection nodes into the graph."""
    def __init__(self, introspection_graph, type_name_field_factory):
        self.introspection_graph = introspection_graph
        # callable(node_name) -> Field implementing __typename
        self.type_name_field_factory = type_name_field_factory
    def visit_node(self, obj):
        node = super(AddIntrospection, self).visit_node(obj)
        node.fields.append(self.type_name_field_factory(obj.name))
        return node
    def visit_root(self, obj):
        root = super(AddIntrospection, self).visit_root(obj)
        root.fields.append(self.type_name_field_factory(QUERY_ROOT_NAME))
        return root
    def visit_graph(self, obj):
        graph = super(AddIntrospection, self).visit_graph(obj)
        graph.items.extend(self.introspection_graph.items)
        return graph
class GraphQLIntrospection(GraphTransformer):
    """Adds GraphQL introspection into synchronous graph
    Example:
    .. code-block:: python
        from hiku.graph import apply
        from hiku.introspection.graphql import GraphQLIntrospection
        graph = apply(graph, [GraphQLIntrospection(graph)])
    """
    # directives advertised by the resulting schema
    __directives__ = _BUILTIN_DIRECTIVES
    def __init__(self, query_graph, mutation_graph=None):
        """
        :param query_graph: graph, where Root node represents Query root
            operation type
        :param mutation_graph: graph, where Root node represents Mutation root
            operation type
        """
        self._schema = SchemaInfo(
            query_graph,
            mutation_graph,
            self.__directives__,
        )
    def __type_name__(self, node_name):
        # synchronous __typename field; overridden in the async subclass
        return Field('__typename', String,
                     partial(type_name_field_func, node_name))
    def __introspection_graph__(self):
        # GRAPH with this SchemaInfo bound into every resolver
        return BindToSchema(self._schema).visit(GRAPH)
    def visit_node(self, obj):
        node = super(GraphQLIntrospection, self).visit_node(obj)
        node.fields.append(self.__type_name__(obj.name))
        return node
    def visit_root(self, obj):
        root = super(GraphQLIntrospection, self).visit_root(obj)
        root.fields.append(self.__type_name__(QUERY_ROOT_NAME))
        return root
    def visit_graph(self, obj):
        # validate first so errors point at the user's graph, not the merged one
        ValidateGraph.validate(obj)
        introspection_graph = self.__introspection_graph__()
        items = [self.visit(node) for node in obj.items]
        items.extend(introspection_graph.items)
        return Graph(items, data_types=obj.data_types)
class AsyncGraphQLIntrospection(GraphQLIntrospection):
    """Adds GraphQL introspection into asynchronous graph
    Example:
    .. code-block:: python
        from hiku.graph import apply
        from hiku.introspection.graphql import AsyncGraphQLIntrospection
        graph = apply(graph, [AsyncGraphQLIntrospection(graph)])
    """
    def __type_name__(self, node_name):
        # coroutine-function variant of the __typename resolver
        return Field('__typename', String,
                     _async_wrapper(partial(type_name_field_func, node_name)))
    def __introspection_graph__(self):
        # wrap every introspection resolver into a coroutine function as well
        graph = super(AsyncGraphQLIntrospection, self).__introspection_graph__()
        graph = MakeAsync().visit(graph)
        return graph
|
vmagamedov/hiku
|
hiku/introspection/graphql.py
|
Python
|
bsd-3-clause
| 22,919
|
[
"VisIt"
] |
5dfb7f5f2f7c869146e37e88241918c6e32eee99c34366f4ba3f97521bd23d07
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1364979192.270927
__CHEETAH_genTimestamp__ = 'Wed Apr 3 17:53:12 2013'
__CHEETAH_src__ = '/home/fermi/Work/Model/tmsingle/openpli3.0/build-tmsingle/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-0.1+git1+279a2577c3bc6defebd4bf9e61a046dcf7f37c01-r0.72/git/plugin/controllers/views/web/getaudiotracks.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 3 17:10:17 2013'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class getaudiotracks(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(getaudiotracks, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_77795892 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2audiotracklist>
''')
for track in VFFSL(SL,"tracklist",True): # generated from line 4, col 2
write(u'''\t\t<e2audiotrack>
\t\t\t<e2audiotrackdescription>''')
_v = VFFSL(SL,"track.description",True) # u'$track.description' on line 6, col 29
if _v is not None: write(_filter(_v, rawExpr=u'$track.description')) # from line 6, col 29.
write(u'''</e2audiotrackdescription>
\t\t\t<e2audiotrackid>''')
_v = VFFSL(SL,"track.index",True) # u'$track.index' on line 7, col 20
if _v is not None: write(_filter(_v, rawExpr=u'$track.index')) # from line 7, col 20.
write(u'''</e2audiotrackid>
\t\t\t<e2audiotrackpid>''')
_v = VFFSL(SL,"track.pid",True) # u'$track.pid' on line 8, col 21
if _v is not None: write(_filter(_v, rawExpr=u'$track.pid')) # from line 8, col 21.
write(u'''</e2audiotrackpid>
\t\t\t<e2audiotrackactive>''')
_v = VFFSL(SL,"track.active",True) # u'$track.active' on line 9, col 24
if _v is not None: write(_filter(_v, rawExpr=u'$track.active')) # from line 9, col 24.
write(u'''</e2audiotrackactive>
\t\t</e2audiotrack>
''')
write(u'''</e2audiotracklist>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_77795892
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_getaudiotracks= 'respond'
## END CLASS DEFINITION
if not hasattr(getaudiotracks, '_initCheetahAttributes'):
templateAPIClass = getattr(getaudiotracks, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(getaudiotracks)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=getaudiotracks()).run()
|
pli3/Openwebif
|
plugin/controllers/views/web/getaudiotracks.py
|
Python
|
gpl-2.0
| 6,042
|
[
"VisIt"
] |
836752fb95eb42bb0914c9af21ed3b1a012b2b3d08875f57b7243d807d326bbd
|
import pytest
import betterbib
# Each pair is (raw title, expected BibTeX title with case-protecting braces).
@pytest.mark.parametrize(
    "string,ref",
    [
        (
            "The Magnus expansion and some of its applications",
            "The {Magnus} expansion and some of its applications",
        ),
        (
            "On generalized averaged Gaussian formulas, II",
            "On generalized averaged {Gaussian} formulas, {II}",
        ),
        ("Gaussian Hermitian Jacobian", "{Gaussian} {Hermitian} {Jacobian}"),
        (
            "VODE: a variable-coefficient ODE solver",
            "{VODE:} {A} variable-coefficient {ODE} solver",
        ),
        (
            "GMRES: A generalized minimal residual algorithm",
            "{GMRES:} {A} generalized minimal residual algorithm",
        ),
        (
            "Peano's kernel theorem for vector-valued functions",
            "{Peano's} kernel theorem for vector-valued functions",
        ),
        (
            "Exponential Runge-Kutta methods for parabolic problems",
            "Exponential {Runge}-{Kutta} methods for parabolic problems",
        ),
        (
            "Dash-Dash Double--Dash Triple---Dash",
            "Dash-Dash Double--Dash Triple---Dash",
        ),
        ("x: {X}", "x: {X}"),
        (
            "{Aaa ${\\text{Pt/Co/AlO}}_{x}$ aaa bbb}",
            "{Aaa {${\\text{Pt/Co/AlO}}_{x}$} aaa bbb}",
        ),
        ("z*", "z*"),
        ("A \\LaTeX title", "A \\LaTeX title"),
    ],
)
def test_translate_title(string, ref):
    """Proper names, acronyms and math must be brace-protected; dashes kept."""
    assert betterbib.tools._translate_title(string) == ref
|
nschloe/betterbib
|
tests/test_bibtex_title.py
|
Python
|
gpl-3.0
| 1,535
|
[
"Gaussian"
] |
c0296b4ced3a6849f6db2a109cc84d031e243ff59fad42b7836ac89aa7c9cc83
|
# coding: utf-8
# # Machine Learning Engineer Nanodegree
# ## Supervised Learning
# ## Project: Finding Donors for *CharityML*
# Welcome to the second project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `'TODO'` statement. Please be sure to read the instructions carefully!
#
# In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
#
# >**Note:** Please specify WHICH VERSION OF PYTHON you are using when submitting this notebook. Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
# ## Getting Started
#
# In this project, you will employ several supervised algorithms of your choice to accurately model individuals' income using data collected from the 1994 U.S. Census. You will then choose the best candidate algorithm from preliminary results and further optimize this algorithm to best model the data. Your goal with this implementation is to construct a model that accurately predicts whether an individual makes more than $50,000. This sort of task can arise in a non-profit setting, where organizations survive on donations. Understanding an individual's income can help a non-profit better understand how large of a donation to request, or whether or not they should reach out to begin with. While it can be difficult to determine an individual's general income bracket directly from public sources, we can (as we will see) infer this value from other publically available features.
#
# The dataset for this project originates from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Census+Income). The datset was donated by Ron Kohavi and Barry Becker, after being published in the article _"Scaling Up the Accuracy of Naive-Bayes Classifiers: A Decision-Tree Hybrid"_. You can find the article by Ron Kohavi [online](https://www.aaai.org/Papers/KDD/1996/KDD96-033.pdf). The data we investigate here consists of small changes to the original dataset, such as removing the `'fnlwgt'` feature and records with missing or ill-formatted entries.
# ----
# ## Exploring the Data
# Run the code cell below to load necessary Python libraries and load the census data. Note that the last column from this dataset, `'income'`, will be our target label (whether an individual makes more than, or at most, $50,000 annually). All other columns are features about each individual in the census database.
# In[1]:
from __future__ import division
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from time import time
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualization code visuals.py
import visuals as vs
# Pretty display for notebooks
get_ipython().magic(u'matplotlib inline')
# Load the Census dataset (one row per census respondent; 'income' is the label)
data = pd.read_csv("census.csv")
# Success - Display the first record to sanity-check column names and formats
display(data.head(n=1))
# ### Implementation: Data Exploration
# A cursory investigation of the dataset will determine how many individuals fit into either group, and will tell us about the percentage of these individuals making more than \$50,000. In the code cell below, you will need to compute the following:
# - The total number of records, `'n_records'`
# - The number of individuals making more than \$50,000 annually, `'n_greater_50k'`.
# - The number of individuals making at most \$50,000 annually, `'n_at_most_50k'`.
# - The percentage of individuals making more than \$50,000 annually, `'greater_percent'`.
#
# ** HINT: ** You may need to look at the table above to understand how the `'income'` entries are formatted.
# In[2]:
# Total number of records
n_records = data.shape[0]
# Number of records where individual's income is more than $50,000
n_greater_50k = data[data.income == ">50K"].shape[0]
# Number of records where individual's income is at most $50,000
n_at_most_50k = data[data.income == "<=50K"].shape[0]
# Percentage of individuals whose income is more than $50,000
greater_percent = n_greater_50k / n_records * 100
# Print the results
print "Total number of records: {}".format(n_records)
print "Individuals making more than $50,000: {}".format(n_greater_50k)
print "Individuals making at most $50,000: {}".format(n_at_most_50k)
print "Percentage of individuals making more than $50,000: {:.2f}%".format(greater_percent)
# ** Featureset Exploration **
#
# * **age**: continuous.
# * **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
# * **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
# * **education-num**: continuous.
# * **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
# * **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
# * **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
# * **race**: Black, White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other.
# * **sex**: Female, Male.
# * **capital-gain**: continuous.
# * **capital-loss**: continuous.
# * **hours-per-week**: continuous.
# * **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
# ----
# ## Preparing the Data
# Before data can be used as input for machine learning algorithms, it often must be cleaned, formatted, and restructured — this is typically known as **preprocessing**. Fortunately, for this dataset, there are no invalid or missing entries we must deal with, however, there are some qualities about certain features that must be adjusted. This preprocessing can help tremendously with the outcome and predictive power of nearly all learning algorithms.
# ### Transforming Skewed Continuous Features
# A dataset may sometimes contain at least one feature whose values tend to lie near a single number, but will also have a non-trivial number of vastly larger or smaller values than that single number. Algorithms can be sensitive to such distributions of values and can underperform if the range is not properly normalized. With the census dataset two features fit this description: '`capital-gain'` and `'capital-loss'`.
#
# Run the code cell below to plot a histogram of these two features. Note the range of the values present and how they are distributed.
# In[3]:
# Split the data into features and target label.
# 'income' is the string label ("<=50K" / ">50K"); everything else is a feature.
income_raw = data['income']
features_raw = data.drop('income', axis = 1)
# Visualize skewed continuous features of original data
# (histograms of 'capital-gain' and 'capital-loss' before any transform)
vs.distribution(data)
# For highly-skewed feature distributions such as `'capital-gain'` and `'capital-loss'`, it is common practice to apply a <a href="https://en.wikipedia.org/wiki/Data_transformation_(statistics)">logarithmic transformation</a> on the data so that the very large and very small values do not negatively affect the performance of a learning algorithm. Using a logarithmic transformation significantly reduces the range of values caused by outliers. Care must be taken when applying this transformation however: The logarithm of `0` is undefined, so we must translate the values by a small amount above `0` to apply the logarithm successfully.
#
# Run the code cell below to perform a transformation on the data and visualize the results. Again, note the range of values and how they are distributed.
# In[4]:
# Compress the heavy-tailed monetary features with a log(1 + x) transform.
skewed = ['capital-gain', 'capital-loss']
features_log_transformed = pd.DataFrame(data = features_raw)
# np.log applies element-wise over the selected columns; the +1 shift
# keeps the logarithm defined at zero.
features_log_transformed[skewed] = np.log(features_raw[skewed] + 1)
# Visualize the new log distributions
vs.distribution(features_log_transformed, transformed = True)
# ### Normalizing Numerical Features
# In addition to performing transformations on features that are highly skewed, it is often good practice to perform some type of scaling on numerical features. Applying a scaling to the data does not change the shape of each feature's distribution (such as `'capital-gain'` or `'capital-loss'` above); however, normalization ensures that each feature is treated equally when applying supervised learners. Note that once scaling is applied, observing the data in its raw form will no longer have the same original meaning, as exampled below.
#
# Run the code cell below to normalize each numerical feature. We will use [`sklearn.preprocessing.MinMaxScaler`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) for this.
# In[5]:
# Import sklearn.preprocessing.MinMaxScaler (the comment previously said
# StandardScaler, but min-max scaling to [0, 1] is what is used here)
from sklearn.preprocessing import MinMaxScaler
# Initialize a scaler, then apply it to the features
scaler = MinMaxScaler() # default=(0, 1)
# Only the continuous columns are rescaled; categoricals are encoded later.
numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
features_log_minmax_transform = pd.DataFrame(data = features_log_transformed)
features_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical])
# Show an example of a record with scaling applied
display(features_log_minmax_transform.head(n = 5))
# ### Implementation: Data Preprocessing
#
# From the table in **Exploring the Data** above, we can see there are several features for each record that are non-numeric. Typically, learning algorithms expect input to be numeric, which requires that non-numeric features (called *categorical variables*) be converted. One popular way to convert categorical variables is by using the **one-hot encoding** scheme. One-hot encoding creates a _"dummy"_ variable for each possible category of each non-numeric feature. For example, assume `someFeature` has three possible entries: `A`, `B`, or `C`. We then encode this feature into `someFeature_A`, `someFeature_B` and `someFeature_C`.
#
# | | someFeature | | someFeature_A | someFeature_B | someFeature_C |
# | :-: | :-: | | :-: | :-: | :-: |
# | 0 | B | | 0 | 1 | 0 |
# | 1 | C | ----> one-hot encode ----> | 0 | 0 | 1 |
# | 2 | A | | 1 | 0 | 0 |
#
# Additionally, as with the non-numeric features, we need to convert the non-numeric target label, `'income'` to numerical values for the learning algorithm to work. Since there are only two possible categories for this label ("<=50K" and ">50K"), we can avoid using one-hot encoding and simply encode these two categories as `0` and `1`, respectively. In code cell below, you will need to implement the following:
# - Use [`pandas.get_dummies()`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html?highlight=get_dummies#pandas.get_dummies) to perform one-hot encoding on the `'features_raw'` data.
# - Convert the target label `'income_raw'` to numerical entries.
# - Set records with "<=50K" to `0` and records with ">50K" to `1`.
# In[6]:
# One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies()
categorical = ['workclass', 'education_level', 'marital-status', 'occupation', 'relationship',
'race','sex']
features_categorical = pd.DataFrame(data = features_log_minmax_transform[categorical])
features_categorical = pd.get_dummies(features_categorical)
features_final = pd.concat([features_log_minmax_transform[numerical], features_categorical], axis=1)
# Encode the 'income_raw' data to numerical values
income = pd.Series(data = income_raw )
income = income.map({'<=50K': 0, '>50K': 1})
# Print the number of features after one-hot encoding
encoded = list(features_final.columns)
print "{} total features after one-hot encoding.".format(len(encoded))
# Uncomment the following line to see the encoded feature names
#print encoded
# ### Shuffle and Split Data
# Now all _categorical variables_ have been converted into numerical features, and all numerical features have been normalized. As always, we will now split the data (both features and their labels) into training and test sets. 80% of the data will be used for training and 20% for testing.
#
# Run the code cell below to perform this split.
# In[7]:
# Import train_test_split
# NOTE(review): sklearn.cross_validation was deprecated in favour of
# sklearn.model_selection in scikit-learn 0.18 -- confirm the installed
# version before migrating this import.
from sklearn.cross_validation import train_test_split
# Split the 'features' and 'income' data into training (80%) and testing (20%)
# sets; random_state pins the shuffle for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(features_final,
                                                    income,
                                                    test_size = 0.2,
                                                    random_state = 0)
# Show the results of the split
print "Training set has {} samples.".format(X_train.shape[0])
print "Testing set has {} samples.".format(X_test.shape[0])
# ----
# ## Evaluating Model Performance
# In this section, we will investigate four different algorithms, and determine which is best at modeling the data. Three of these algorithms will be supervised learners of your choice, and the fourth algorithm is known as a *naive predictor*.
# ### Metrics and the Naive Predictor
# *CharityML*, equipped with their research, knows individuals that make more than \$50,000 are most likely to donate to their charity. Because of this, *CharityML* is particularly interested in predicting who makes more than \$50,000 accurately. It would seem that using **accuracy** as a metric for evaluating a particular model's performance would be appropriate. Additionally, identifying someone that *does not* make more than \$50,000 as someone who does would be detrimental to *CharityML*, since they are looking to find individuals willing to donate. Therefore, a model's ability to precisely predict those that make more than \$50,000 is *more important* than the model's ability to **recall** those individuals. We can use **F-beta score** as a metric that considers both precision and recall:
#
# $$ F_{\beta} = (1 + \beta^2) \cdot \frac{precision \cdot recall}{\left( \beta^2 \cdot precision \right) + recall} $$
#
# In particular, when $\beta = 0.5$, more emphasis is placed on precision. This is called the **F$_{0.5}$ score** (or F-score for simplicity).
#
# Looking at the distribution of classes (those who make at most \$50,000, and those who make more), it's clear most individuals do not make more than \$50,000. This can greatly affect **accuracy**, since we could simply say *"this person does not make more than \$50,000"* and generally be right, without ever looking at the data! Making such a statement would be called **naive**, since we have not considered any information to substantiate the claim. It is always important to consider the *naive prediction* for your data, to help establish a benchmark for whether a model is performing well. That been said, using that prediction would be pointless: If we predicted all people made less than \$50,000, *CharityML* would identify no one as donors.
#
#
# #### Note: Recap of accuracy, precision, recall
#
# ** Accuracy ** measures how often the classifier makes the correct prediction. It’s the ratio of the number of correct predictions to the total number of predictions (the number of test data points).
#
# ** Precision ** tells us what proportion of messages we classified as spam, actually were spam.
# It is a ratio of true positives(words classified as spam, and which are actually spam) to all positives(all words classified as spam, irrespective of whether that was the correct classification), in other words it is the ratio of
#
# `[True Positives/(True Positives + False Positives)]`
#
# ** Recall(sensitivity)** tells us what proportion of messages that actually were spam were classified by us as spam.
# It is a ratio of true positives(words classified as spam, and which are actually spam) to all the words that were actually spam, in other words it is the ratio of
#
# `[True Positives/(True Positives + False Negatives)]`
#
# For classification problems that are skewed in their classification distributions like in our case, for example if we had a 100 text messages and only 2 were spam and the rest 98 weren't, accuracy by itself is not a very good metric. We could classify 90 messages as not spam(including the 2 that were spam but we classify them as not spam, hence they would be false negatives) and 10 as spam(all 10 false positives) and still get a reasonably good accuracy score. For such cases, precision and recall come in very handy. These two metrics can be combined to get the F1 score, which is weighted average(harmonic mean) of the precision and recall scores. This score can range from 0 to 1, with 1 being the best possible F1 score(we take the harmonic mean as we are dealing with ratios).
# ### Question 1 - Naive Predictor Performace
# * If we chose a model that always predicted an individual made more than $50,000, what would that model's accuracy and F-score be on this dataset? You must use the code cell below and assign your results to `'accuracy'` and `'fscore'` to be used later.
#
# ** HINT: **
#
# * When we have a model that always predicts '1' (i.e. the individual makes more than 50k) then our model will have no True Negatives(TN) or False Negatives(FN) as we are not making any negative('0' value) predictions. Therefore our Accuracy in this case becomes the same as our Precision(True Positives/(True Positives + False Positives)) as every prediction that we have made with value '1' that should have '0' becomes a False Positive; therefore our denominator in this case is the total number of records we have in total.
# * Our Recall score(True Positives/(True Positives + False Negatives)) in this setting becomes 1 as we have no False Negatives.
# In[8]:
TP = np.sum(income) # Counting the ones as this is the naive case. Note that 'income' is the 'income_raw' data
#encoded to numerical values done in the data preprocessing step.
FP = income.count() - TP # Specific to the naive case
TN = 0 # No predicted negatives in the naive case
FN = 0 # No predicted negatives in the naive case
# Calculate accuracy, precision and recall
accuracy = (TP+TN) / (TP+FP+TN+FN)
recall = TP/(TP+FN)
precision = TP /(TP+FP)
# Calculate F-score using the formula above for beta = 0.5 and correct values for precision and recall.
# HINT: The formula above can be written as (1 + beta**2) * (precision * recall) / ((beta**2 * precision) + recall)
beta = 0.5
fscore = (1+ beta**2) * (precision * recall) / ((beta**2 * precision) + recall)
# Print the results
print "Naive Predictor: [Accuracy score: {:.4f}, F-score: {:.4f}]".format(accuracy, fscore)
# ### Supervised Learning Models
# **The following are some of the supervised learning models that are currently available in** [`scikit-learn`](http://scikit-learn.org/stable/supervised_learning.html) **that you may choose from:**
# - Gaussian Naive Bayes (GaussianNB)
# - Decision Trees
# - Ensemble Methods (Bagging, AdaBoost, Random Forest, Gradient Boosting)
# - K-Nearest Neighbors (KNeighbors)
# - Stochastic Gradient Descent Classifier (SGDC)
# - Support Vector Machines (SVM)
# - Logistic Regression
# ### Question 2 - Model Application
# List three of the supervised learning models above that are appropriate for this problem that you will test on the census data. For each model chosen
#
# - Describe one real-world application in industry where the model can be applied.
# - What are the strengths of the model; when does it perform well?
# - What are the weaknesses of the model; when does it perform poorly?
# - What makes this model a good candidate for the problem, given what you know about the data?
#
# ** HINT: **
#
# Structure your answer in the same format as above^, with 4 parts for each of the three models you pick. Please include references with your answer.
# **Answer: **
#
# The following three models are picked.
#
# Gaussian Naive Bayes (GaussianNB)
# - GaussianNB can be applied to document classification and spam email filtering. For example, Gaussian NB takes term-document matrix of emails as the input and classify emails into spam and non-spam.
# - The strengths of GaussianNB are that it requires small size of training data to determine necessary parameters and is very fast, not suffering “curse of dimensionality” (SLUG).
# - Although GaussianNB is a good classifier, it can not provide good estimates of probabilities (SLUG), which is one of the weaknesses. Another weakness is that GaussianNB requires a strong conditional independence assumption on the attributes in the model (Class notes; SLUG).
# - As our classification problem has many input variables and observations, GaussianNB with high efficiency and capability of handling high dimensionality is a good candidate.
#
# Support Vector Machines (SVM)
# - SVM can be used in the imaging application of detecting human face. SVM discovers a squared boundary around face and classifies the images as with-face or without-face.
# - SVM is effective in high dimensional spaces and memory efficient; it can adopt different Kernel functions for the decision function (SLUG).
# - The weakness of SVM is that its accuracy in terms of over-fitting is sensitive to the Kernel functions and regularization term if there are too many features (SLUG). Another weakness of SVM is that it do not provide probability estimates (SLUG).
# - Similar to GaussianNB, SVM with high efficiency and capability of handling high dimensionality is a good candidate, as our classification problem has many input variables and observations.
#
# Ensemble Methods (AdaBoost)
# - Industrial applications of AdaBoost includes document classification and face detection.
# - As the strength, AdaBoost is efficient and setting parameters for AdaBoost is easy (Class notes, ESL). As a kind of AdaBoost, boosting tree methods is highly accurate and is capable of handling irrelevant features (Class notes, ESL).
# - As the weakness, AdaBoost is sensitive to data’s noise and needs enough data for fitting (Class notes, ESL).
# - Our classification problem has enough data and many features, part of which may be irrelevant for fitting. Hence, AdaBoost (Boosting tree) is a good candidate for our problem.
#
# Reference:
# - Class notes.
# - Friedman, Jerome, Trevor Hastie, and Robert Tibshirani. The elements of statistical learning. Vol. 1. New York: Springer series in statistics, 2001. (ESL)
# - Scikit-Learn User Guide (SLUG), Release 0.20.dev0.
#
# ### Implementation - Creating a Training and Predicting Pipeline
# To properly evaluate the performance of each model you've chosen, it's important that you create a training and predicting pipeline that allows you to quickly and effectively train models using various sizes of training data and perform predictions on the testing data. Your implementation here will be used in the following section.
# In the code block below, you will need to implement the following:
# - Import `fbeta_score` and `accuracy_score` from [`sklearn.metrics`](http://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics).
# - Fit the learner to the sampled training data and record the training time.
# - Perform predictions on the test data `X_test`, and also on the first 300 training points `X_train[:300]`.
# - Record the total prediction time.
# - Calculate the accuracy score for both the training subset and testing set.
# - Calculate the F-score for both the training subset and testing set.
# - Make sure that you set the `beta` parameter!
# In[9]:
# TODO: Import two metrics from sklearn - fbeta_score and accuracy_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
def train_predict(learner, sample_size, X_train, y_train, X_test, y_test):
'''
inputs:
- learner: the learning algorithm to be trained and predicted on
- sample_size: the size of samples (number) to be drawn from training set
- X_train: features training set
- y_train: income training set
- X_test: features testing set
- y_test: income testing set
'''
results = {}
# TODO: Fit the learner to the training data using slicing with 'sample_size' using .fit(training_features[:], training_labels[:])
start = time() # Get start time
learner = learner()
learner.fit(X_train[0:sample_size],y_train[:sample_size])
end = time() # Get end time
# TODO: Calculate the training time
results['train_time'] = start - end
# TODO: Get the predictions on the test set(X_test),
# then get predictions on the first 300 training samples(X_train) using .predict()
start = time() # Get start time
predictions_test = learner.predict(X_test)
predictions_train = learner.predict(X_train[:300])
end = time() # Get end time
# TODO: Calculate the total prediction time
results['pred_time'] = end - start
# TODO: Compute accuracy on the first 300 training samples which is y_train[:300]
results['acc_train'] = accuracy_score(y_train[:300],predictions_train)
# TODO: Compute accuracy on test set using accuracy_score()
results['acc_test'] = accuracy_score(y_test,predictions_test)
# TODO: Compute F-score on the the first 300 training samples using fbeta_score()
results['f_train'] = fbeta_score(y_train[:300],predictions_train, beta=0.5)
# TODO: Compute F-score on the test set which is y_test
results['f_test'] = fbeta_score(y_test,predictions_test, beta=0.5)
# Success
print "{} trained on {} samples.".format(learner.__class__.__name__, sample_size)
# Return the results
return results
# ### Implementation: Initial Model Evaluation
# In the code cell, you will need to implement the following:
# - Import the three supervised learning models you've discussed in the previous section.
# - Initialize the three models and store them in `'clf_A'`, `'clf_B'`, and `'clf_C'`.
# - Use a `'random_state'` for each model you use, if provided.
# - **Note:** Use the default settings for each model — you will tune one specific model in a later section.
# - Calculate the number of records equal to 1%, 10%, and 100% of the training data.
# - Store those values in `'samples_1'`, `'samples_10'`, and `'samples_100'` respectively.
#
# **Note:** Depending on which algorithms you chose, the following implementation may take some time to run!
# In[15]:
# Import the three supervised learning models from sklearn
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
from sklearn.ensemble import AdaBoostClassifier

# The models are passed as *classes*; train_predict() instantiates them.
clf_A = GaussianNB
clf_B = svm.SVC
clf_C = AdaBoostClassifier

# Number of samples equal to 1%, 10%, and 100% of the training data.
samples_100 = len(y_train)
samples_10 = int(len(y_train) * 0.1)
samples_1 = int(len(y_train) * 0.01)

# Collect results on the learners
results = {}
for clf in [clf_A, clf_B, clf_C]:
    # FIX: clf is a class, so clf.__class__.__name__ is the *metaclass*
    # name ("type"/"ABCMeta") -- the three models would collide on the
    # same results key and overwrite each other. clf.__name__ is the
    # model's own name.
    clf_name = clf.__name__
    results[clf_name] = {}
    for i, samples in enumerate([samples_1, samples_10, samples_100]):
        results[clf_name][i] = train_predict(clf, samples, X_train, y_train, X_test, y_test)

# Run metrics visualization for the three supervised learning models chosen
vs.evaluate(results, accuracy, fscore)
|
leizhipeng/ml
|
finding_donors/finding_donors.py
|
Python
|
gpl-3.0
| 28,896
|
[
"Gaussian"
] |
e151dcc5e97e0fa227b12b412939254ab7a83dd54774974734bbcf71d9392a5e
|
#!/usr/bin/env python
"""
Status of DIRAC components using runsvstat utility
Example:
$ dirac-status-component
DIRAC Root Path = /vo/dirac/versions/Lyon-HEAD-1296215324
Name : Runit Uptime PID
WorkloadManagement_PilotStatusAgent : Run 4029 1697
WorkloadManagement_JobHistoryAgent : Run 4029 167
"""
from DIRAC.Core.Base.Script import Script
@Script()
def main():
    # Component status is read from local runit state, so a configuration
    # service connection is not required.
    Script.disableCS()
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument(
        " System: Name of the system for the component (default *: all)", mandatory=False, default="*"
    )
    Script.registerArgument(
        (
            "Service: Name of the particular component (default *: all)",
            "Agent: Name of the particular component (default *: all)",
        ),
        mandatory=False,
        default="*",
    )
    _, args = Script.parseCommandLine()
    # Grouped positional arguments already yield (system, component) with
    # their "*" defaults applied.
    system, component = Script.getPositionalArgs(group=True)

    from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller

    if len(args) > 2:
        Script.showHelp(exitCode=1)

    # NOTE(review): this manual re-parse of 'args' duplicates the grouped
    # positional-argument handling above -- it looks like transitional code;
    # confirm before removing either path.
    if len(args) > 0:
        system = args[0]
        if system != "*":
            if len(args) > 1:
                component = args[1]
    # Abort immediately on installer errors instead of returning S_ERROR.
    gComponentInstaller.exitOnError = True
    # Query runit startup status for the (system, component) selection.
    result = gComponentInstaller.getStartupComponentStatus([system, component])
    if not result["OK"]:
        print("ERROR:", result["Message"])
        exit(-1)

    gComponentInstaller.printStartupStatus(result["Value"])
if __name__ == "__main__":
main()
|
DIRACGrid/DIRAC
|
src/DIRAC/FrameworkSystem/scripts/dirac_status_component.py
|
Python
|
gpl-3.0
| 1,658
|
[
"DIRAC"
] |
29b3584b21cb66e79534b05b14736797b305e60591d894e27b5dc61fab6521f4
|
"""Tools for managing evaluation contexts. """
from sympy.utilities.iterables import dict_merge
from sympy.core.basic import PicklableWithSlots
# Option names that Context recognizes; each name also becomes a module-level
# helper function (generated below) that builds a single-option Context.
__known_options__ = set(['frac', 'gens', 'wrt', 'sort', 'order', 'domain',
    'modulus', 'gaussian', 'extension', 'field', 'greedy', 'symmetric'])

# Placeholder for globally-installed contexts (not populated in this module).
__global_options__ = []

# Source template for the generated helpers, e.g. frac(x) -> Context(frac=x).
__template__ = """\
def %(option)s(_%(option)s):
    return Context(%(option)s=_%(option)s)
"""

# Python 2 'exec' statement: materialize one helper per known option.
for option in __known_options__:
    exec __template__ % { 'option': option }
class Context(PicklableWithSlots):
    """A bag of evaluation options with attribute-style access.

    Known option names (see ``__known_options__``) read as attributes and
    return ``None`` when unset; two contexts combine with ``&``, with the
    right-hand side's options taking part in the merge via ``dict_merge``.
    """

    __slots__ = ['__options__']

    def __init__(self, dict=None, **options):
        # NOTE: 'dict' intentionally shadows the builtin (existing API).
        if dict is not None:
            self.__options__ = dict_merge(dict, options)
        else:
            self.__options__ = options

    def __getattribute__(self, name):
        # Intercept attribute access: known option names are looked up in
        # __options__ (None if absent); everything else falls through to
        # normal attribute resolution.
        if name in __known_options__:
            try:
                return object.__getattribute__(self, '__options__')[name]
            except KeyError:
                return None
        else:
            return object.__getattribute__(self, name)

    def __str__(self):
        return 'Context(%s)' % ', '.join(
            [ '%s=%r' % (key, value) for key, value in self.__options__.iteritems() ])

    def __and__(self, other):
        # Combine two contexts into a new one via dict_merge.
        if isinstance(other, Context):
            return Context(**dict_merge(self.__options__, other.__options__))
        else:
            raise TypeError("a context manager expected, got %s" % other)

    def __enter__(self):
        # 'with' support is declared but not implemented yet.
        raise NotImplementedError('global context')

    def __exit__(self, exc_type, exc_val, exc_tb):
        raise NotImplementedError('global context')
def register_context(func):
    """Expose *func* as a method on Context.

    The generated wrapper merges the context's stored options with the
    call's keyword arguments (via ``dict_merge``) and invokes the original
    function without the instance itself.
    """
    def wrapper(self, *args, **kwargs):
        merged = dict_merge(self.__options__, kwargs)
        return func(*args, **merged)

    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__

    setattr(Context, func.__name__, wrapper)
    return func
|
ichuang/sympy
|
sympy/polys/polycontext.py
|
Python
|
bsd-3-clause
| 1,887
|
[
"Gaussian"
] |
572775101f63dda6e98a6250c509913fd022116b59aa922c1ed6dd23a34c7f93
|
"""
Test courseware search
"""
import os
import json
import uuid
from ..helpers import remove_file
from ...pages.common.logout import LogoutPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.lms.courseware_search import CoursewareSearchPage
from ...pages.lms.staff_view import StaffPage
from ...fixtures.course import XBlockFixtureDesc
from nose.plugins.attrib import attr
from ..studio.base_studio_test import ContainerBase
from ...pages.studio.settings_group_configurations import GroupConfigurationsPage
from ...pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
from ...fixtures import LMS_BASE_URL
from ...pages.studio.component_editor import ComponentVisibilityEditorView
from ...pages.lms.instructor_dashboard import InstructorDashboardPage
from bok_choy.promise import EmptyPromise
@attr('shard_1')
class CoursewareSearchCohortTest(ContainerBase):
    """
    Test courseware search with cohorted content groups.
    """
    # Credentials for the default test user created by the base class.
    USERNAME = 'STUDENT_TESTER'
    EMAIL = 'student101@example.com'
    # On-disk search index used by this test; created in setUp and
    # removed via addCleanup.
    TEST_INDEX_FILENAME = "test_root/index_file.dat"
    def setUp(self, is_staff=True):
        """
        Create search page and course content to search.

        Builds the course (via ContainerBase), registers two students
        destined for cohorts A and B, then enables cohorting, creates
        content groups, links content to them, assigns the students,
        and finally triggers a search reindex.
        """
        # create test file in which index for this test will live
        with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
            json.dump({}, index_file)
        self.addCleanup(remove_file, self.TEST_INDEX_FILENAME)

        super(CoursewareSearchCohortTest, self).setUp(is_staff=is_staff)
        self.staff_user = self.user

        self.course_outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        self.content_group_a = "Content Group A"
        self.content_group_b = "Content Group B"

        # Create a student who will be in "Cohort A"
        self.cohort_a_student_username = "cohort_a_" + str(uuid.uuid4().hex)[:12]
        self.cohort_a_student_email = self.cohort_a_student_username + "@example.com"
        # no_login=True -- presumably registers the account without signing in;
        # students authenticate later via _auto_auth. TODO confirm.
        StudioAutoAuthPage(
            self.browser, username=self.cohort_a_student_username, email=self.cohort_a_student_email, no_login=True
        ).visit()

        # Create a student who will be in "Cohort B"
        self.cohort_b_student_username = "cohort_b_" + str(uuid.uuid4().hex)[:12]
        self.cohort_b_student_email = self.cohort_b_student_username + "@example.com"
        StudioAutoAuthPage(
            self.browser, username=self.cohort_b_student_username, email=self.cohort_b_student_email, no_login=True
        ).visit()

        self.courseware_search_page = CoursewareSearchPage(self.browser, self.course_id)

        # Enable Cohorting and assign cohorts and content groups
        self._auto_auth(self.staff_user["username"], self.staff_user["email"], True)
        self.enable_cohorting(self.course_fixture)
        self.create_content_groups()
        self.link_html_to_content_groups_and_publish()
        self.create_cohorts_and_assign_students()

        self._studio_reindex()
    def _auto_auth(self, username, email, staff):
        """
        Logout and login with given credentials.

        Arguments:
            username, email: the account to authenticate as.
            staff (bool): whether the account gets course staff access.
        """
        LogoutPage(self.browser).visit()
        StudioAutoAuthPage(self.browser, username=username, email=email,
                           course_id=self.course_id, staff=staff).visit()
    def _studio_reindex(self):
        """
        Trigger a courseware-search reindex from the Studio course outline.

        Re-authenticates as the staff user first, since reindexing is a
        staff-only action.
        """
        self._auto_auth(self.staff_user["username"], self.staff_user["email"], True)
        self.course_outline.visit()
        self.course_outline.start_reindex()
        self.course_outline.wait_for_ajax()
    def _goto_staff_page(self):
        """
        Open the courseware search page and return the staff-view page object.

        Asserts that the current view mode is 'Staff' before returning.
        """
        self.courseware_search_page.visit()
        staff_page = StaffPage(self.browser, self.course_id)
        self.assertEqual(staff_page.staff_view_mode, 'Staff')
        return staff_page
def populate_course_fixture(self, course_fixture):
    """
    Populate the children of the test course fixture.

    Builds a single chapter/sequential/vertical tree holding four HTML
    components whose bodies double as unique search terms. Three of the
    components are later restricted to content groups by
    link_html_to_content_groups_and_publish; one stays visible to all.
    """
    # These strings serve both as component names and as the searchable
    # body text, so search results can be matched unambiguously.
    self.group_a_html = 'GROUPACONTENT'
    self.group_b_html = 'GROUPBCONTENT'
    self.group_a_and_b_html = 'GROUPAANDBCONTENT'
    self.visible_to_all_html = 'VISIBLETOALLCONTENT'
    course_fixture.add_children(
        XBlockFixtureDesc('chapter', 'Test Section').add_children(
            XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                    XBlockFixtureDesc('html', self.group_a_html, data='<html>GROUPACONTENT</html>'),
                    XBlockFixtureDesc('html', self.group_b_html, data='<html>GROUPBCONTENT</html>'),
                    XBlockFixtureDesc('html', self.group_a_and_b_html, data='<html>GROUPAANDBCONTENT</html>'),
                    XBlockFixtureDesc('html', self.visible_to_all_html, data='<html>VISIBLETOALLCONTENT</html>')
                )
            )
        )
    )
def enable_cohorting(self, course_fixture):
    """
    Enable cohorting for the current course via the LMS cohort settings
    endpoint, asserting the request succeeds.
    """
    url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + '/cohorts/settings'  # pylint: disable=protected-access
    data = json.dumps({'is_cohorted': True})
    # Reuse the fixture's authenticated session/headers for the PATCH call.
    response = course_fixture.session.patch(url, data=data, headers=course_fixture.headers)
    self.assertTrue(response.ok, "Failed to enable cohorts")
def create_content_groups(self):
    """
    Create two content groups ("Content Group A" and "Content Group B")
    on the Studio group-configurations page.
    """
    configurations_page = GroupConfigurationsPage(
        self.browser,
        self.course_info['org'],
        self.course_info['number'],
        self.course_info['run']
    )
    configurations_page.visit()

    def _rename_and_save(index, title):
        # Give the content group at ``index`` its display name and persist it.
        group = configurations_page.content_groups[index]
        group.name = title
        group.save()

    # The first group uses a dedicated creation affordance; subsequent
    # groups are added through the generic "add" control.
    configurations_page.create_first_content_group()
    _rename_and_save(0, self.content_group_a)
    configurations_page.add_content_group()
    _rename_and_save(1, self.content_group_b)
def link_html_to_content_groups_and_publish(self):
    """
    Updates 3 of the 4 existing html components to limit their visibility
    by content group, then publishes the modified unit.

    Components at indices 1-3 are scoped to group A, group B, and both
    groups respectively; the component at index 0 is left untouched.
    """
    container_page = self.go_to_unit_page()

    def set_visibility(html_block_index, content_group, second_content_group=None):
        """
        Set visibility on html blocks to specified groups.
        """
        html_block = container_page.xblocks[html_block_index]
        html_block.edit_visibility()
        if second_content_group:
            # Select the extra group without saving so that both selections
            # are persisted together by the final select_option call below.
            ComponentVisibilityEditorView(self.browser, html_block.locator).select_option(
                second_content_group, save=False
            )
        ComponentVisibilityEditorView(self.browser, html_block.locator).select_option(content_group)

    set_visibility(1, self.content_group_a)
    set_visibility(2, self.content_group_b)
    set_visibility(3, self.content_group_a, self.content_group_b)
    container_page.publish_action.click()
def create_cohorts_and_assign_students(self):
    """
    Adds 2 manual cohorts, linked to content groups, to the course.
    Each cohort is assigned one student.
    """
    instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
    instructor_dashboard_page.visit()
    cohort_management_page = instructor_dashboard_page.select_cohort_management()

    def add_cohort_with_student(cohort_name, content_group, student):
        """
        Create a cohort linked to ``content_group`` and assign ``student``
        to it.
        """
        cohort_management_page.add_cohort(cohort_name, content_group=content_group)
        # After adding the cohort, it should automatically be selected
        EmptyPromise(
            lambda: cohort_name == cohort_management_page.get_selected_cohort(), "Waiting for new cohort"
        ).fulfill()
        cohort_management_page.add_students_to_selected_cohort([student])

    add_cohort_with_student("Cohort A", self.content_group_a, self.cohort_a_student_username)
    add_cohort_with_student("Cohort B", self.content_group_b, self.cohort_b_student_username)
    # Let the last student-assignment request finish before returning.
    cohort_management_page.wait_for_ajax()
def test_page_existence(self):
    """
    Make sure that the courseware search page is accessible to an
    ordinary enrolled user.
    """
    self._auto_auth(self.USERNAME, self.EMAIL, False)
    self.courseware_search_page.visit()
def test_cohorted_search_user_a_a_content(self):
    """
    A learner in Cohort A can find content restricted to content group A.
    """
    self._auto_auth(self.cohort_a_student_username, self.cohort_a_student_email, False)
    self.courseware_search_page.visit()
    self.courseware_search_page.search_for_term(self.group_a_html)
    first_result = self.courseware_search_page.search_results.html[0]
    assert self.group_a_html in first_result
def test_cohorted_search_user_b_a_content(self):
    """
    A learner in Cohort B must not find content restricted to content
    group A.
    """
    self._auto_auth(self.cohort_b_student_username, self.cohort_b_student_email, False)
    self.courseware_search_page.visit()
    self.courseware_search_page.search_for_term(self.group_a_html)
    first_result = self.courseware_search_page.search_results.html[0]
    assert self.group_a_html not in first_result
def test_cohorted_search_user_c_ab_content(self):
    """
    A learner enrolled in no cohort must not find content that is
    restricted to content groups A and B.
    """
    self._auto_auth(self.USERNAME, self.EMAIL, False)
    self.courseware_search_page.visit()
    self.courseware_search_page.search_for_term(self.group_a_and_b_html)
    first_result = self.courseware_search_page.search_results.html[0]
    assert self.group_a_and_b_html not in first_result
def test_cohorted_search_user_c_all_content(self):
    """
    A learner enrolled in no cohort can still find the unrestricted
    content even though cohorts are in use on the course.
    """
    self._auto_auth(self.USERNAME, self.EMAIL, False)
    self.courseware_search_page.visit()
    self.courseware_search_page.search_for_term(self.visible_to_all_html)
    first_result = self.courseware_search_page.search_results.html[0]
    assert self.visible_to_all_html in first_result
def test_cohorted_search_user_staff_all_content(self):
    """
    A staff member in 'Staff' view can find every piece of content,
    whether public or restricted to either content group.
    """
    self._auto_auth(self.staff_user["username"], self.staff_user["email"], False)
    self._goto_staff_page().set_staff_view_mode('Staff')

    search_terms = [
        self.visible_to_all_html,
        self.group_a_and_b_html,
        self.group_a_html,
        self.group_b_html,
    ]
    for position, term in enumerate(search_terms):
        if position:
            # Reset the previous query before issuing the next one.
            self.courseware_search_page.clear_search()
        self.courseware_search_page.search_for_term(term)
        assert term in self.courseware_search_page.search_results.html[0]
def test_cohorted_search_user_staff_masquerade_student_content(self):
    """
    A staff member previewing as a plain 'Student' sees only the public
    content; all group-restricted content is hidden from search.
    """
    self._auto_auth(self.staff_user["username"], self.staff_user["email"], False)
    self._goto_staff_page().set_staff_view_mode('Student')

    expectations = [
        (self.visible_to_all_html, True),
        (self.group_a_and_b_html, False),
        (self.group_a_html, False),
        (self.group_b_html, False),
    ]
    for position, (term, should_match) in enumerate(expectations):
        if position:
            # Reset the previous query before issuing the next one.
            self.courseware_search_page.clear_search()
        self.courseware_search_page.search_for_term(term)
        found = term in self.courseware_search_page.search_results.html[0]
        assert found == should_match
def test_cohorted_search_user_staff_masquerade_cohort_content(self):
    """
    A staff member previewing as a student in content group A sees the
    public content plus everything scoped to group A, but nothing that
    is exclusive to group B.
    """
    self._auto_auth(self.staff_user["username"], self.staff_user["email"], False)
    self._goto_staff_page().set_staff_view_mode('Student in ' + self.content_group_a)

    expectations = [
        (self.visible_to_all_html, True),
        (self.group_a_and_b_html, True),
        (self.group_a_html, True),
        (self.group_b_html, False),
    ]
    for position, (term, should_match) in enumerate(expectations):
        if position:
            # Reset the previous query before issuing the next one.
            self.courseware_search_page.clear_search()
        self.courseware_search_page.search_for_term(term)
        found = term in self.courseware_search_page.search_results.html[0]
        assert found == should_match
|
adoosii/edx-platform
|
common/test/acceptance/tests/lms/test_lms_cohorted_courseware_search.py
|
Python
|
agpl-3.0
| 14,005
|
[
"VisIt"
] |
def31e25c0e12a2341aab74d216cf95442d9ed0e2e450c8f353d9a464142d9c4
|
"""
sphinx.ext.apidoc
~~~~~~~~~~~~~~~~~
Parses a directory tree looking for Python modules and packages and creates
ReST files appropriately to create code documentation with Sphinx. It also
creates a modules index (named modules.<suffix>).
This is derived from the "sphinx-autopackage" script, which is:
Copyright 2008 Société des arts technologiques (SAT),
https://sat.qc.ca/
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import argparse
import glob
import locale
import os
import sys
from fnmatch import fnmatch
from os import path
import sphinx.locale
from sphinx import __display_version__, package_dir
from sphinx.cmd.quickstart import EXTENSIONS
from sphinx.locale import __
from sphinx.util import rst
from sphinx.util.osutil import FileAvoidWrite, ensuredir
if False:
# For type annotation
from typing import Any, List, Tuple # NOQA
# automodule options
if 'SPHINX_APIDOC_OPTIONS' in os.environ:
OPTIONS = os.environ['SPHINX_APIDOC_OPTIONS'].split(',')
else:
OPTIONS = [
'members',
'undoc-members',
# 'inherited-members', # disabled because there's a bug in sphinx
'show-inheritance',
]
INITPY = '__init__.py'
PY_SUFFIXES = {'.py', '.pyx'}
def makename(package, module):
    # type: (str, str) -> str
    """Join package and module with a dot.

    Either part may be empty/None: a missing package yields just the
    module, and a missing module yields just the package.
    """
    if not package:
        return module
    if not module:
        return package
    return package + '.' + module
def write_file(name, text, opts):
    # type: (str, str, Any) -> None
    """Write the output file for module/package <name>.

    Honours ``opts.dryrun`` (report only, write nothing) and ``opts.force``
    (overwrite an existing file); otherwise an existing file is skipped.
    """
    fname = path.join(opts.destdir, '%s.%s' % (name, opts.suffix))
    if opts.dryrun:
        print(__('Would create file %s.') % fname)
        return
    if not opts.force and path.isfile(fname):
        print(__('File %s already exists, skipping.') % fname)
        return
    print(__('Creating file %s.') % fname)
    # FileAvoidWrite only touches the file if its content actually changes.
    with FileAvoidWrite(fname) as f:
        f.write(text)
def format_heading(level, text, escape=True):
    # type: (int, str, bool) -> str
    """Create a heading of <level> [1, 2 or 3 supported].

    The heading text is reST-escaped unless ``escape`` is False.
    """
    if escape:
        text = rst.escape(text)
    underline = '=-~'[level - 1] * len(text)
    return '{}\n{}\n\n'.format(text, underline)
def format_directive(module, package=None):
    # type: (str, str) -> str
    """Create the automodule directive and add the options."""
    lines = ['.. automodule:: %s' % makename(package, module)]
    # Each entry in OPTIONS becomes an option line of the directive.
    lines.extend(' :%s:' % option for option in OPTIONS)
    return '\n'.join(lines) + '\n'
def create_module_file(package, module, opts):
    # type: (str, str, Any) -> None
    """Build the text of the file and write the file."""
    text = '' if opts.noheadings else format_heading(1, '%s module' % module)
    text += format_directive(module, package)
    write_file(makename(package, module), text, opts)
def create_package_file(root, master_package, subroot, py_files, opts, subs, is_namespace, excludes=()):  # NOQA
    # type: (str, str, str, List[str], Any, List[str], bool, List[str]) -> None
    """Build the text of the file and write the file.

    Emits the package/namespace heading, an optional leading automodule
    directive (``opts.modulefirst``), a toctree of non-skipped subpackages,
    and the submodule documentation -- inline or as separate files when
    ``opts.separatemodules`` is set.

    ``excludes`` defaults to an immutable empty tuple; the previous mutable
    ``[]`` default is a shared-between-calls pitfall.
    """
    text = format_heading(1, ('%s package' if not is_namespace else "%s namespace")
                          % makename(master_package, subroot))

    if opts.modulefirst and not is_namespace:
        text += format_directive(subroot, master_package)
        text += '\n'

    # build a list of directories that are subpackages (contain an INITPY file)
    # and also checks the INITPY file is not empty, or there are other python
    # source files in that folder.
    # (depending on settings - but shall_skip() takes care of that)
    subs = [sub for sub in subs if not
            shall_skip(path.join(root, sub, INITPY), opts, excludes)]
    # if there are some package directories, add a TOC for theses subpackages
    if subs:
        text += format_heading(2, 'Subpackages')
        text += '.. toctree::\n\n'
        for sub in subs:
            text += ' %s.%s\n' % (makename(master_package, subroot), sub)
        text += '\n'

    submods = [path.splitext(sub)[0] for sub in py_files
               if not shall_skip(path.join(root, sub), opts, excludes) and
               sub != INITPY]
    if submods:
        text += format_heading(2, 'Submodules')
        if opts.separatemodules:
            text += '.. toctree::\n\n'
            for submod in submods:
                modfile = makename(master_package, makename(subroot, submod))
                text += ' %s\n' % modfile

                # generate separate file for this module
                if not opts.noheadings:
                    filetext = format_heading(1, '%s module' % modfile)
                else:
                    filetext = ''
                filetext += format_directive(makename(subroot, submod),
                                             master_package)
                write_file(modfile, filetext, opts)
        else:
            for submod in submods:
                modfile = makename(master_package, makename(subroot, submod))
                if not opts.noheadings:
                    text += format_heading(2, '%s module' % modfile)
                text += format_directive(makename(subroot, submod),
                                         master_package)
                text += '\n'
        text += '\n'

    if not opts.modulefirst and not is_namespace:
        text += format_heading(2, 'Module contents')
        text += format_directive(subroot, master_package)

    write_file(makename(master_package, subroot), text, opts)
def create_modules_toc_file(modules, opts, name='modules'):
    # type: (List[str], Any, str) -> None
    """Create the module's index (the master toctree file)."""
    pieces = [format_heading(1, '%s' % opts.header, escape=False),
              '.. toctree::\n',
              ' :maxdepth: %s\n\n' % opts.maxdepth]

    modules.sort()
    last_listed = ''
    for module in modules:
        # look if the module is a subpackage and, if yes, ignore it
        if module.startswith(last_listed + '.'):
            continue
        last_listed = module
        pieces.append(' %s\n' % module)

    write_file(name, ''.join(pieces), opts)
def shall_skip(module, opts, excludes=()):
    # type: (str, Any, List[str]) -> bool
    """Check if we want to skip this module.

    A module is skipped when it does not exist (unless implicit namespaces
    are in use), when it is an ``__init__.py`` whose package contains only
    excluded modules, or when it has a "private" (underscore-prefixed)
    name and ``opts.includeprivate`` is not set.

    ``excludes`` defaults to an immutable empty tuple; the previous mutable
    ``[]`` default is a shared-between-calls pitfall.
    """
    # skip if the file doesn't exist and not using implicit namespaces
    if not opts.implicit_namespaces and not path.exists(module):
        return True

    # Are we a package (here defined as __init__.py, not the folder in itself)
    if os.path.basename(module) == INITPY:
        # Yes, check if we have any non-excluded modules at all here
        all_skipped = True
        basemodule = path.dirname(module)
        for submodule in glob.glob(path.join(basemodule, '*.py')):
            # glob() already returns paths prefixed with ``basemodule``;
            # the previous path.join(basemodule, submodule) produced a
            # double-joined path (e.g. "pkg/pkg/mod.py"), so excludes were
            # never matched here.
            if not is_excluded(submodule, excludes):
                # There's a non-excluded module here, we won't skip
                all_skipped = False
        if all_skipped:
            return True

    # skip if it has a "private" name and this is selected
    filename = path.basename(module)
    if filename != '__init__.py' and filename.startswith('_') and \
            not opts.includeprivate:
        return True
    return False
def recurse_tree(rootpath, excludes, opts):
    # type: (str, List[str], Any) -> List[str]
    """
    Look for every file in the directory tree and create the corresponding
    ReST files.

    Returns the list of top-level module/package names that were
    documented, for use in the table-of-contents file.
    """
    followlinks = getattr(opts, 'followlinks', False)
    includeprivate = getattr(opts, 'includeprivate', False)
    implicit_namespaces = getattr(opts, 'implicit_namespaces', False)

    # check if the base directory is a package and get its name
    if INITPY in os.listdir(rootpath) or implicit_namespaces:
        root_package = rootpath.split(path.sep)[-1]
    else:
        # otherwise, the base is a directory with packages
        root_package = None

    toplevels = []
    for root, subs, files in os.walk(rootpath, followlinks=followlinks):
        # document only Python module files (that aren't excluded)
        py_files = sorted(f for f in files
                          if path.splitext(f)[1] in PY_SUFFIXES and
                          not is_excluded(path.join(root, f), excludes))
        is_pkg = INITPY in py_files
        is_namespace = INITPY not in py_files and implicit_namespaces
        if is_pkg:
            # move __init__.py to the front so it is handled first
            py_files.remove(INITPY)
            py_files.insert(0, INITPY)
        elif root != rootpath:
            # only accept non-package at toplevel unless using implicit namespaces
            if not implicit_namespaces:
                # prune the walk: do not descend into non-package directories
                del subs[:]
                continue
        # remove hidden ('.') and private ('_') directories, as well as
        # excluded dirs
        if includeprivate:
            exclude_prefixes = ('.',)  # type: Tuple[str, ...]
        else:
            exclude_prefixes = ('.', '_')
        # assigning to subs[:] mutates os.walk's list in place, which
        # controls which subdirectories are visited next
        subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and
                         not is_excluded(path.join(root, sub), excludes))

        if is_pkg or is_namespace:
            # we are in a package with something to document
            if subs or len(py_files) > 1 or not shall_skip(path.join(root, INITPY), opts):
                subpackage = root[len(rootpath):].lstrip(path.sep).\
                    replace(path.sep, '.')
                # if this is not a namespace or
                # a namespace and there is something there to document
                if not is_namespace or len(py_files) > 0:
                    create_package_file(root, root_package, subpackage,
                                        py_files, opts, subs, is_namespace, excludes)
                    toplevels.append(makename(root_package, subpackage))
        else:
            # if we are at the root level, we don't require it to be a package
            assert root == rootpath and root_package is None
            for py_file in py_files:
                if not shall_skip(path.join(rootpath, py_file), opts):
                    module = path.splitext(py_file)[0]
                    create_module_file(root_package, module, opts)
                    toplevels.append(module)

    return toplevels
def is_excluded(root, excludes):
    # type: (str, List[str]) -> bool
    """Check if the directory is in the exclude list.

    Note: by having trailing slashes, we avoid common prefix issues, like
    e.g. an exclude "foo" also accidentally excluding "foobar".
    """
    return any(fnmatch(root, pattern) for pattern in excludes)
def get_parser():
    # type: () -> argparse.ArgumentParser
    """Build and return the command-line argument parser for sphinx-apidoc."""
    parser = argparse.ArgumentParser(
        usage='%(prog)s [OPTIONS] -o <OUTPUT_PATH> <MODULE_PATH> '
              '[EXCLUDE_PATTERN, ...]',
        epilog=__('For more information, visit <http://sphinx-doc.org/>.'),
        description=__("""
Look recursively in <MODULE_PATH> for Python modules and packages and create
one reST file with automodule directives per package in the <OUTPUT_PATH>.
The <EXCLUDE_PATTERN>s can be file and/or directory patterns that will be
excluded from generation.
Note: By default this script will not overwrite already created files."""))

    parser.add_argument('--version', action='version', dest='show_version',
                        version='%%(prog)s %s' % __display_version__)

    # positional arguments
    parser.add_argument('module_path',
                        help=__('path to module to document'))
    parser.add_argument('exclude_pattern', nargs='*',
                        help=__('fnmatch-style file and/or directory patterns '
                                'to exclude from generation'))

    parser.add_argument('-o', '--output-dir', action='store', dest='destdir',
                        required=True,
                        help=__('directory to place all output'))
    parser.add_argument('-d', '--maxdepth', action='store', dest='maxdepth',
                        type=int, default=4,
                        help=__('maximum depth of submodules to show in the TOC '
                                '(default: 4)'))
    parser.add_argument('-f', '--force', action='store_true', dest='force',
                        help=__('overwrite existing files'))
    parser.add_argument('-l', '--follow-links', action='store_true',
                        dest='followlinks', default=False,
                        help=__('follow symbolic links. Powerful when combined '
                                'with collective.recipe.omelette.'))
    parser.add_argument('-n', '--dry-run', action='store_true', dest='dryrun',
                        help=__('run the script without creating files'))
    parser.add_argument('-e', '--separate', action='store_true',
                        dest='separatemodules',
                        help=__('put documentation for each module on its own page'))
    parser.add_argument('-P', '--private', action='store_true',
                        dest='includeprivate',
                        help=__('include "_private" modules'))
    # -T shares the 'tocfile' dest with --tocfile: store_false turns the
    # (string-valued) option into a falsy flag that disables TOC creation.
    parser.add_argument('--tocfile', action='store', dest='tocfile', default='modules',
                        help=__("filename of table of contents (default: modules)"))
    parser.add_argument('-T', '--no-toc', action='store_false', dest='tocfile',
                        help=__("don't create a table of contents file"))
    parser.add_argument('-E', '--no-headings', action='store_true',
                        dest='noheadings',
                        help=__("don't create headings for the module/package "
                                "packages (e.g. when the docstrings already "
                                "contain them)"))
    parser.add_argument('-M', '--module-first', action='store_true',
                        dest='modulefirst',
                        help=__('put module documentation before submodule '
                                'documentation'))
    parser.add_argument('--implicit-namespaces', action='store_true',
                        dest='implicit_namespaces',
                        help=__('interpret module paths according to PEP-0420 '
                                'implicit namespaces specification'))
    parser.add_argument('-s', '--suffix', action='store', dest='suffix',
                        default='rst',
                        help=__('file suffix (default: rst)'))
    # options used only together with --full (sphinx-quickstart hand-off)
    parser.add_argument('-F', '--full', action='store_true', dest='full',
                        help=__('generate a full project with sphinx-quickstart'))
    parser.add_argument('-a', '--append-syspath', action='store_true',
                        dest='append_syspath',
                        help=__('append module_path to sys.path, used when --full is given'))
    parser.add_argument('-H', '--doc-project', action='store', dest='header',
                        help=__('project name (default: root module name)'))
    parser.add_argument('-A', '--doc-author', action='store', dest='author',
                        help=__('project author(s), used when --full is given'))
    parser.add_argument('-V', '--doc-version', action='store', dest='version',
                        help=__('project version, used when --full is given'))
    parser.add_argument('-R', '--doc-release', action='store', dest='release',
                        help=__('project release, used when --full is given, '
                                'defaults to --doc-version'))

    group = parser.add_argument_group(__('extension options'))
    group.add_argument('--extensions', metavar='EXTENSIONS', dest='extensions',
                       action='append', help=__('enable arbitrary extensions'))
    # one --ext-NAME convenience flag per well-known Sphinx extension
    for ext in EXTENSIONS:
        group.add_argument('--ext-%s' % ext, action='append_const',
                           const='sphinx.ext.%s' % ext, dest='extensions',
                           help=__('enable %s extension') % ext)

    return parser
def main(argv=sys.argv[1:]):
    # type: (List[str]) -> int
    """Parse and check the command line arguments.

    Returns 0 on success; exits with status 1 if the module path is not a
    directory. The mutable default ``argv`` is only read, never mutated,
    so the shared-default pitfall does not bite here.
    """
    sphinx.locale.setlocale(locale.LC_ALL, '')
    sphinx.locale.init_console(os.path.join(package_dir, 'locale'), 'sphinx')

    parser = get_parser()
    args = parser.parse_args(argv)

    rootpath = path.abspath(args.module_path)

    # normalize opts
    if args.header is None:
        # default the project header to the root directory's basename
        args.header = rootpath.split(path.sep)[-1]
    if args.suffix.startswith('.'):
        args.suffix = args.suffix[1:]
    if not path.isdir(rootpath):
        print(__('%s is not a directory.') % rootpath, file=sys.stderr)
        sys.exit(1)
    if not args.dryrun:
        ensuredir(args.destdir)
    # exclude patterns are matched as absolute paths (see is_excluded)
    excludes = [path.abspath(exclude) for exclude in args.exclude_pattern]
    modules = recurse_tree(rootpath, excludes, args)

    if args.full:
        # hand off to sphinx-quickstart to generate a complete project
        from sphinx.cmd import quickstart as qs
        modules.sort()
        prev_module = ''
        text = ''
        for module in modules:
            # skip submodules: they are reachable through their parent package
            if module.startswith(prev_module + '.'):
                continue
            prev_module = module
            text += ' %s\n' % module
        d = {
            'path': args.destdir,
            'sep': False,
            'dot': '_',
            'project': args.header,
            'author': args.author or 'Author',
            'version': args.version or '',
            'release': args.release or args.version or '',
            'suffix': '.' + args.suffix,
            'master': 'index',
            'epub': True,
            'extensions': ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
                           'sphinx.ext.todo'],
            'makefile': True,
            'batchfile': True,
            'make_mode': True,
            'mastertocmaxdepth': args.maxdepth,
            'mastertoctree': text,
            'language': 'en',
            'module_path': rootpath,
            'append_syspath': args.append_syspath,
        }
        if args.extensions:
            d['extensions'].extend(args.extensions)

        for ext in d['extensions'][:]:
            if ',' in ext:
                # support comma-separated extension lists passed in one flag;
                # iterate over a copy since the list is mutated in the loop
                d['extensions'].remove(ext)
                d['extensions'].extend(ext.split(','))

        if not args.dryrun:
            qs.generate(d, silent=True, overwrite=args.force)
    elif args.tocfile:
        # --full not given: just write the table-of-contents file (unless -T)
        create_modules_toc_file(modules, args, args.tocfile)

    return 0


# So program can be started with "python -m sphinx.apidoc ..."
if __name__ == "__main__":
    main()
|
lmregus/Portfolio
|
python/design_patterns/env/lib/python3.7/site-packages/sphinx/ext/apidoc.py
|
Python
|
mit
| 18,689
|
[
"VisIt"
] |
1dab6220b857421526a7594b43faaf3c03ab8a5a3cd6c3539c067f09c9c283d6
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import re
import csv
import collections
import itertools
from io import open
import math
from six.moves import zip
import warnings
from monty.json import MSONable, MontyDecoder
from monty.string import unicode2str
from monty.functools import lru_cache
from monty.dev import deprecated
import numpy as np
from scipy.spatial import ConvexHull
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element, DummySpecie, get_el_sp
from pymatgen.util.coord import Simplex, in_coord_list
from pymatgen.util.string import latexify
from pymatgen.util.plotting import pretty_plot
from pymatgen.analysis.reaction_calculator import Reaction, \
ReactionError
"""
This module defines tools to generate and analyze phase diagrams.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "May 16, 2011"
class PDEntry(MSONable):
    """
    An object encompassing all relevant data for phase diagrams.

    .. attribute:: name

        A name for the entry. This is the string shown in the phase diagrams.
        By default, this is the reduced formula for the composition, but can be
        set to some other string for display purposes.

    Args:
        comp: Composition as a pymatgen.core.structure.Composition
        energy: Energy for composition.
        name: Optional parameter to name the entry. Defaults to the reduced
            chemical formula.
        attribute: Optional attribute of the entry. This can be used to
            specify that the entry is a newly found compound, or to specify a
            particular label for the entry, or else ... Used for further
            analysis and plotting purposes. An attribute can be anything
            but must be MSONable.
    """

    def __init__(self, composition, energy, name=None, attribute=None):
        self.energy = energy
        self.composition = Composition(composition)
        self.name = name if name else self.composition.reduced_formula
        self.attribute = attribute

    @property
    def energy_per_atom(self):
        """
        Returns the final energy per atom.
        """
        return self.energy / self.composition.num_atoms

    @property
    def is_element(self):
        """
        True if the entry is an element.
        """
        return self.composition.is_element

    def __repr__(self):
        return "PDEntry : {} with energy = {:.4f}".format(self.composition,
                                                          self.energy)

    def __str__(self):
        return self.__repr__()

    def as_dict(self):
        """Returns an MSONable dict representation of the entry."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "composition": self.composition.as_dict(),
                "energy": self.energy,
                "name": self.name,
                "attribute": self.attribute}

    def __eq__(self, other):
        # NOTE(review): equality compares full dict representations while
        # __hash__ is identity-based, so two "equal" entries can hash
        # differently. Kept as-is to avoid changing set/dict behavior.
        if isinstance(other, self.__class__):
            return self.as_dict() == other.as_dict()
        else:
            return False

    def __hash__(self):
        return id(self)

    @classmethod
    def from_dict(cls, d):
        """Reconstructs a PDEntry from its as_dict() representation."""
        return cls(Composition(d["composition"]), d["energy"],
                   d["name"] if "name" in d else None,
                   d["attribute"] if "attribute" in d else None)

    @staticmethod
    def to_csv(filename, entries, latexify_names=False):
        """
        Exports PDEntries to a csv

        Args:
            filename: Filename to write to.
            entries: PDEntries to export.
            latexify_names: Format entry names to be LaTex compatible,
                e.g., Li_{2}O
        """
        elements = set()
        for entry in entries:
            elements.update(entry.composition.elements)
        elements = sorted(list(elements), key=lambda a: a.X)
        # Use a context manager so the handle is always closed; the previous
        # implementation leaked the file object returned by open().
        # NOTE(review): "wb" is a Python-2-era mode; on Python 3 the csv
        # module expects text mode. Preserved here to avoid a behavior
        # change in this six-based codebase -- confirm before modernizing.
        with open(filename, "wb") as f:
            writer = csv.writer(f, delimiter=unicode2str(","),
                                quotechar=unicode2str("\""),
                                quoting=csv.QUOTE_MINIMAL)
            writer.writerow(["Name"] + elements + ["Energy"])
            for entry in entries:
                row = [entry.name if not latexify_names
                       else re.sub(r"([0-9]+)", r"_{\1}", entry.name)]
                row.extend([entry.composition[el] for el in elements])
                row.append(entry.energy)
                writer.writerow(row)

    @staticmethod
    def from_csv(filename):
        """
        Imports PDEntries from a csv.

        Args:
            filename: Filename to import from.

        Returns:
            List of Elements, List of PDEntries
        """
        # Initialize so an empty csv yields ([], []) instead of raising a
        # NameError when building the element list below.
        elements = []
        entries = list()
        with open(filename, "r", encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=unicode2str(","),
                                quotechar=unicode2str("\""),
                                quoting=csv.QUOTE_MINIMAL)
            header_read = False
            for row in reader:
                if not header_read:
                    # First row: "Name", element symbols..., "Energy"
                    elements = row[1:(len(row) - 1)]
                    header_read = True
                else:
                    name = row[0]
                    energy = float(row[-1])
                    comp = dict()
                    for ind in range(1, len(row) - 1):
                        # Only keep elements actually present in the entry.
                        if float(row[ind]) > 0:
                            comp[Element(elements[ind - 1])] = float(row[ind])
                    entries.append(PDEntry(Composition(comp), energy, name))
        elements = [Element(el) for el in elements]
        return elements, entries
class GrandPotPDEntry(PDEntry):
    """
    A grand potential pd entry object encompassing all relevant data for phase
    diagrams. Chemical potentials are given as a element-chemical potential
    dict.

    Args:
        entry: A PDEntry-like object.
        chempots: Chemical potential specification as {Element: float}.
        name: Optional parameter to name the entry. Defaults to the reduced
            chemical formula of the original entry.
    """

    def __init__(self, entry, chempots, name=None):
        original_comp = entry.composition
        self.original_entry = entry
        self.original_comp = original_comp
        # Grand potential = energy minus the chemical-potential contribution
        # of every "open" element in chempots.
        open_energy = sum([original_comp[el] * mu
                           for el, mu in chempots.items()])
        self.chempots = chempots
        # The open elements are removed from the composition used on the hull.
        closed_comp = {el: original_comp[el] for el in original_comp.elements
                       if el not in chempots}
        super(GrandPotPDEntry, self).__init__(closed_comp,
                                              entry.energy - open_energy,
                                              entry.name)
        self.name = name if name else entry.name

    @property
    def is_element(self):
        """
        True if the entry is an element.
        """
        return self.original_comp.is_element

    def __repr__(self):
        mu_terms = " ".join("mu_%s = %.4f" % (el, mu)
                            for el, mu in self.chempots.items())
        return ("GrandPotPDEntry with original composition "
                "{}, energy = {:.4f}, {}".format(
                    self.original_entry.composition,
                    self.original_entry.energy,
                    mu_terms))

    def __str__(self):
        return self.__repr__()

    def as_dict(self):
        """Returns an MSONable dict representation of the entry."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "entry": self.original_entry.as_dict(),
                "chempots": {el.symbol: mu for el, mu in self.chempots.items()},
                "name": self.name}

    @classmethod
    def from_dict(cls, d):
        """Reconstructs an entry from its as_dict() representation."""
        pots = {Element(symbol): mu for symbol, mu in d["chempots"].items()}
        entry = MontyDecoder().process_decoded(d["entry"])
        return cls(entry, pots, d["name"])

    def __getattr__(self, a):
        """
        Delegate attribute to original entry if available.
        """
        if hasattr(self.original_entry, a):
            return getattr(self.original_entry, a)
        raise AttributeError(a)
class TransformedPDEntry(PDEntry):
    """
    This class represents a TransformedPDEntry, which allows for a PDEntry to be
    transformed to a different composition coordinate space. It is used in the
    construction of phase diagrams that do not have elements as the terminal
    compositions.

    Args:
        comp: Transformed composition as a Composition.
        original_entry: Original entry that this entry arose from. The
            energy and name of this entry are carried over unchanged.
    """

    def __init__(self, comp, original_entry):
        # Only the composition coordinates change; energy is inherited
        # verbatim from the original entry.
        super(TransformedPDEntry, self).__init__(comp, original_entry.energy)
        self.original_entry = original_entry
        self.name = original_entry.name

    def __getattr__(self, a):
        """
        Delegate attribute to original entry if available.
        """
        if hasattr(self.original_entry, a):
            return getattr(self.original_entry, a)
        raise AttributeError(a)

    def __repr__(self):
        output = ["TransformedPDEntry {}".format(self.composition),
                  " with original composition {}"
                  .format(self.original_entry.composition),
                  ", E = {:.4f}".format(self.original_entry.energy)]
        return "".join(output)

    def __str__(self):
        return self.__repr__()

    def as_dict(self):
        # NOTE(review): unlike the sibling classes, "composition" is stored
        # as the raw Composition object rather than its as_dict() form, so
        # this dict is not guaranteed to be JSON-serializable -- confirm
        # whether that is intentional before changing the format.
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "entry": self.original_entry.as_dict(),
                "composition": self.composition}

    @classmethod
    def from_dict(cls, d):
        entry = MontyDecoder().process_decoded(d["entry"])
        return cls(d["composition"], entry)
class PhaseDiagram(MSONable):
"""
Simple phase diagram class taking in elements and entries as inputs.
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
.. attribute: elements:
Elements in the phase diagram.
..attribute: all_entries
All entries provided for Phase Diagram construction. Note that this
does not mean that all these entries are actually used in the phase
diagram. For example, this includes the positive formation energy
entries that are filtered out before Phase Diagram construction.
.. attribute: qhull_data
Data used in the convex hull operation. This is essentially a matrix of
composition data and energy per atom values created from qhull_entries.
.. attribute: qhull_entries:
Actual entries used in convex hull. Excludes all positive formation
energy entries.
.. attribute: dim
The dimensionality of the phase diagram.
.. attribute: facets
Facets of the phase diagram in the form of [[1,2,3],[4,5,6]...].
For a ternary, it is the indices (references to qhull_entries and
qhull_data) for the vertices of the phase triangles. Similarly
extended to higher D simplices for higher dimensions.
.. attribute: el_refs:
List of elemental references for the phase diagrams. These are
entries corresponding to the lowest energy element entries for simple
compositional phase diagrams.
.. attribute: simplices:
The simplices of the phase diagram as a list of np.ndarray, i.e.,
the list of stable compositional coordinates in the phase diagram.
"""
# Tolerance for determining if formation energy is positive.
formation_energy_tol = 1e-11
numerical_tol = 1e-8
    def __init__(self, entries, elements=None):
        """
        Standard constructor for phase diagram.

        Args:
            entries ([PDEntry]): A list of PDEntry-like objects having an
                energy, energy_per_atom and composition.
            elements ([Element]): Optional list of elements in the phase
                diagram. If set to None, the elements are determined from
                the entries themselves.

        Raises:
            PhaseDiagramError: if some terminal element has no entry.
        """
        if elements is None:
            # Infer the chemical system from the entries.
            elements = set()
            for entry in entries:
                elements.update(entry.composition.elements)
        elements = list(elements)
        dim = len(elements)

        # Group entries by reduced composition; only the lowest
        # energy-per-atom entry of each group can be on the hull.
        get_reduced_comp = lambda e: e.composition.reduced_composition
        entries = sorted(entries, key=get_reduced_comp)

        el_refs = {}
        min_entries = []
        all_entries = []
        for c, g in itertools.groupby(entries, key=get_reduced_comp):
            g = list(g)
            min_entry = min(g, key=lambda e: e.energy_per_atom)
            if c.is_element:
                # Lowest-energy elemental entry is the formation-energy
                # reference for that element.
                el_refs[c.elements[0]] = min_entry
            min_entries.append(min_entry)
            all_entries.extend(g)

        if len(el_refs) != dim:
            raise PhaseDiagramError(
                "There are no entries associated with a terminal element!.")

        # One row per candidate: atomic fractions + energy per atom.
        data = np.array([
            [e.composition.get_atomic_fraction(el) for el in elements] + [
                e.energy_per_atom]
            for e in min_entries
        ])

        # Use only entries with negative formation energy
        vec = [el_refs[el].energy_per_atom for el in elements] + [-1]
        form_e = -np.dot(data, vec)
        inds = np.where(form_e < -self.formation_energy_tol)[0].tolist()

        # Add the elemental references
        inds.extend([min_entries.index(el) for el in el_refs.values()])

        qhull_entries = [min_entries[i] for i in inds]
        # Drop the first fraction column: fractions sum to 1, so the hull is
        # built in the reduced (dim - 1)-dimensional composition space.
        qhull_data = data[inds][:, 1:]

        # Add an extra point to enforce full dimensionality.
        # This point will be present in all upper hull facets.
        extra_point = np.zeros(dim) + 1 / dim
        extra_point[-1] = np.max(qhull_data) + 1
        qhull_data = np.concatenate([qhull_data, [extra_point]], axis=0)

        if dim == 1:
            self.facets = [qhull_data.argmin(axis=0)]
        else:
            facets = get_facets(qhull_data)
            finalfacets = []
            for facet in facets:
                # Skip facets that include the extra point
                if max(facet) == len(qhull_data) - 1:
                    continue
                m = qhull_data[facet]
                m[:, -1] = 1
                # Keep only non-degenerate (non-zero volume) facets.
                if abs(np.linalg.det(m)) > 1e-14:
                    finalfacets.append(facet)
            self.facets = finalfacets

        self.simplexes = [Simplex(qhull_data[f, :-1]) for f in self.facets]
        self.all_entries = all_entries
        self.qhull_data = qhull_data
        self.dim = dim
        self.el_refs = el_refs
        self.elements = elements
        self.qhull_entries = qhull_entries
        # Stable entries are those appearing in at least one facet.
        self._stable_entries = set(self.qhull_entries[i] for i in
                                   set(itertools.chain(*self.facets)))
def pd_coords(self, comp):
"""
The phase diagram is generated in a reduced dimensional space
(n_elements - 1). This function returns the coordinates in that space.
These coordinates are compatible with the stored simplex objects.
"""
if set(comp.elements).difference(self.elements):
raise ValueError('{} has elements not in the phase diagram {}'
''.format(comp, self.elements))
return np.array(
[comp.get_atomic_fraction(el) for el in self.elements[1:]])
@property
def all_entries_hulldata(self):
data = []
for entry in self.all_entries:
comp = entry.composition
row = [comp.get_atomic_fraction(el) for el in self.elements]
row.append(entry.energy_per_atom)
data.append(row)
return np.array(data)[:, 1:]
@property
def unstable_entries(self):
"""
Entries that are unstable in the phase diagram. Includes positive
formation energy entries.
"""
return [e for e in self.all_entries if e not in self.stable_entries]
    @property
    def stable_entries(self):
        """
        Returns the stable entries in the phase diagram.

        Returns:
            set of entries on the convex hull (same object as the internal
            ``_stable_entries`` set; callers should not mutate it).
        """
        return self._stable_entries
def get_form_energy(self, entry):
"""
Returns the formation energy for an entry (NOT normalized) from the
elemental references.
Args:
entry: A PDEntry-like object.
Returns:
Formation energy from the elemental references.
"""
c = entry.composition
return entry.energy - sum([c[el] * self.el_refs[el].energy_per_atom
for el in c.elements])
def get_form_energy_per_atom(self, entry):
"""
Returns the formation energy per atom for an entry from the
elemental references.
Args:
entry: An PDEntry-like object
Returns:
Formation energy **per atom** from the elemental references.
"""
return self.get_form_energy(entry) / entry.composition.num_atoms
def __repr__(self):
return self.__str__()
def __str__(self):
symbols = [el.symbol for el in self.elements]
output = ["{} phase diagram".format("-".join(symbols)),
"{} stable phases: ".format(len(self.stable_entries)),
", ".join([entry.name
for entry in self.stable_entries])]
return "\n".join(output)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
entries = [PDEntry.from_dict(dd) for dd in d["all_entries"]]
elements = [Element.from_dict(dd) for dd in d["elements"]]
return cls(entries, elements)
    @lru_cache(1)
    def _get_facet_and_simplex(self, comp):
        """
        Get any facet that a composition falls into. Cached so successive
        calls at same composition are fast.

        Args:
            comp: composition to locate; must be hashable since it is used
                as part of the lru_cache key.

        Returns:
            (facet, Simplex) pair whose simplex contains the composition.

        Raises:
            RuntimeError: if no facet contains the composition.
        """
        # NOTE(review): lru_cache on an instance method keys on ``self`` and
        # keeps the PhaseDiagram alive for the cache's lifetime (flake8 B019).
        c = self.pd_coords(comp)
        for f, s in zip(self.facets, self.simplexes):
            # Slightly tighter tolerance than numerical_tol to avoid
            # accepting points marginally outside a simplex.
            if s.in_simplex(c, PhaseDiagram.numerical_tol / 10):
                return f, s
        raise RuntimeError("No facet found for comp = {}".format(comp))
def _get_facet_chempots(self, facet):
"""
Calculates the chemical potentials for each element within a facet.
Args:
facet: Facet of the phase diagram.
Returns:
{ element: chempot } for all elements in the phase diagram.
"""
complist = [self.qhull_entries[i].composition for i in facet]
energylist = [self.qhull_entries[i].energy_per_atom for i in facet]
m = [[c.get_atomic_fraction(e) for e in self.elements] for c in
complist]
chempots = np.linalg.solve(m, energylist)
return dict(zip(self.elements, chempots))
def get_decomposition(self, comp):
"""
Provides the decomposition at a particular composition.
Args:
comp: A composition
Returns:
Decomposition as a dict of {Entry: amount}
"""
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
return {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
def get_hull_energy(self, comp):
"""
Args:
comp (Composition): Input composition
Returns:
Energy of lowest energy equilibrium at desired composition. Not
normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
"""
e = 0
for k, v in self.get_decomposition(comp).items():
e += k.energy_per_atom * v
return e * comp.num_atoms
    def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
        """
        Provides the decomposition and energy above convex hull for an entry.
        Due to caching, can be much faster if entries with the same composition
        are processed together.

        Args:
            entry: A PDEntry like object
            allow_negative: Whether to allow negative e_above_hulls. Used to
                calculate equilibrium reaction energies. Defaults to False.

        Returns:
            (decomp, energy above convex hull) Stable entries should have
            energy above hull of 0. The decomposition is provided as a dict of
            {Entry: amount}.

        Raises:
            ValueError: if the computed e_above_hull is negative beyond
                numerical tolerance and ``allow_negative`` is False.
        """
        # Stable entries trivially decompose into themselves at zero e_hull.
        if entry in self.stable_entries:
            return {entry: 1}, 0

        comp = entry.composition
        facet, simplex = self._get_facet_and_simplex(comp)
        decomp_amts = simplex.bary_coords(self.pd_coords(comp))
        # Keep only phases with non-negligible barycentric amounts.
        decomp = {self.qhull_entries[f]: amt
                  for f, amt in zip(facet, decomp_amts)
                  if abs(amt) > PhaseDiagram.numerical_tol}
        energies = [self.qhull_entries[i].energy_per_atom for i in facet]
        # e_above_hull = entry's E/atom minus hull energy at its composition.
        ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)
        if allow_negative or ehull >= -PhaseDiagram.numerical_tol:
            return decomp, ehull
        raise ValueError("No valid decomp found!")
def get_e_above_hull(self, entry):
"""
Provides the energy above convex hull for an entry
Args:
entry: A PDEntry like object
Returns:
Energy above convex hull of entry. Stable entries should have
energy above hull of 0.
"""
return self.get_decomp_and_e_above_hull(entry)[1]
    def get_equilibrium_reaction_energy(self, entry):
        """
        Provides the reaction energy of a stable entry from the neighboring
        equilibrium stable entries (also known as the inverse distance to
        hull).

        Args:
            entry: A PDEntry like object

        Returns:
            Equilibrium reaction energy of entry. Stable entries should have
            equilibrium reaction energy <= 0.

        Raises:
            ValueError: if ``entry`` is not a stable entry of this diagram.
        """
        if entry not in self.stable_entries:
            raise ValueError("Equilibrium reaction energy is available only "
                             "for stable entries.")
        if entry.is_element:
            return 0
        # Rebuild the hull without this entry; its (negative) distance to the
        # new hull is the equilibrium reaction energy.
        entries = [e for e in self.stable_entries if e != entry]
        modpd = PhaseDiagram(entries, self.elements)
        return modpd.get_decomp_and_e_above_hull(entry,
                                                 allow_negative=True)[1]
def get_composition_chempots(self, comp):
facet = self._get_facet_and_simplex(comp)[0]
return self._get_facet_chempots(facet)
def get_transition_chempots(self, element):
"""
Get the critical chemical potentials for an element in the Phase
Diagram.
Args:
element: An element. Has to be in the PD in the first place.
Returns:
A sorted sequence of critical chemical potentials, from less
negative to more negative.
"""
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with "
"elements in the phase diagram.")
critical_chempots = []
for facet in self.facets:
chempots = self._get_facet_chempots(facet)
critical_chempots.append(chempots[element])
clean_pots = []
for c in sorted(critical_chempots):
if len(clean_pots) == 0:
clean_pots.append(c)
else:
if abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol:
clean_pots.append(c)
clean_pots.reverse()
return tuple(clean_pots)
def get_critical_compositions(self, comp1, comp2):
"""
Get the critical compositions along the tieline between two
compositions. I.e. where the decomposition products change.
The endpoints are also returned.
Args:
comp1, comp2 (Composition): compositions that define the tieline
Returns:
[(Composition)]: list of critical compositions. All are of
the form x * comp1 + (1-x) * comp2
"""
n1 = comp1.num_atoms
n2 = comp2.num_atoms
pd_els = self.elements
# the reduced dimensionality Simplexes don't use the
# first element in the PD
c1 = self.pd_coords(comp1)
c2 = self.pd_coords(comp2)
# none of the projections work if c1 == c2, so just return *copies*
# of the inputs
if np.all(c1 == c2):
return [comp1.copy(), comp2.copy()]
intersections = [c1, c2]
for sc in self.simplexes:
intersections.extend(sc.line_intersection(c1, c2))
intersections = np.array(intersections)
# find position along line
l = (c2 - c1)
l /= np.sum(l ** 2) ** 0.5
proj = np.dot(intersections - c1, l)
# only take compositions between endpoints
proj = proj[np.logical_and(proj > -self.numerical_tol,
proj < proj[1] + self.numerical_tol)]
proj.sort()
# only unique compositions
valid = np.ones(len(proj), dtype=np.bool)
valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol
proj = proj[valid]
ints = c1 + l * proj[:, None]
# reconstruct full-dimensional composition array
cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T,
ints], axis=-1)
# mixing fraction when compositions are normalized
x = proj / np.dot(c2 - c1, l)
# mixing fraction when compositions are not normalized
x_unnormalized = x * n1 / (n2 + x * (n1 - n2))
num_atoms = n1 + (n2 - n1) * x_unnormalized
cs *= num_atoms[:, None]
return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs]
    def get_element_profile(self, element, comp, comp_tol=1e-5):
        """
        Provides the element evolution data for a composition.
        For example, can be used to analyze Li conversion voltages by varying
        uLi and looking at the phases formed. Also can be used to analyze O2
        evolution by varying uO2.

        Args:
            element: An element. Must be in the phase diagram.
            comp: A Composition
            comp_tol: The tolerance to use when calculating decompositions.
                Phases with amounts less than this tolerance are excluded.
                Defaults to 1e-5.

        Returns:
            Evolution data as a list of dictionaries of the following format:
            [ {'chempot': -10.487582010000001, 'evolution': -2.0,
            'reaction': Reaction Object], ...]

        Raises:
            ValueError: if ``element`` is not part of this phase diagram.
        """
        # Normalize to a plain Element (input may be a Specie or string).
        element = get_el_sp(element)
        element = Element(element.symbol)

        if element not in self.elements:
            raise ValueError("get_transition_chempots can only be called with"
                             " elements in the phase diagram.")
        # Composition with the open element removed.
        gccomp = Composition({el: amt for el, amt in comp.items()
                              if el != element})
        elref = self.el_refs[element]
        elcomp = Composition(element.symbol)
        evolution = []

        # Walk the tieline from the pure element to gccomp; each critical
        # composition corresponds to a change in decomposition products.
        for cc in self.get_critical_compositions(elcomp, gccomp)[1:]:
            decomp_entries = self.get_decomposition(cc).keys()
            decomp = [k.composition for k in decomp_entries]
            rxn = Reaction([comp], decomp + [elcomp])
            rxn.normalize_to(comp)
            # Perturb slightly towards the element to pick the facet on the
            # element-rich side of the critical composition.
            c = self.get_composition_chempots(cc + elcomp * 1e-5)[element]
            amt = -rxn.coeffs[rxn.all_comp.index(elcomp)]
            evolution.append({'chempot': c,
                              'evolution': amt,
                              'element_reference': elref,
                              'reaction': rxn, 'entries': decomp_entries})
        return evolution
    def get_chempot_range_map(self, elements, referenced=True, joggle=True):
        """
        Returns a chemical potential range map for each stable entry.

        Args:
            elements: Sequence of elements to be considered as independent
                variables. E.g., if you want to show the stability ranges
                of all Li-Co-O phases wrt to uLi and uO, you will supply
                [Element("Li"), Element("O")]
            referenced: If True, gives the results with a reference being the
                energy of the elemental phase. If False, gives absolute values.
            joggle (boolean): Whether to joggle the input to avoid precision
                errors.

        Returns:
            Returns a dict of the form {entry: [simplices]}. The list of
            simplices are the sides of the N-1 dim polytope bounding the
            allowable chemical potential range of each entry.
        """
        # One chempot vector per facet of the composition phase diagram.
        all_chempots = []
        pd = self
        facets = pd.facets
        for facet in facets:
            chempots = self._get_facet_chempots(facet)
            all_chempots.append([chempots[el] for el in pd.elements])

        inds = [pd.elements.index(el) for el in elements]

        # Optional shift so chempots are relative to the elemental phases.
        el_energies = {el: 0.0 for el in elements}
        if referenced:
            el_energies = {el: pd.el_refs[el].energy_per_atom
                           for el in elements}

        chempot_ranges = collections.defaultdict(list)
        vertices = [list(range(len(self.elements)))]
        if len(all_chempots) > len(self.elements):
            # Hull of the chempot vectors gives the bounding polytope.
            vertices = get_facets(all_chempots, joggle=joggle)

        for ufacet in vertices:
            for combi in itertools.combinations(ufacet, 2):
                data1 = facets[combi[0]]
                data2 = facets[combi[1]]
                common_ent_ind = set(data1).intersection(set(data2))
                if len(common_ent_ind) == len(elements):
                    # Adjacent facets sharing len(elements) phases define an
                    # edge of each shared phase's stability region.
                    common_entries = [pd.qhull_entries[i]
                                      for i in common_ent_ind]
                    data = np.array([[all_chempots[i][j]
                                      - el_energies[pd.elements[j]]
                                      for j in inds] for i in combi])
                    sim = Simplex(data)
                    for entry in common_entries:
                        chempot_ranges[entry].append(sim)

        return chempot_ranges
    def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=1e-2):
        """
        returns a set of chemical potentials corresponding to the vertices of
        the simplex in the chemical potential phase diagram.
        The simplex is built using all elements in the target_composition
        except dep_elt.
        The chemical potential of dep_elt is computed from the target
        composition energy.
        This method is useful to get the limiting conditions for
        defects computations for instance.

        Args:
            target_comp: A Composition object
            dep_elt: the element for which the chemical potential is computed
                from the energy of the stable phase at the target composition
            tol_en: a tolerance on the energy to set

        Returns:
            [{Element:mu}]: An array of conditions on simplex vertices for
            which each element has a chemical potential set to a given
            value. "absolute" values (i.e., not referenced to element energies)
        """
        # Elemental reference energies for all non-dependent elements.
        muref = np.array([self.el_refs[e].energy_per_atom
                          for e in self.elements if e != dep_elt])
        chempot_ranges = self.get_chempot_range_map(
            [e for e in self.elements if e != dep_elt])

        # Pad the target composition with zero amounts so all PD elements
        # are present.
        for e in self.elements:
            if not e in target_comp.elements:
                target_comp = target_comp + Composition({e: 0.0})

        coeff = [-target_comp[e] for e in self.elements if e != dep_elt]
        for e in chempot_ranges.keys():
            if e.composition.reduced_composition == \
                    target_comp.reduced_composition:
                multiplicator = e.composition[dep_elt] / target_comp[dep_elt]
                ef = e.energy / multiplicator
                all_coords = []
                for s in chempot_ranges[e]:
                    for v in s._coords:
                        elts = [e for e in self.elements if e != dep_elt]
                        res = {}
                        for i in range(len(elts)):
                            res[elts[i]] = v[i] + muref[i]
                        # mu of the dependent element follows from the phase's
                        # energy balance: sum(n_i * mu_i) = E.
                        res[dep_elt] = (np.dot(v + muref, coeff) + ef) / \
                            target_comp[dep_elt]
                        already_in = False
                        # Drop duplicate vertices within tol_en per element.
                        for di in all_coords:
                            dict_equals = True
                            for k in di:
                                if abs(di[k] - res[k]) > tol_en:
                                    dict_equals = False
                                    break
                            if dict_equals:
                                already_in = True
                                break
                        if not already_in:
                            all_coords.append(res)
        # NOTE(review): if no stable phase matches target_comp, ``all_coords``
        # is never assigned and this raises UnboundLocalError — confirm
        # callers guarantee the phase is stable.
        return all_coords
    def get_chempot_range_stability_phase(self, target_comp, open_elt):
        """
        returns a set of chemical potentials corresponding to the max and min
        chemical potential of the open element for a given composition. It is
        quite common to have for instance a ternary oxide (e.g., ABO3) for
        which you want to know what are the A and B chemical potential leading
        to the highest and lowest oxygen chemical potential (reducing and
        oxidizing conditions). This is useful for defect computations.

        Args:
            target_comp: A Composition object
            open_elt: Element that you want to constrain to be max or min

        Returns:
            {Element:(mu_min,mu_max)}: Chemical potentials are given in
            "absolute" values (i.e., not referenced to 0)
        """
        muref = np.array([self.el_refs[e].energy_per_atom
                          for e in self.elements if e != open_elt])
        chempot_ranges = self.get_chempot_range_map(
            [e for e in self.elements if e != open_elt])

        # Pad the target composition with zero amounts for missing elements.
        for e in self.elements:
            if not e in target_comp.elements:
                target_comp = target_comp + Composition({e: 0.0})

        coeff = [-target_comp[e] for e in self.elements if e != open_elt]
        max_open = -float('inf')
        min_open = float('inf')
        max_mus = None
        min_mus = None

        for e in chempot_ranges.keys():
            if e.composition.reduced_composition == \
                    target_comp.reduced_composition:
                multiplicator = e.composition[open_elt] / target_comp[open_elt]
                ef = e.energy / multiplicator
                all_coords = []
                for s in chempot_ranges[e]:
                    for v in s._coords:
                        all_coords.append(v)
                        # mu of the open element from the energy balance.
                        if (np.dot(v + muref, coeff) + ef) / target_comp[
                                open_elt] > max_open:
                            max_open = (np.dot(v + muref, coeff) + ef) / \
                                target_comp[open_elt]
                            max_mus = v
                        if (np.dot(v + muref, coeff) + ef) / target_comp[
                                open_elt] < min_open:
                            min_open = (np.dot(v + muref, coeff) + ef) / \
                                target_comp[open_elt]
                            min_mus = v

        elts = [e for e in self.elements if e != open_elt]
        res = {}

        # NOTE(review): if no stable phase matches target_comp, min_mus and
        # max_mus remain None and the indexing below raises TypeError —
        # confirm callers guarantee the phase is stable.
        for i in range(len(elts)):
            res[elts[i]] = (min_mus[i] + muref[i], max_mus[i] + muref[i])

        res[open_elt] = (min_open, max_open)
        return res
class GrandPotentialPhaseDiagram(PhaseDiagram):
    """
    A class representing a Grand potential phase diagram. Grand potential phase
    diagrams are essentially phase diagrams that are open to one or more
    components. To construct such phase diagrams, the relevant free energy is
    the grand potential, which can be written as the Legendre transform of the
    Gibbs free energy as follows

    Grand potential = G - u_X N_X

    The algorithm is based on the work in the following papers:

    1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
       First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
       doi:10.1021/cm702327g

    2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
       of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
       principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
       doi:10.1016/j.elecom.2010.01.010
    """

    def __init__(self, entries, chempots, elements=None):
        """
        Standard constructor for grand potential phase diagram.

        Args:
            entries ([PDEntry]): A list of PDEntry-like objects having an
                energy, energy_per_atom and composition.
            chempots {Element: float}: Specify the chemical potentials
                of the open elements.
            elements ([Element]): Optional list of elements in the phase
                diagram. If set to None, the elements are determined from
                the the entries themselves.
        """
        if elements is None:
            elements = set()
            for entry in entries:
                elements.update(entry.composition.elements)
        # Normalize chempot keys to Element/Specie objects.
        self.chempots = {get_el_sp(el): u for el, u in chempots.items()}
        # Open elements are removed from the compositional space.
        elements = set(elements).difference(self.chempots.keys())

        all_entries = []
        for e in entries:
            # Keep only entries with at least one non-open element; each is
            # wrapped so its energy becomes the grand potential.
            if len(set(e.composition.elements).intersection(set(elements))) > 0:
                all_entries.append(GrandPotPDEntry(e, self.chempots))

        super(GrandPotentialPhaseDiagram, self).__init__(all_entries, elements)

    def __str__(self):
        output = []
        chemsys = "-".join([el.symbol for el in self.elements])
        output.append("{} grand potential phase diagram with ".format(chemsys))
        output[-1] += ", ".join(["u{}={}".format(el, v)
                                 for el, v in self.chempots.items()])
        output.append("{} stable phases: ".format(len(self.stable_entries)))
        output.append(", ".join([entry.name
                                 for entry in self.stable_entries]))
        return "\n".join(output)

    def as_dict(self):
        # NOTE(review): "chempots" is stored with Element-object keys —
        # confirm the downstream JSON serializer handles non-string keys.
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "all_entries": [e.as_dict() for e in self.all_entries],
                "chempots": self.chempots,
                "elements": [e.as_dict() for e in self.elements]}

    @classmethod
    def from_dict(cls, d):
        entries = MontyDecoder().process_decoded(d["all_entries"])
        elements = MontyDecoder().process_decoded(d["elements"])
        return cls(entries, d["chempots"], elements)
class CompoundPhaseDiagram(PhaseDiagram):
    """
    Generates phase diagrams from compounds as terminations instead of
    elements.
    """

    # Tolerance for determining if amount of a composition is positive.
    amount_tol = 1e-5

    def __init__(self, entries, terminal_compositions,
                 normalize_terminal_compositions=True):
        """
        Initializes a CompoundPhaseDiagram.

        Args:
            entries ([PDEntry]): Sequence of input entries. For example,
                if you want a Li2O-P2O5 phase diagram, you might have all
                Li-P-O entries as an input.
            terminal_compositions ([Composition]): Terminal compositions of
                phase space. In the Li2O-P2O5 example, these will be the
                Li2O and P2O5 compositions.
            normalize_terminal_compositions (bool): Whether to normalize the
                terminal compositions to a per atom basis. If normalized,
                the energy above hulls will be consistent
                for comparison across systems. Non-normalized terminals are
                more intuitive in terms of compositional breakdowns.
        """
        self.original_entries = entries
        self.terminal_compositions = terminal_compositions
        self.normalize_terminals = normalize_terminal_compositions
        (pentries, species_mapping) = \
            self.transform_entries(entries, terminal_compositions)
        self.species_mapping = species_mapping
        # The "elements" of this diagram are the dummy species standing in
        # for the terminal compositions.
        super(CompoundPhaseDiagram, self).__init__(
            pentries, elements=species_mapping.values())

    def transform_entries(self, entries, terminal_compositions):
        """
        Method to transform all entries to the composition coordinate in the
        terminal compositions. If the entry does not fall within the space
        defined by the terminal compositions, they are excluded. For example,
        Li3PO4 is mapped into a Li2O:1.5, P2O5:0.5 composition. The terminal
        compositions are represented by DummySpecies.

        Args:
            entries: Sequence of all input entries
            terminal_compositions: Terminal compositions of phase space.

        Returns:
            Sequence of TransformedPDEntries falling within the phase space.
        """
        new_entries = []
        if self.normalize_terminals:
            fractional_comp = [c.fractional_composition
                               for c in terminal_compositions]
        else:
            fractional_comp = terminal_compositions

        # Map terminal compositions to unique dummy species.
        sp_mapping = collections.OrderedDict()
        for i, comp in enumerate(fractional_comp):
            # chr(102 + i) starts at "f", giving Xf, Xg, Xh, ...
            sp_mapping[comp] = DummySpecie("X" + chr(102 + i))

        for entry in entries:
            try:
                # Express the entry as a combination of the terminals.
                rxn = Reaction(fractional_comp, [entry.composition])
                rxn.normalize_to(entry.composition)
                # We only allow reactions that have positive amounts of
                # reactants.
                if all([rxn.get_coeff(comp) <= CompoundPhaseDiagram.amount_tol
                        for comp in fractional_comp]):
                    newcomp = {sp_mapping[comp]: -rxn.get_coeff(comp)
                               for comp in fractional_comp}
                    newcomp = {k: v for k, v in newcomp.items()
                               if v > CompoundPhaseDiagram.amount_tol}
                    transformed_entry = \
                        TransformedPDEntry(Composition(newcomp), entry)
                    new_entries.append(transformed_entry)
            except ReactionError:
                # If the reaction can't be balanced, the entry does not fall
                # into the phase space. We ignore them.
                pass

        return new_entries, sp_mapping

    def as_dict(self):
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "original_entries": [e.as_dict() for e in self.original_entries],
            "terminal_compositions": [c.as_dict()
                                      for c in self.terminal_compositions],
            "normalize_terminal_compositions":
                self.normalize_terminals}

    @classmethod
    def from_dict(cls, d):
        dec = MontyDecoder()
        entries = dec.process_decoded(d["original_entries"])
        terminal_compositions = dec.process_decoded(d["terminal_compositions"])
        return cls(entries, terminal_compositions,
                   d["normalize_terminal_compositions"])
class PhaseDiagramError(Exception):
    """Raised when phase diagram construction or analysis fails."""
def get_facets(qhull_data, joggle=False):
    """
    Get the simplex facets for the Convex hull.

    Args:
        qhull_data (np.ndarray): The data from which to construct the convex
            hull as a Nxd array (N being number of data points and d being the
            dimension)
        joggle (boolean): Whether to joggle the input to avoid precision
            errors.

    Returns:
        List of simplices of the Convex Hull.
    """
    # "QJ" joggles the input; "Qt" triangulates. "i" requests vertex indices.
    qhull_options = "QJ i" if joggle else "Qt i"
    return ConvexHull(qhull_data, qhull_options=qhull_options).simplices
class PDPlotter(object):
"""
A plotter class for phase diagrams.
Args:
phasediagram: PhaseDiagram object.
show_unstable (float): Whether unstable phases will be plotted as
well as red crosses. If a number > 0 is entered, all phases with
ehull < show_unstable will be shown.
\\*\\*plotkwargs: Keyword args passed to matplotlib.pyplot.plot. Can
be used to customize markers etc. If not set, the default is
{
"markerfacecolor": (0.2157, 0.4941, 0.7216),
"markersize": 10,
"linewidth": 3
}
"""
    def __init__(self, phasediagram, show_unstable=0, **plotkwargs):
        """
        Initialize the plotter.

        Args:
            phasediagram: PhaseDiagram object to plot (1-4 components only).
            show_unstable (float): Show phases with ehull < show_unstable as
                red crosses; 0 hides unstable phases.
            \\*\\*plotkwargs: Keyword args passed to matplotlib.pyplot.plot.

        Raises:
            ValueError: if the diagram has more than 4 components.
        """
        # note: palettable imports matplotlib
        from palettable.colorbrewer.qualitative import Set1_3
        self._pd = phasediagram
        self._dim = len(self._pd.elements)
        if self._dim > 4:
            raise ValueError("Only 1-4 components supported!")
        # For a 1-component diagram there is a single degenerate "line".
        self.lines = uniquelines(self._pd.facets) if self._dim > 1 else \
            [[self._pd.facets[0][0], self._pd.facets[0][0]]]
        self.show_unstable = show_unstable
        colors = Set1_3.mpl_colors
        self.plotkwargs = plotkwargs or {
            "markerfacecolor": colors[2],
            "markersize": 10,
            "linewidth": 3
        }
    @property
    def pd_plot_data(self):
        """
        Plot data for phase diagram.
        2-comp - Full hull with energies
        3/4-comp - Projection into 2D or 3D Gibbs triangle.

        Returns:
            (lines, stable_entries, unstable_entries):
            - lines is a list of list of coordinates for lines in the PD.
            - stable_entries is a {coordinate : entry} for each stable node
            in the phase diagram. (Each coordinate can only have one
            stable phase)
            - unstable_entries is a {entry: coordinates} for all unstable
            nodes in the phase diagram.
        """
        pd = self._pd
        entries = pd.qhull_entries
        data = np.array(pd.qhull_data)
        lines = []
        stable_entries = {}
        for line in self.lines:
            entry1 = entries[line[0]]
            entry2 = entries[line[1]]
            if self._dim < 3:
                # Binary: x is composition fraction, y is formation E/atom.
                x = [data[line[0]][0], data[line[1]][0]]
                y = [pd.get_form_energy_per_atom(entry1),
                     pd.get_form_energy_per_atom(entry2)]
                coord = [x, y]
            elif self._dim == 3:
                # Ternary: project compositions onto the Gibbs triangle.
                coord = triangular_coord(data[line, 0:2])
            else:
                # Quaternary: project onto a tetrahedron.
                coord = tet_coord(data[line, 0:3])
            lines.append(coord)
            labelcoord = list(zip(*coord))
            stable_entries[labelcoord[0]] = entry1
            stable_entries[labelcoord[1]] = entry2

        all_entries = pd.all_entries
        all_data = np.array(pd.all_entries_hulldata)
        unstable_entries = dict()
        stable = pd.stable_entries
        for i in range(0, len(all_entries)):
            entry = all_entries[i]
            if entry not in stable:
                # Unstable points are plotted as degenerate (point) "lines".
                if self._dim < 3:
                    x = [all_data[i][0], all_data[i][0]]
                    y = [pd.get_form_energy_per_atom(entry),
                         pd.get_form_energy_per_atom(entry)]
                    coord = [x, y]
                elif self._dim == 3:
                    coord = triangular_coord([all_data[i, 0:2],
                                              all_data[i, 0:2]])
                else:
                    coord = tet_coord([all_data[i, 0:3], all_data[i, 0:3],
                                       all_data[i, 0:3]])
                labelcoord = list(zip(*coord))
                unstable_entries[entry] = labelcoord[0]

        return lines, stable_entries, unstable_entries
def get_plot(self, label_stable=True, label_unstable=True, ordering=None,
energy_colormap=None, process_attributes=False):
if self._dim < 4:
plt = self._get_2d_plot(label_stable, label_unstable, ordering,
energy_colormap,
process_attributes=process_attributes)
elif self._dim == 4:
plt = self._get_3d_plot(label_stable)
return plt
    def plot_element_profile(self, element, comp, show_label_index=None, xlim=5):
        """
        Draw the element profile plot for a composition varying different
        chemical potential of an element.
        X value is the negative value of the chemical potential reference to
        elemental chemical potential. For example, if choose Element("Li"),
        X= -(µLi-µLi0), which corresponds to the voltage versus metal anode.
        Y values represent for the number of element uptake in this
        composition (unit: per atom). All reactions are printed to help
        choosing the profile steps you want to show label in the plot.

        Args:
            element (Element): An element of which the chemical potential is
                considered. It also must be in the phase diagram.
            comp (Composition): A composition.
            show_label_index (list of integers): The labels for reaction
                products you want to show in the plot. Default to None (not
                showing any annotation for reaction products). For the profile
                steps you want to show the labels, just add it to the
                show_label_index. The profile step counts from zero. For
                example, you can set show_label_index=[0, 2, 5] to label
                profile step 0,2,5.
            xlim (float): The max x value. x value is from 0 to xlim. Default
                to 5 eV.

        Returns:
            Plot of element profile evolution by varying the chemical
            potential of an element.
        """
        plt = pretty_plot(12, 8)
        pd = self._pd
        evolution = pd.get_element_profile(element, comp)
        num_atoms = evolution[0]["reaction"].reactants[0].num_atoms
        element_energy = evolution[0]['chempot']
        # Draw a step function: horizontal segments per decomposition regime,
        # vertical drops at critical chemical potentials.
        for i, d in enumerate(evolution):
            v = -(d["chempot"] - element_energy)
            print ("index= %s, -\u0394\u03BC=%.4f(eV)," % (i, v), d["reaction"])
            if i != 0:
                # Vertical connector from the previous step's end point
                # (x2/y1 are carried over from the previous iteration).
                plt.plot([x2, x2], [y1, d["evolution"] / num_atoms],
                         'k', linewidth=2.5)
            x1 = v
            y1 = d["evolution"] / num_atoms
            if i != len(evolution) - 1:
                x2 = - (evolution[i + 1]["chempot"] - element_energy)
            else:
                x2 = 5.0
            if show_label_index is not None and i in show_label_index:
                products = [re.sub(r"(\d+)", r"$_{\1}$", p.reduced_formula)
                            for p in d["reaction"].products
                            if p.reduced_formula != element.symbol]
                plt.annotate(", ".join(products), xy=(v + 0.05, y1 + 0.05),
                             fontsize=24, color='r')
                plt.plot([x1, x2], [y1, y1], 'r', linewidth=3)
            else:
                plt.plot([x1, x2], [y1, y1], 'k', linewidth=2.5)
        plt.xlim((0, xlim))
        plt.xlabel("-$\\Delta{\\mu}$ (eV)")
        plt.ylabel("Uptake per atom")
        return plt
def show(self, *args, **kwargs):
"""
Draws the phase diagram using Matplotlib and show it.
Args:
\\*args: Passed to get_plot.
\\*\\*kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
    def _get_2d_plot(self, label_stable=True, label_unstable=True,
                     ordering=None, energy_colormap=None, vmin_mev=-60.0,
                     vmax_mev=60.0, show_colorbar=True,
                     process_attributes=False):
        """
        Shows the plot using pylab. Usually I won't do imports in methods,
        but since plotting is a fairly expensive library to load and not all
        machines have matplotlib installed, I have done it this way.

        Args:
            label_stable: whether to annotate stable phases with their names.
            label_unstable: whether to annotate unstable phases (only those
                within the ``show_unstable`` e-above-hull window).
            ordering: optional ['Up', 'Left', 'Right'] phase names used to
                re-orient a ternary diagram (see ``order_phase_diagram``).
            energy_colormap: None for plain black markers, 'default' for a
                built-in green/red diverging map, or any matplotlib colormap.
            vmin_mev: lower color-scale limit, in meV/atom.
            vmax_mev: upper color-scale limit, in meV/atom.
            show_colorbar: whether to draw the colorbar when a colormap is
                used.
            process_attributes: if True, style markers according to
                ``entry.attribute`` ('existing' vs new compounds).

        Returns:
            The matplotlib.pyplot module with the diagram drawn on the
            current figure.
        """
        plt = pretty_plot(8, 6)
        # Deferred import: matplotlib is expensive to load and optional.
        from matplotlib.font_manager import FontProperties
        if ordering is None:
            (lines, labels, unstable) = self.pd_plot_data
        else:
            # Re-orient the ternary diagram to the user-requested corners.
            (_lines, _labels, _unstable) = self.pd_plot_data
            (lines, labels, unstable) = order_phase_diagram(
                _lines, _labels, _unstable, ordering)
        if energy_colormap is None:
            if process_attributes:
                for x, y in lines:
                    plt.plot(x, y, "k-", linewidth=3, markeredgecolor="k")
                # One should think about a clever way to have "complex"
                # attributes with complex processing options but with a clear
                # logic. At this moment, I just use the attributes to know
                # whether an entry is a new compound or an existing (from the
                # ICSD or from the MP) one.
                for x, y in labels.keys():
                    if labels[(x, y)].attribute is None or \
                            labels[(x, y)].attribute == "existing":
                        plt.plot(x, y, "ko", **self.plotkwargs)
                    else:
                        plt.plot(x, y, "k*", **self.plotkwargs)
            else:
                for x, y in lines:
                    plt.plot(x, y, "ko-", **self.plotkwargs)
        else:
            from matplotlib.colors import Normalize, LinearSegmentedColormap
            from matplotlib.cm import ScalarMappable
            for x, y in lines:
                plt.plot(x, y, "k-", markeredgecolor="k")
            # Color-scale limits are given in meV but energies are in eV.
            vmin = vmin_mev / 1000.0
            vmax = vmax_mev / 1000.0
            if energy_colormap == 'default':
                # Diverging green/red map with the break point at 0 eV.
                mid = - vmin / (vmax - vmin)
                cmap = LinearSegmentedColormap.from_list(
                    'my_colormap', [(0.0, '#005500'), (mid, '#55FF55'),
                                    (mid, '#FFAAAA'), (1.0, '#FF0000')])
            else:
                cmap = energy_colormap
            norm = Normalize(vmin=vmin, vmax=vmax)
            _map = ScalarMappable(norm=norm, cmap=cmap)
            _energies = [self._pd.get_equilibrium_reaction_energy(entry)
                         for coord, entry in labels.items()]
            # Clamp stable-phase energies to strictly negative values so
            # they map onto the "stable" side of the diverging colormap.
            energies = [en if en < 0.0 else -0.00000001 for en in _energies]
            vals_stable = _map.to_rgba(energies)
            ii = 0
            if process_attributes:
                for x, y in labels.keys():
                    if labels[(x, y)].attribute is None or \
                            labels[(x, y)].attribute == "existing":
                        plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
                                 markersize=12)
                    else:
                        plt.plot(x, y, "*", markerfacecolor=vals_stable[ii],
                                 markersize=18)
                    ii += 1
            else:
                for x, y in labels.keys():
                    plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
                             markersize=15)
                    ii += 1

        font = FontProperties()
        font.set_weight("bold")
        font.set_size(24)

        # Sets a nice layout depending on the type of PD. Also defines a
        # "center" for the PD, which then allows the annotations to be spread
        # out in a nice manner.
        if len(self._pd.elements) == 3:
            plt.axis("equal")
            plt.xlim((-0.1, 1.2))
            plt.ylim((-0.1, 1.0))
            plt.axis("off")
            center = (0.5, math.sqrt(3) / 6)
        else:
            all_coords = labels.keys()
            miny = min([c[1] for c in all_coords])
            ybuffer = max(abs(miny) * 0.1, 0.1)
            plt.xlim((-0.1, 1.1))
            plt.ylim((miny - ybuffer, ybuffer))
            center = (0.5, miny / 2)
            plt.xlabel("Fraction", fontsize=28, fontweight='bold')
            plt.ylabel("Formation energy (eV/fu)", fontsize=28,
                       fontweight='bold')

        for coords in sorted(labels.keys(), key=lambda x: -x[1]):
            entry = labels[coords]
            label = entry.name

            # The follow defines an offset for the annotation text emanating
            # from the center of the PD. Results in fairly nice layouts for the
            # most part.
            vec = (np.array(coords) - center)
            vec = vec / np.linalg.norm(vec) * 10 if np.linalg.norm(vec) != 0 \
                else vec
            valign = "bottom" if vec[1] > 0 else "top"
            if vec[0] < -0.01:
                halign = "right"
            elif vec[0] > 0.01:
                halign = "left"
            else:
                halign = "center"
            if label_stable:
                if process_attributes and entry.attribute == 'new':
                    plt.annotate(latexify(label), coords, xytext=vec,
                                 textcoords="offset points",
                                 horizontalalignment=halign,
                                 verticalalignment=valign,
                                 fontproperties=font,
                                 color='g')
                else:
                    plt.annotate(latexify(label), coords, xytext=vec,
                                 textcoords="offset points",
                                 horizontalalignment=halign,
                                 verticalalignment=valign,
                                 fontproperties=font)

        if self.show_unstable:
            font = FontProperties()
            font.set_size(16)
            energies_unstable = [self._pd.get_e_above_hull(entry)
                                 for entry, coord in unstable.items()]
            if energy_colormap is not None:
                energies.extend(energies_unstable)
                vals_unstable = _map.to_rgba(energies_unstable)
            ii = 0
            for entry, coords in unstable.items():
                ehull = self._pd.get_e_above_hull(entry)
                if ehull < self.show_unstable:
                    vec = (np.array(coords) - center)
                    vec = vec / np.linalg.norm(vec) * 10 \
                        if np.linalg.norm(vec) != 0 else vec
                    label = entry.name
                    if energy_colormap is None:
                        plt.plot(coords[0], coords[1], "ks", linewidth=3,
                                 markeredgecolor="k", markerfacecolor="r",
                                 markersize=8)
                    else:
                        plt.plot(coords[0], coords[1], "s", linewidth=3,
                                 markeredgecolor="k",
                                 markerfacecolor=vals_unstable[ii],
                                 markersize=8)
                    if label_unstable:
                        # NOTE(review): halign/valign here carry over from the
                        # last iteration of the stable-label loop instead of
                        # being recomputed from this entry's ``vec`` — looks
                        # like an oversight; confirm before relying on the
                        # resulting label placement.
                        plt.annotate(latexify(label), coords, xytext=vec,
                                     textcoords="offset points",
                                     horizontalalignment=halign, color="b",
                                     verticalalignment=valign,
                                     fontproperties=font)
                    ii += 1
        if energy_colormap is not None and show_colorbar:
            _map.set_array(energies)
            cbar = plt.colorbar(_map)
            cbar.set_label(
                'Energy [meV/at] above hull (in red)\nInverse energy ['
                'meV/at] above hull (in green)',
                rotation=-90, ha='left', va='center')
            ticks = cbar.ax.get_yticklabels()
            # cbar.ax.set_yticklabels(['${v}$'.format(
            #     v=float(t.get_text().strip('$'))*1000.0) for t in ticks])
        f = plt.gcf()
        f.set_size_inches((8, 6))
        plt.subplots_adjust(left=0.09, right=0.98, top=0.98, bottom=0.07)
        return plt
    def _get_3d_plot(self, label_stable=True):
        """
        Shows the plot using pylab. Usually I won't do imports in methods,
        but since plotting is a fairly expensive library to load and not all
        machines have matplotlib installed, I have done it this way.

        Args:
            label_stable: if True, elemental vertices are annotated with
                their own names while multi-element stable phases get a
                numeric index whose legend is printed at the bottom-left
                corner of the figure.

        Returns:
            The matplotlib.pyplot module with a 3D phase diagram drawn on
            the current figure.
        """
        import matplotlib.pyplot as plt
        import mpl_toolkits.mplot3d.axes3d as p3
        from matplotlib.font_manager import FontProperties
        fig = plt.figure()
        ax = p3.Axes3D(fig)
        font = FontProperties()
        font.set_weight("bold")
        font.set_size(20)
        (lines, labels, unstable) = self.pd_plot_data
        count = 1
        newlabels = list()
        for x, y, z in lines:
            ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
                    markerfacecolor="r", markersize=10)
        for coords in sorted(labels.keys()):
            entry = labels[coords]
            label = entry.name
            if label_stable:
                # Single elements keep their names; compounds are numbered
                # and listed in a legend to avoid crowding the 3D view.
                if len(entry.composition.elements) == 1:
                    ax.text(coords[0], coords[1], coords[2], label)
                else:
                    ax.text(coords[0], coords[1], coords[2], str(count))
                    newlabels.append("{} : {}".format(count, latexify(label)))
                    count += 1
        plt.figtext(0.01, 0.01, "\n".join(newlabels))
        ax.axis("off")
        return plt
    def write_image(self, stream, image_format="svg", **kwargs):
        """
        Writes the phase diagram to an image in a stream.

        Args:
            stream: stream to write to. Can be a file stream or a StringIO
                stream.
            image_format: format for image. Can be any of matplotlib
                supported formats. Defaults to svg for best results for
                vector graphics.
            \\*\\*kwargs: Pass through to get_plot function.
        """
        plt = self.get_plot(**kwargs)

        # Fixed canvas size so saved images have consistent dimensions.
        f = plt.gcf()
        f.set_size_inches((12, 10))

        plt.savefig(stream, format=image_format)
def plot_chempot_range_map(self, elements, referenced=True):
"""
Plot the chemical potential range _map. Currently works only for
3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
"""
self.get_chempot_range_map_plot(elements, referenced=referenced).show()
    def get_chempot_range_map_plot(self, elements, referenced=True):
        """
        Returns a plot of the chemical potential range _map. Currently works
        only for 3-component PDs.

        Args:
            elements: Sequence of elements to be considered as independent
                variables. E.g., if you want to show the stability ranges of
                all Li-Co-O phases wrt to uLi and uO, you will supply
                [Element("Li"), Element("O")]
            referenced: if True, gives the results with a reference being the
                energy of the elemental phase. If False, gives absolute values.

        Returns:
            A matplotlib plot object.
        """
        plt = pretty_plot(12, 8)
        chempot_ranges = self._pd.get_chempot_range_map(
            elements, referenced=referenced)
        # Phases containing one of the free elements have open polygons that
        # must be closed by hand further below; collect them here.
        missing_lines = {}
        excluded_region = []
        for entry, lines in chempot_ranges.items():
            comp = entry.composition
            center_x = 0
            center_y = 0
            coords = []
            contain_zero = any([comp.get_atomic_fraction(el) == 0
                                for el in elements])
            is_boundary = (not contain_zero) and \
                sum([comp.get_atomic_fraction(el) for el in
                     elements]) == 1
            for line in lines:
                (x, y) = line.coords.transpose()
                plt.plot(x, y, "k-")

                # Accumulate unique vertices to later place the phase label
                # at the centroid of its polygon.
                for coord in line.coords:
                    if not in_coord_list(coords, coord):
                        coords.append(coord.tolist())
                        center_x += coord[0]
                        center_y += coord[1]
                if is_boundary:
                    excluded_region.extend(line.coords)

            if coords and contain_zero:
                missing_lines[entry] = coords
            else:
                xy = (center_x / len(coords), center_y / len(coords))
                plt.annotate(latexify(entry.name), xy, fontsize=22)

        ax = plt.gca()
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()

        # Shade the forbidden chemical potential regions.
        excluded_region.append([xlim[1], ylim[1]])
        excluded_region = sorted(excluded_region, key=lambda c: c[0])
        (x, y) = np.transpose(excluded_region)
        plt.fill(x, y, "0.80")

        # The hull does not generate the missing horizontal and vertical lines.
        # The following code fixes this.
        el0 = elements[0]
        el1 = elements[1]
        for entry, coords in missing_lines.items():
            center_x = sum([c[0] for c in coords])
            center_y = sum([c[1] for c in coords])
            comp = entry.composition
            is_x = comp.get_atomic_fraction(el0) < 0.01
            is_y = comp.get_atomic_fraction(el1) < 0.01
            n = len(coords)
            if not (is_x and is_y):
                if is_x:
                    # Extend horizontal lines out to the left plot edge.
                    coords = sorted(coords, key=lambda c: c[1])
                    for i in [0, -1]:
                        x = [min(xlim), coords[i][0]]
                        y = [coords[i][1], coords[i][1]]
                        plt.plot(x, y, "k")
                        center_x += min(xlim)
                        center_y += coords[i][1]
                elif is_y:
                    # Extend vertical lines down to the bottom plot edge.
                    coords = sorted(coords, key=lambda c: c[0])
                    for i in [0, -1]:
                        x = [coords[i][0], coords[i][0]]
                        y = [coords[i][1], min(ylim)]
                        plt.plot(x, y, "k")
                        center_x += coords[i][0]
                        center_y += min(ylim)
                xy = (center_x / (n + 2), center_y / (n + 2))
            else:
                center_x = sum(coord[0] for coord in coords) + xlim[0]
                center_y = sum(coord[1] for coord in coords) + ylim[0]
                xy = (center_x / (n + 1), center_y / (n + 1))

            plt.annotate(latexify(entry.name), xy,
                         horizontalalignment="center",
                         verticalalignment="center", fontsize=22)

        plt.xlabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
                   .format(el0.symbol))
        plt.ylabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
                   .format(el1.symbol))
        plt.tight_layout()
        return plt
    def get_contour_pd_plot(self):
        """
        Plot a contour phase diagram plot, where phase triangles are colored
        according to degree of instability by interpolation. Currently only
        works for 3-component phase diagrams.

        Returns:
            A matplotlib plot object.
        """
        from scipy import interpolate
        from matplotlib import cm
        pd = self._pd
        entries = pd.qhull_entries
        data = np.array(pd.qhull_data)

        # Draw the regular 2D diagram first, then overlay the contours.
        plt = self._get_2d_plot()
        # Convert compositions to triangular coordinates; the third column
        # is replaced with e-above-hull, used as the contour height.
        data[:, 0:2] = triangular_coord(data[:, 0:2]).transpose()
        for i, e in enumerate(entries):
            data[i, 2] = self._pd.get_e_above_hull(e)

        gridsize = 0.005
        xnew = np.arange(0, 1., gridsize)
        ynew = np.arange(0, 1, gridsize)

        f = interpolate.LinearNDInterpolator(data[:, 0:2], data[:, 2])
        znew = np.zeros((len(ynew), len(xnew)))
        for (i, xval) in enumerate(xnew):
            for (j, yval) in enumerate(ynew):
                znew[j, i] = f(xval, yval)

        plt.contourf(xnew, ynew, znew, 1000, cmap=cm.autumn_r)

        plt.colorbar()
        return plt
def uniquelines(q):
    """
    Collapse a collection of facets into the set of unique edges they span.
    Specifically used for converting convex hull facets into line pairs of
    coordinates.

    Args:
        q: A 2-dim sequence, where each row represents a facet. E.g.,
            [[1,2,3],[3,6,7],...]

    Returns:
        setoflines:
            A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....)
    """
    edges = set()
    for facet in q:
        edges.update(tuple(sorted(pair))
                     for pair in itertools.combinations(facet, 2))
    return edges
def triangular_coord(coord):
    """
    Convert a 2D coordinate into a triangle-based coordinate system for a
    prettier phase diagram.

    Args:
        coord: coordinate used in the convex hull computation.

    Returns:
        coordinates in a triangular-based coordinate system.
    """
    # Basis vectors of the equilateral-triangle coordinate system.
    basis = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
    return np.dot(np.array(coord), basis).transpose()
def tet_coord(coord):
    """
    Convert a 3D coordinate into a tetrahedron based coordinate system for a
    prettier phase diagram.

    Args:
        coord: coordinate used in the convex hull computation.

    Returns:
        coordinates in a tetrahedron-based coordinate system.
    """
    # Basis vectors of the regular-tetrahedron coordinate system.
    basis = np.array([[1, 0, 0],
                      [0.5, math.sqrt(3) / 2, 0],
                      [0.5, 1.0 / 3.0 * math.sqrt(3) / 2, math.sqrt(6) / 3]])
    return np.dot(np.array(coord), basis).transpose()
def order_phase_diagram(lines, stable_entries, unstable_entries, ordering):
    """
    Orders the entries (their coordinates) in a phase diagram plot according
    to the user specified ordering.
    Ordering should be given as ['Up', 'Left', 'Right'], where Up,
    Left and Right are the names of the entries in the upper, left and right
    corners of the triangle respectively.

    Args:
        lines: list of list of coordinates for lines in the PD.
        stable_entries: {coordinate : entry} for each stable node in the
            phase diagram. (Each coordinate can only have one stable phase)
        unstable_entries: {entry: coordinates} for all unstable nodes in the
            phase diagram.
        ordering: Ordering of the phase diagram, given as a list ['Up',
            'Left','Right']

    Returns:
        (newlines, newstable_entries, newunstable_entries):
            - newlines is a list of list of coordinates for lines in the PD.
            - newstable_entries is a {coordinate : entry} for each stable node
              in the phase diagram. (Each coordinate can only have one
              stable phase)
            - newunstable_entries is a {entry: coordinates} for all unstable
              nodes in the phase diagram.

    Raises:
        ValueError: if any of the three corner phase names is not present in
            ``ordering``.
    """
    # Locate the phases currently sitting at the top, left and right
    # corners of the triangle.
    yup = -1000.0
    xleft = 1000.0
    xright = -1000.0

    for coord in stable_entries:
        if coord[0] > xright:
            xright = coord[0]
            nameright = stable_entries[coord].name
        if coord[0] < xleft:
            xleft = coord[0]
            nameleft = stable_entries[coord].name
        if coord[1] > yup:
            yup = coord[1]
            nameup = stable_entries[coord].name

    if (nameup not in ordering) or (nameright not in ordering) or \
            (nameleft not in ordering):
        raise ValueError(
            'Error in ordering_phase_diagram : \n"{up}", "{left}" and "{'
            'right}"'
            ' should be in ordering : {ord}'.format(up=nameup, left=nameleft,
                                                    right=nameright,
                                                    ord=ordering))

    # Centroid of the ternary triangle, used as the rotation center.
    # BUGFIX: the dtype used to be ``np.float``, an alias removed in
    # NumPy >= 1.24; the builtin ``float`` is the documented replacement.
    cc = np.array([0.5, np.sqrt(3.0) / 6.0], float)

    if nameup == ordering[0]:
        if nameleft == ordering[1]:
            # The coordinates were already in the user ordering
            return lines, stable_entries, unstable_entries
        # Left and right are swapped: mirror about the axis x = 0.5.
        newlines = [[np.array(1.0 - x), y] for x, y in lines]
        newstable_entries = {(1.0 - c[0], c[1]): entry
                             for c, entry in stable_entries.items()}
        newunstable_entries = {entry: (1.0 - c[0], c[1])
                               for entry, c in
                               unstable_entries.items()}
        return newlines, newstable_entries, newunstable_entries
    elif nameup == ordering[1]:
        if nameleft == ordering[2]:
            # Rotate by 120 degrees about the centroid.
            c120 = np.cos(2.0 * np.pi / 3.0)
            s120 = np.sin(2.0 * np.pi / 3.0)
            newlines = []
            for x, y in lines:
                newx = np.zeros_like(x)
                newy = np.zeros_like(y)
                for ii, xx in enumerate(x):
                    newx[ii] = c120 * (xx - cc[0]) - s120 * (y[ii] - cc[1]) + \
                        cc[0]
                    newy[ii] = s120 * (xx - cc[0]) + c120 * (y[ii] - cc[1]) + \
                        cc[1]
                newlines.append([newx, newy])
            newstable_entries = {
                (c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
                 s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1]): entry
                for c, entry in stable_entries.items()}
            newunstable_entries = {
                entry: (c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
                        s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1])
                for entry, c in unstable_entries.items()}
            return newlines, newstable_entries, newunstable_entries
        else:
            # Rotate by 120 degrees about the right corner (1, 0), with a
            # reflection (note the negated rotation coefficients).
            c120 = np.cos(2.0 * np.pi / 3.0)
            s120 = np.sin(2.0 * np.pi / 3.0)
            newlines = []
            for x, y in lines:
                newx = np.zeros_like(x)
                newy = np.zeros_like(y)
                for ii, xx in enumerate(x):
                    newx[ii] = -c120 * (xx - 1.0) - s120 * y[ii] + 1.0
                    newy[ii] = -s120 * (xx - 1.0) + c120 * y[ii]
                newlines.append([newx, newy])
            newstable_entries = {(-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
                                  -s120 * (c[0] - 1.0) + c120 * c[1]): entry
                                 for c, entry in stable_entries.items()}
            newunstable_entries = {
                entry: (-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
                        -s120 * (c[0] - 1.0) + c120 * c[1])
                for entry, c in unstable_entries.items()}
            return newlines, newstable_entries, newunstable_entries
    elif nameup == ordering[2]:
        if nameleft == ordering[0]:
            # Rotate by 240 degrees about the centroid.
            c240 = np.cos(4.0 * np.pi / 3.0)
            s240 = np.sin(4.0 * np.pi / 3.0)
            newlines = []
            for x, y in lines:
                newx = np.zeros_like(x)
                newy = np.zeros_like(y)
                for ii, xx in enumerate(x):
                    newx[ii] = c240 * (xx - cc[0]) - s240 * (y[ii] - cc[1]) + \
                        cc[0]
                    newy[ii] = s240 * (xx - cc[0]) + c240 * (y[ii] - cc[1]) + \
                        cc[1]
                newlines.append([newx, newy])
            newstable_entries = {
                (c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
                 s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1]): entry
                for c, entry in stable_entries.items()}
            newunstable_entries = {
                entry: (c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
                        s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1])
                for entry, c in unstable_entries.items()}
            return newlines, newstable_entries, newunstable_entries
        else:
            # Rotate by 240 degrees about the origin, with a reflection.
            c240 = np.cos(4.0 * np.pi / 3.0)
            s240 = np.sin(4.0 * np.pi / 3.0)
            newlines = []
            for x, y in lines:
                newx = np.zeros_like(x)
                newy = np.zeros_like(y)
                for ii, xx in enumerate(x):
                    newx[ii] = -c240 * xx - s240 * y[ii]
                    newy[ii] = -s240 * xx + c240 * y[ii]
                newlines.append([newx, newy])
            newstable_entries = {(-c240 * c[0] - s240 * c[1],
                                  -s240 * c[0] + c240 * c[1]): entry
                                 for c, entry in stable_entries.items()}
            newunstable_entries = {entry: (-c240 * c[0] - s240 * c[1],
                                           -s240 * c[0] + c240 * c[1])
                                   for entry, c in unstable_entries.items()}
            return newlines, newstable_entries, newunstable_entries
|
johnson1228/pymatgen
|
pymatgen/analysis/phase_diagram.py
|
Python
|
mit
| 77,867
|
[
"pymatgen"
] |
177883f020404416ac3f1748ab5470e189864c94159845a0767d77d73322c870
|
__author__ = 'brian'

import sys

# PBS embeds its own python2.5 runtime; make sure its library directories
# are importable and that the system python2.6 paths do not shadow them.
my_paths = [
    '/usr/local/pbs/default/python/lib/python2.5',
    '/usr/local/pbs/default/python/lib/python2.5/plat-linux2',
    '/usr/local/pbs/default/python/lib/python2.5/lib-tk',
    '/usr/local/pbs/default/python/lib/python2.5/lib-dynload',
    '/usr/local/pbs/default/python/lib/python2.5/site-packages',
]
for my_path in my_paths:
    if my_path not in sys.path:
        sys.path.append(my_path)
if "/usr/lib64/python2.6" in sys.path:
    sys.path.remove("/usr/lib64/python2.6")

# Imported for its side effect — presumably registers the ascii codec in
# the embedded interpreter; confirm before removing.
import encodings.ascii
import pbs

try:
    # Python 3
    import configparser
except ImportError:
    # Python 2
    import ConfigParser as configparser

try:
    # Python 3
    import xmlrpc.client as xmlrpclib
except ImportError:
    # Python 2
    import xmlrpclib

# PBS hook entry: validate that the submitted job names a project the
# requestor belongs to, then stamp the project as the job's group_list.
e = pbs.event()
try:
    config_file = "/etc/karaage3/karaage-cluster-tools.cfg"

    # Fail early (and loudly) if the config file is unreadable.
    f = open(config_file, "r")
    f.close()

    config = configparser.RawConfigParser()
    config.read(config_file)

    username = config.get('karaage', 'username')
    password = config.get('karaage', 'password')
    url = config.get('karaage', 'url')

    server = xmlrpclib.Server(url)

    if e.job.project is None:
        e.reject(
            "The project has not been supplied. Please specify "
            "project with '-P <project>'.")

    project = str(e.job.project)

    members = server.get_project_members(username, password, project)
    # The XML-RPC service returns a string (error message) for unknown
    # projects and a list of usernames otherwise.
    if isinstance(members, str):
        e.reject(
            "The project %s is invalid." % project)

    if e.requestor not in members:
        e.reject(
            "User %s is not a member of project %s." % (e.requestor, project))

    # A comma would corrupt the comma-separated group_list attribute; the
    # AssertionError is converted into a rejection by the handler below.
    assert "," not in project
    e.job.group_list = pbs.group_list(project)

except SystemExit:
    # e.reject() terminates via SystemExit; let the rejection stand.
    pass
except Exception:
    # BUGFIX: was a bare ``except:``, which also swallowed
    # KeyboardInterrupt and other BaseExceptions. Narrowed to Exception so
    # only genuine errors are converted into a job rejection.
    # import traceback
    # traceback.print_exc()
    e.reject(
        "%s hook failed with %s. Please contact Admin."
        % (e.hook_name, sys.exc_info()[:2]))
|
Karaage-Cluster/karaage-hacks
|
require_project.py
|
Python
|
gpl-3.0
| 1,951
|
[
"Brian"
] |
b8cecef30db8e8695b6bab105cbe2a35f40ae4ad12d909e5bf59c99c161f9121
|
# Copyright (C) 2019 Brian McMaster
# Copyright (C) 2019 Open Source Integrators
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
class TestL10nUsForm1099(TransactionCase):

    def test_on_change_is_1099(self):
        """Flagging a partner as 1099 must also mark it as a supplier."""
        demo_partner = self.env.ref('base.res_partner_2')
        demo_partner.is_1099 = True
        demo_partner._on_change_is_1099()
        self.assertTrue(demo_partner.supplier)

    def test_on_change_supplier(self):
        """Clearing the supplier flag must also clear the 1099 flag."""
        demo_partner = self.env.ref('base.res_partner_2')
        demo_partner.supplier = False
        demo_partner._on_change_supplier()
        self.assertFalse(demo_partner.is_1099)
|
OCA/l10n-usa
|
l10n_us_form_1099/tests/test_l10n_us_form_1099.py
|
Python
|
agpl-3.0
| 811
|
[
"Brian"
] |
49c7494c17be3868ab389a553ac5c5e44e7d28e00effc31e5e3fd2e696bb1cff
|
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django.utils import six
from .models import (Building, Child, Device, Port, Item, Country, Connection,
ClientStatus, State, Client, SpecialClient, TUser, Person, Student,
Organizer, Class, Enrollment)
class SelectRelatedRegressTests(TestCase):
    """Regression tests for select_related() join/alias handling."""

    def test_regression_7110(self):
        """
        Regression test for bug #7110.

        When using select_related(), we must query the
        Device and Building tables using two different aliases (each) in order to
        differentiate the start and end Connection fields. The net result is that
        both the "connections = ..." queries here should give the same results
        without pulling in more than the absolute minimum number of tables
        (history has shown that it's easy to make a mistake in the implementation
        and include some unnecessary bonus joins).
        """
        b=Building.objects.create(name='101')
        dev1=Device.objects.create(name="router", building=b)
        dev2=Device.objects.create(name="switch", building=b)
        dev3=Device.objects.create(name="server", building=b)
        port1=Port.objects.create(port_number='4',device=dev1)
        port2=Port.objects.create(port_number='7',device=dev2)
        port3=Port.objects.create(port_number='1',device=dev3)
        c1=Connection.objects.create(start=port1, end=port2)
        c2=Connection.objects.create(start=port2, end=port3)

        # Same query with and without select_related() must agree.
        connections=Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id')
        self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
            [(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])

        connections=Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id')
        self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
            [(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])

        # This final query should only have seven tables (port, device and building
        # twice each, plus connection once). Thus, 6 joins plus the FROM table.
        self.assertEqual(str(connections.query).count(" JOIN "), 6)

    def test_regression_8106(self):
        """
        Regression test for bug #8106.

        Same sort of problem as the previous test, but this time there are
        more extra tables to pull in as part of the select_related() and some
        of them could potentially clash (so need to be kept separate).
        """
        us = TUser.objects.create(name="std")
        usp = Person.objects.create(user=us)
        uo = TUser.objects.create(name="org")
        uop = Person.objects.create(user=uo)
        s = Student.objects.create(person = usp)
        o = Organizer.objects.create(person = uop)
        c = Class.objects.create(org=o)
        e = Enrollment.objects.create(std=s, cls=c)

        # Two distinct Person->TUser chains must not share join aliases.
        e_related = Enrollment.objects.all().select_related()[0]
        self.assertEqual(e_related.std.person.user.name, "std")
        self.assertEqual(e_related.cls.org.person.user.name, "org")

    def test_regression_8036(self):
        """
        Regression test for bug #8036

        the first related model in the tests below
        ("state") is empty and we try to select the more remotely related
        state__country. The regression here was not skipping the empty column results
        for country before getting status.
        """
        australia = Country.objects.create(name='Australia')
        active = ClientStatus.objects.create(name='active')
        client = Client.objects.create(name='client', status=active)
        self.assertEqual(client.status, active)
        self.assertEqual(Client.objects.select_related()[0].status, active)
        self.assertEqual(Client.objects.select_related('state')[0].status, active)
        self.assertEqual(Client.objects.select_related('state', 'status')[0].status, active)
        self.assertEqual(Client.objects.select_related('state__country')[0].status, active)
        self.assertEqual(Client.objects.select_related('state__country', 'status')[0].status, active)
        self.assertEqual(Client.objects.select_related('status')[0].status, active)

    def test_multi_table_inheritance(self):
        """ Exercising select_related() with multi-table model inheritance. """
        c1 = Child.objects.create(name="child1", value=42)
        i1 = Item.objects.create(name="item1", child=c1)
        i2 = Item.objects.create(name="item2")
        self.assertQuerysetEqual(
            Item.objects.select_related("child").order_by("name"),
            ["<Item: item1>", "<Item: item2>"]
        )

    def test_regression_12851(self):
        """
        Regression for #12851

        Deferred fields are used correctly if you select_related a subset
        of fields.
        """
        australia = Country.objects.create(name='Australia')
        active = ClientStatus.objects.create(name='active')

        wa = State.objects.create(name="Western Australia", country=australia)
        c1 = Client.objects.create(name='Brian Burke', state=wa, status=active)
        burke = Client.objects.select_related('state').defer('state__name').get(name='Brian Burke')
        self.assertEqual(burke.name, 'Brian Burke')
        self.assertEqual(burke.state.name, 'Western Australia')

        # Still works if we're dealing with an inherited class
        sc1 = SpecialClient.objects.create(name='Troy Buswell', state=wa, status=active, value=42)
        troy = SpecialClient.objects.select_related('state').defer('state__name').get(name='Troy Buswell')
        self.assertEqual(troy.name, 'Troy Buswell')
        self.assertEqual(troy.value, 42)
        self.assertEqual(troy.state.name, 'Western Australia')

        # Still works if we defer an attribute on the inherited class
        troy = SpecialClient.objects.select_related('state').defer('value', 'state__name').get(name='Troy Buswell')
        self.assertEqual(troy.name, 'Troy Buswell')
        self.assertEqual(troy.value, 42)
        self.assertEqual(troy.state.name, 'Western Australia')

        # Also works if you use only, rather than defer
        troy = SpecialClient.objects.select_related('state').only('name', 'state').get(name='Troy Buswell')
        self.assertEqual(troy.name, 'Troy Buswell')
        self.assertEqual(troy.value, 42)
        self.assertEqual(troy.state.name, 'Western Australia')
|
waseem18/oh-mainline
|
vendor/packages/Django/tests/regressiontests/select_related_regress/tests.py
|
Python
|
agpl-3.0
| 6,599
|
[
"Brian"
] |
0c61c11a4505fc96d94074c7db52ee43636f3ee06e85790a28c512dcc509d28f
|
"""pyMOOSE
Python bindings of MOOSE simulator.
References:
-----------
- `Documentation https://moose.readthedocs.io/en/latest/`
- `Development https://github.com/BhallaLab/moose-core`
"""
# Notes
# -----
# Use these guidelines for docstring: https://numpydoc.readthedocs.io/en/latest/format.html
import sys
import pydoc
import os
import moose._moose as _moose
from moose import model_utils
# Registry of generated melement subclasses keyed by MOOSE class name;
# populated by the Cinfo loop below.
__moose_classes__ = {}
class melement(_moose.ObjId):
    """Base class for all moose classes.

    Concrete subclasses are generated at import time, one per MOOSE
    ``Cinfo`` class, with ``__type__`` set to that class's name.
    """

    # Overridden in each generated subclass with the MOOSE class name.
    __type__ = "UNKNOWN"
    __doc__ = ""

    def __init__(self, x, ndata=1, **kwargs):
        # x: path/identifier passed to _moose.__create__; ndata: number of
        # data entries. Remaining keyword args are applied via setField().
        obj = _moose.__create__(self.__type__, x, ndata)
        if sys.version_info.major > 2:
            super().__init__(obj)
            for k, v in kwargs.items():
                super().setField(k, v)
        else:
            # Support for dead python2.
            super(melement, self).__init__(obj)
            for k, v in kwargs.items():
                super(melement, self).setField(k, v)
def __to_melement(obj):
    """Return *obj* wrapped in the generated class matching ``obj.type``."""
    wrapper_cls = __moose_classes__[obj.type]
    return wrapper_cls(obj)
# Create MOOSE classes from available Cinfos.
for p in _moose.wildcardFind("/##[TYPE=Cinfo]"):
    if sys.version_info.major > 2:
        cls = type(
            p.name,
            (melement,),
            {"__type__": p.name, "__doc__": _moose.__generatedoc__(p.name)},
        )
    else:
        # Python2.
        # str(p.name) — presumably because type() needs a byte-str class
        # name on python2; confirm before simplifying.
        cls = type(
            str(p.name),
            (melement,),
            {"__type__": p.name, "__doc__": _moose.__generatedoc__(p.name)},
        )
    # Expose the generated class on the _moose module and register it so
    # raw objects can be wrapped back into it later.
    setattr(_moose, cls.__name__, cls)
    __moose_classes__[cls.__name__] = cls
# Import all attributes to global namespace. We must do it here after adding
# class types to _moose.
from moose._moose import *
def version():
    """Return the moose version string."""
    return _moose.__version__


# Evaluated once at import so ``moose.__version__`` is always available.
__version__ = version()
def version_info():
    """Return detailed version information.

    Returns
    -------
    dict
        Keys include 'major', 'minor', 'patch', 'build_datetime' and
        'compiler_string' (see example below).

    >>> moose.version_info()
    {'build_datetime': 'Friday Fri Apr 17 22:13:00 2020',
     'compiler_string': 'GNU,/usr/bin/c++,7.5.0',
     'major': '3',
     'minor': '3',
     'patch': '1'}
    """
    return _moose.version_info()
def about():
    """General information about pyMOOSE.

    Returns
    -------
    dict
        Keys: ``path`` (installation directory), ``version``, ``docs``
        and ``development`` (project URLs).

    Example
    -------
    >>> moose.about()
    {'path': '~/moose-core/_build/python/moose',
     'version': '4.0.0.dev20200417',
     'docs': 'https://moose.readthedocs.io/en/latest/',
     'development': 'https://github.com/BhallaLab/moose-core'}
    """
    return {
        "path": os.path.dirname(__file__),
        "version": _moose.__version__,
        "docs": "https://moose.readthedocs.io/en/latest/",
        "development": "https://github.com/BhallaLab/moose-core",
    }
def wildcardFind(pattern):
    """Search the element tree using a wildcard pattern.

    Parameters
    ----------
    pattern : str
        Wildcard of the form {PATH}/{WILDCARD}[{CONDITION}]. {PATH} is a
        valid path in the element tree. {WILDCARD} is ``#`` (restrict the
        search to children of {PATH}) or ``##`` (search all descendants of
        {PATH} recursively). The optional {CONDITION} is one of:

        - TYPE={CLASSNAME}: element matches if it is of class {CLASSNAME}.
        - ISA={CLASSNAME}: alias for TYPE={CLASSNAME}
        - CLASS={CLASSNAME}: alias for TYPE={CLASSNAME}
        - FIELD({FIELDNAME}){OPERATOR}{VALUE}: compare field {FIELDNAME}
          with {VALUE} using comparison operator {OPERATOR}
          (=, !=, >, <, >=, <=).

    Returns
    -------
    list
        Matching MOOSE objects, each wrapped in its generated class.

    Examples
    --------
    All objects under /mymodel whose Vm field is >= -65:

    >>> moose.wildcardFind("/mymodel/##[FIELD(Vm)>=-65]")
    """
    return list(map(__to_melement, _moose.wildcardFind(pattern)))
def connect(src, srcfield, dest, destfield, msgtype="Single"):
    """Create a message from `srcfield` on `src` to `destfield` on `dest`.

    A message is MOOSE's data channel between two elements: once created,
    `destfield` of `dest` receives the data that `srcfield` of `src`
    provides.

    Parameters
    ----------
    src : element/vec/string
        Source object (or its path) — the one that provides information.
    srcfield : str
        Field on `src` that sends the information.
    dest : element/vec/string
        Destination object (or its path) — the one that needs to get the
        information.
    destfield : str
        Field to connect to on `dest`.
    msgtype : str
        Type of the message (default Single). One of:

        - Single
        - OneToAll
        - AllToOne
        - OneToOne
        - Reduce
        - Sparse

    Returns
    -------
    msgmanager: melement
        message-manager for the newly created message.

    Note
    -----
    Alternatively, one can also use the following form::

        >>> src.connect(srcfield, dest, destfield, msgtype)

    Examples
    --------
    Connect the output of a pulse generator to the input of a spike
    generator::

        >>> pulsegen = moose.PulseGen('pulsegen')
        >>> spikegen = moose.SpikeGen('spikegen')
        >>> moose.connect(pulsegen, 'output', spikegen, 'Vm')
    """
    source = _moose.element(src)
    sink = _moose.element(dest)
    return source.connect(srcfield, sink, destfield, msgtype)
def delete(arg):
    """Delete the underlying moose object(s).

    This does not delete any of the Python objects referring to this vec
    but does invalidate them. Any attempt to access them will raise a
    ValueError.

    Parameters
    ----------
    arg : vec/str/melement
        Object (or path of the object) to be deleted.

    Returns
    -------
    None. Raises ValueError if the given path/object does not exist.
    """
    _moose.delete(arg)
def element(arg):
    """Convert a path or object to the matching builtin moose class instance.

    Parameters
    ----------
    arg : str/vec/moose object
        Path of the element, or another element (possibly held as a
        superclass instance).

    Returns
    -------
    melement
        MOOSE element cast to the right subclass.
    """
    return _moose.element(arg)


def exists(path):
    """Return True if an object with the given path already exists."""
    return _moose.exists(path)
def getCwe():
    """Return the current working element.

    See also
    --------
    moose.setCwe
    """
    return _moose.getCwe()


def getField(classname, fieldname):
    """Return the specified field of the specified class."""
    return _moose.getField(classname, fieldname)
def getFieldDict(classname, finfoType=""):
    """Return a dict of field names and types for the given class.

    Parameters
    ----------
    classname : str
        MOOSE class whose fields are listed.
    finfoType : str (default '')
        Finfo type to restrict to; all fields are returned when empty.

    Returns
    -------
    dict
        Field name -> field type.

    Notes
    -----
    Unlike `getFieldNames`, leaving `finfoType` unspecified returns every
    field, not just the `valueFinfo`s.

    Examples
    --------
    >>> moose.getFieldDict('Neutral', 'srcFinfo')
    {'childMsg': 'int'}
    """
    return _moose.getFieldDict(classname, finfoType)
def getFieldNames(elem, fieldtype="*"):
    """Return a tuple of field names of a given fieldtype ('*' = all).

    Parameters
    ----------
    elem : str/melement
        Class name or a moose element to look up.
    fieldtype : str
        Kind of field: 'valueFinfo'/'value', 'srcFinfo'/'src',
        'destFinfo'/'dest', 'lookupFinfo'/'lookup',
        'fieldElementFinfo'/'fieldElement', or '*' for all.

    Returns
    -------
    list
        Names of the matching fields.
    """
    if isinstance(elem, str):
        classname = elem
    else:
        classname = elem.className
    return _moose.getFieldNames(classname, fieldtype)
def isRunning():
    """Return True if the simulation is currently running."""
    return _moose.isRunning()


def move(src, dest):
    """Move a moose element `src` to destination `dest`."""
    return _moose.move(src, dest)
def reinit():
    """(Re)initialize the moose simulation.

    Must be called once before moose.start(). Calling it again resets the
    system to its initial state: data-recording tables are cleared and state
    variables return to their initial values — so do NOT call it between a
    start() and a continuation of the same run.
    """
    _moose.reinit()
def start(runtime, notify=False):
    """Advance the simulator clock by `runtime` time units.

    moose.reinit() MUST be called once before the first start(); otherwise
    simulator behaviour is undefined. After that, start() may be called
    repeatedly and each call continues from the last state.

    Parameters
    ----------
    runtime : float
        Duration of simulation.
    notify : bool
        If True, write a message to the terminal each time 10% of the
        simulation time completes (default False).

    Returns
    -------
    None

    See also
    --------
    moose.reinit : (Re)initialize simulation
    """
    _moose.start(runtime, notify)
def stop():
    """Stop the simulation."""
    _moose.stop()


def setCwe(arg):
    """Set the current working element.

    Parameters
    ----------
    arg : str, melement, vec
        Element or path to make the current working element.

    See also
    --------
    getCwe
    """
    _moose.setCwe(arg)


def ce(arg):
    """Alias for setCwe."""
    _moose.setCwe(arg)
def useClock(tick, path, fn):
    """Schedule function `fn` of every object matching `path` on tick `tick`.

    Ticks sharing the same dt fire in tick-number order, which controls
    update ordering where it matters. Conventionally: ticks 0-3 for
    electrical (biophysical) components, 4-5 for chemical kinetics, 6-7 for
    lookup tables and stimulus, 8-9 for recording tables.

    Parameters
    ----------
    tick : int
        Tick number on which the targets are scheduled.
    path : str
        Path of the target element(s); wildcards are allowed.
    fn : str
        Function to call on each tick, commonly 'process'.

    Examples
    --------
    A compartment must receive its neighbours' membrane potential before
    computing its own for the next step, so `init` (which communicates Vm)
    goes on tick 0 and `process` on tick 1:

    >>> moose.useClock(0, '/model/compartment_1', 'init')
    >>> moose.useClock(1, '/model/compartment_1', 'process'));
    """
    _moose.useClock(tick, path, fn)
def setClock(clockid, dt):
    """Set the ticking interval of tick `clockid` to `dt`.

    A tick with interval `dt` calls the functions scheduled on it every
    `dt` of simulation time.

    Parameters
    ----------
    clockid : int
        Tick number.
    dt : float
        Ticking interval.
    """
    _moose.setClock(clockid, dt)
def loadModel(filename, modelpath, solverclass="gsl"):
    """Load a model (genesis/cspace) from a file into a specified path.

    Parameters
    ----------
    filename : str
        Model description file.
    modelpath : str
        MOOSE path for the top-level element of the model to be created.
    solverclass : str
        Solver type used for simulating the model (default 'gsl').

    Returns
    -------
    melement
        The model element if successful, else None.

    See also
    --------
    moose.readNML2
    moose.readSBML
    moose.writeSBML
    """
    return model_utils.mooseReadKkitGenesis(filename, modelpath, solverclass)
def copy(src, dest, name="", n=1, toGlobal=False, copyExtMsg=False):
    """Make copies of a moose object.

    Parameters
    ----------
    src : vec/element/str
        Source object.
    dest : vec/element/str
        Destination object to copy into.
    name : str
        Name of the new object; the original's name is used when empty.
    n : int
        Number of copies to make (default 1).
    toGlobal : bool
        Parallel environments only: if False the copies stay on the local
        node, otherwise every node gets them.
    copyExtMsg : bool
        If True, messages to/from external objects are copied as well.

    Returns
    -------
    vec
        The newly copied vec.
    """
    source = _moose.element(src) if isinstance(src, str) else src
    target = _moose.element(dest) if isinstance(dest, str) else dest
    newname = name if name else source.name
    return _moose.copy(source.id, target, newname, n, toGlobal, copyExtMsg)
def rand(a=0.0, b=1.0):
    """Return a pseudorandom float generated by MT19937.

    Returns
    -------
    float
        Value in the [0, 1) real interval.

    Notes
    -----
    MOOSE does not seed its random number generator automatically; call
    moose.seed() explicitly to start a fresh sequence.

    See also
    --------
    moose.seed : reseed the random number generator.
    """
    return _moose.rand(a, b)
def seed(seed=0):
    """Reseed the MOOSE random number generator.

    Parameters
    ----------
    seed : int
        Seed value; by default (when this function is never called) the
        generator is initialized from the system random device if one is
        available.

    Returns
    -------
    None

    Notes
    -----
    All RNGs in moose use this seed, except the rand functions inside
    moose.Function expressions.

    See also
    --------
    moose.rand : get a pseudorandom number in the [0,1) interval.
    """
    _moose.seed(seed)
def pwe():
    """Print and return the present working element.

    Convenience function for GENESIS users; use moose.getCwe() if only the
    element (not the printout) is wanted.

    Returns
    -------
    melement
        Current MOOSE element.

    Example
    -------
    >>> pwe()
    '/'
    """
    current = _moose.getCwe()
    print(current.path)
    return current
def le(el=None):
    """List the elements under `el` (default: current working element).

    Parameters
    ----------
    el : str/melement/vec/None
        Element or path to look under; None means the current working
        element.

    Returns
    -------
    List[str]
        Paths of all children.
    """
    target = _moose.getCwe() if el is None else el
    if isinstance(target, str):
        target = _moose.element(target)
    elif isinstance(target, _moose.vec):
        target = target[0]
    return _moose.le(target)
def showfields(el, field="*", showtype=False):
    """Show fields of element `el` in human readable form (GENESIS-style).

    Parameters
    ----------
    el : melement/str
        Element or path of an existing element.
    field : str
        Field to display; '*' (default) displays all value fields.
    showtype : bool
        If True, also show the data type of each field (default False).

    Returns
    -------
    str
        The text that was printed.

    Raises
    ------
    ValueError
        If `el` is a path that does not exist.
    """
    if isinstance(el, str):
        if not _moose.exists(el):
            raise ValueError("no such element: %s" % el)
        el = _moose.element(el)
    result = []
    if field == "*":
        value_field_dict = _moose.getFieldDict(el.className, "valueFinfo")
        # default=0 keeps max() from raising on a class with no value fields.
        max_type_len = max((len(dtype) for dtype in value_field_dict.values()), default=0)
        max_field_len = max((len(fname) for fname in value_field_dict.keys()), default=0)
        result.append("\n[" + el.path + "]\n")
        for key, dtype in sorted(value_field_dict.items()):
            # Skip internal/self-referential and container-valued fields.
            if (dtype == "bad" or key in ("this", "dummy", "me")
                    or dtype.startswith("vector") or "ObjId" in dtype):
                continue
            value = el.getField(key)
            if showtype:
                result.append(dtype.ljust(max_type_len + 4) + " ")
            result.append(key.ljust(max_field_len + 4) + "=" + str(value) + "\n")
    else:
        try:
            # Bug fix: str() guards against non-string field values, which
            # previously raised TypeError on concatenation.
            result.append(field + "=" + str(el.getField(field)))
        except AttributeError:
            pass  # GENESIS silently ignores non-existent fields
    text = "".join(result)
    print(text)
    return text
def showfield(el, field="*", showtype=False):
    """Alias for showfields; see that function for parameter details."""
    return showfields(el, field, showtype)
def listmsg(arg):
    """Return the incoming and outgoing messages of an element.

    Parameters
    ----------
    arg : melement/vec/str
        MOOSE object or path of the object to look into.

    Returns
    -------
    list
        Msg objects for the incoming and outgoing connections of `arg`.

    Raises
    ------
    ValueError
        If `arg` cannot be resolved to a moose element.
    """
    elem = _moose.element(arg)
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if not elem:
        raise ValueError("could not resolve %s to a moose element" % arg)
    return _moose.listmsg(elem)
def showmsg(el):
    """Print the incoming and outgoing messages of `el`.

    Parameters
    ----------
    el : melement/vec/str
        Object whose messages are to be displayed.

    Returns
    -------
    None
    """
    print(_moose.showmsg(_moose.element(el)))
def doc(arg, paged=True):
    """Display the documentation for a moose class or a field of a class.

    Parameters
    ----------
    arg : str/class/melement/vec
        Either 'ClassName.fieldname' (a leading 'moose.' prefix is allowed,
        so 'moose.Neutral.name' equals 'Neutral.name'), a plain class name,
        a moose class, or a moose object; in the latter cases the builtin
        documentation for the corresponding class is shown.
    paged : bool
        If True (default), display the text through the pydoc pager;
        otherwise print it and return control to the command line.

    Returns
    -------
    None

    Raises
    ------
    NameError
        If the class or field does not exist.
    """
    text = _moose.__generatedoc__(arg)
    # Bug fix: the original tested the non-existent attribute `pydoc.page`
    # (an AttributeError at runtime) and ignored the `paged` argument.
    if paged:
        pydoc.pager(text)
    else:
        print(text)
# SBML related functions.
def readSBML(filepath, loadpath, solver="ee", validate=True):
    """Load an SBML model.

    Parameters
    ----------
    filepath : str
        File to be loaded.
    loadpath : str
        Root path for this model, e.g. /model/mymodel.
    solver : str
        Solver to use (default 'ee'); options include "ee", "gsl",
        "stochastic", "gillespie", "rk", "deterministic".
    validate : bool
        When True, run the schema validation.
    """
    return model_utils.mooseReadSBML(filepath, loadpath, solver, validate)
def writeSBML(modelpath, filepath, sceneitems=None):
    """Write the model loaded under `modelpath` to a file in SBML format.

    Parameters
    ----------
    modelpath : str
        Model path in moose, e.g. /model/mymodel.
    filepath : str
        Path of the output file.
    sceneitems : dict, optional
        Layout positions; users need not supply this. Positions are saved
        in the Annotation field of moose objects (pool, Reaction, enzyme):
        the GUI passes them explicitly, genesis/kkit models carry them in
        the file, and auto-coordinates are used otherwise.
    """
    # Bug fix: the mutable default `sceneitems={}` was shared across calls;
    # use the None-sentinel idiom instead (default behavior is unchanged).
    if sceneitems is None:
        sceneitems = {}
    return model_utils.mooseWriteSBML(modelpath, filepath, sceneitems)
def writeKkit(modelpath, filepath, sceneitems=None):
    """Write the model loaded under `modelpath` to a file in Kkit format.

    Parameters
    ----------
    modelpath : str
        Model path in moose.
    filepath : str
        Path of the output file.
    sceneitems : dict, optional
        Layout positions (see writeSBML).
    """
    # Bug fix: avoid the shared mutable default argument `{}`.
    if sceneitems is None:
        sceneitems = {}
    return model_utils.mooseWriteKkit(modelpath, filepath, sceneitems)
def readNML2(modelpath, verbose=False):
    """Load a NeuroML2 model.

    Parameters
    ----------
    modelpath : str
        Path of the nml2 file.
    verbose : bool
        If True, enable verbose logging (default False).

    Raises
    ------
    FileNotFoundError
        If `modelpath` is not found or not readable.
    """
    return model_utils.mooseReadNML2(modelpath, verbose)


def writeNML2(outfile):
    """Write model to NML2. (Not implemented)"""
    raise NotImplementedError("Writing to NML2 is not supported yet")
def addChemSolver(modelpath, solver):
    """Attach a solver to a chemical compartment and its children.

    (For developers)

    Parameters
    ----------
    modelpath : str
        Model path that is loaded into moose.
    solver : str
        'ee' (Exponential Euler, the default), 'gssa' (Gillespie), or
        'gsl'/'rk'/'rungekutta' (Runge-Kutta).

    See also
    --------
    deleteChemSolver
    """
    return model_utils.mooseAddChemSolver(modelpath, solver)


def deleteChemSolver(modelpath):
    """Delete the solvers on every compartment and its children.

    Notes
    -----
    Needed when creating a new moose object on a pre-existing modelpath;
    follow with addChemSolver to re-attach solvers for simulation,
    otherwise the default Exponential Euler (ee) is used.

    See also
    --------
    addChemSolver
    """
    return model_utils.mooseDeleteChemSolver(modelpath)
def mergeChemModel(modelpath, dest):
    """Merge the chemical model in file `modelpath` into the existing MOOSE
    model at path `dest`.

    Parameters
    ----------
    modelpath : str
        File containing a chemical model.
    dest : str
        Existing MOOSE path to merge into.
    """
    return model_utils.mooseMergeChemModel(modelpath, dest)
|
BhallaLab/moose-core
|
python/moose/__init__.py
|
Python
|
gpl-3.0
| 23,812
|
[
"MOOSE",
"NEURON"
] |
22f265a4ea99da192b5df79f9f99fabc572e54856ca0e1a20f4d52d18accf397
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns (matching, non_matching): the items for which `predicate` is
    true, then those for which it is false, each keeping original order.
    """
    true_items: List[Any] = []
    false_items: List[Any] = []
    for item in iterator:
        (true_items if predicate(item) else false_items).append(item)
    # Returns trueList, falseList
    return true_items, false_items
class kmsCallTransformer(cst.CSTTransformer):
    """libcst transformer that rewrites flattened KMS client method calls.

    Positional and request-field keyword arguments are folded into a single
    ``request={...}`` dict argument, while the call-control parameters
    (retry/timeout/metadata) are preserved as plain keyword arguments.
    """

    # Parameters that belong to the call itself, not to the request message.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Maps each KMS client method name to the ordered field names of its
    # request message; positional args are matched against this order.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'asymmetric_decrypt': ('name', 'ciphertext', 'ciphertext_crc32c', ),
        'asymmetric_sign': ('name', 'digest', 'digest_crc32c', 'data', 'data_crc32c', ),
        'create_crypto_key': ('parent', 'crypto_key_id', 'crypto_key', 'skip_initial_version_creation', ),
        'create_crypto_key_version': ('parent', 'crypto_key_version', ),
        'create_ekm_connection': ('parent', 'ekm_connection_id', 'ekm_connection', ),
        'create_import_job': ('parent', 'import_job_id', 'import_job', ),
        'create_key_ring': ('parent', 'key_ring_id', 'key_ring', ),
        'decrypt': ('name', 'ciphertext', 'additional_authenticated_data', 'ciphertext_crc32c', 'additional_authenticated_data_crc32c', ),
        'destroy_crypto_key_version': ('name', ),
        'encrypt': ('name', 'plaintext', 'additional_authenticated_data', 'plaintext_crc32c', 'additional_authenticated_data_crc32c', ),
        'generate_random_bytes': ('location', 'length_bytes', 'protection_level', ),
        'get_crypto_key': ('name', ),
        'get_crypto_key_version': ('name', ),
        'get_ekm_connection': ('name', ),
        'get_import_job': ('name', ),
        'get_key_ring': ('name', ),
        'get_public_key': ('name', ),
        'import_crypto_key_version': ('parent', 'algorithm', 'import_job', 'crypto_key_version', 'rsa_aes_wrapped_key', ),
        'list_crypto_keys': ('parent', 'page_size', 'page_token', 'version_view', 'filter', 'order_by', ),
        'list_crypto_key_versions': ('parent', 'page_size', 'page_token', 'view', 'filter', 'order_by', ),
        'list_ekm_connections': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
        'list_import_jobs': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
        'list_key_rings': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
        'mac_sign': ('name', 'data', 'data_crc32c', ),
        'mac_verify': ('name', 'data', 'mac', 'data_crc32c', 'mac_crc32c', ),
        'restore_crypto_key_version': ('name', ),
        'update_crypto_key': ('crypto_key', 'update_mask', ),
        'update_crypto_key_primary_version': ('name', 'crypto_key_version_id', ),
        'update_crypto_key_version': ('crypto_key_version', 'update_mask', ),
        'update_ekm_connection': ('ekm_connection', 'update_mask', ),
        'get_iam_policy': ('resource', 'options', ),
        'set_iam_policy': ('resource', 'policy', ),
        'test_iam_permissions': ('resource', 'permissions', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite a single Call node; returns it unchanged when the call is
        not a recognized API method."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        # Separate request-field keywords from control-plane keywords.
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )

        # Positional args beyond the request fields can only be the control
        # params, passed positionally; rebind them as keywords.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=kmsCallTransformer(),
):
    """Duplicate the input dir into the output dir, fixing method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    python_files = (
        pathlib.Path(root) / filename
        for root, _, filenames in os.walk(in_dir)
        for filename in filenames
        if os.path.splitext(filename)[1] == ".py"
    )
    for source_path in python_files:
        with open(source_path, 'r') as handle:
            original_code = handle.read()

        # Parse the code and apply the method-call fixes.
        fixed_module = cst.parse_module(original_code).visit(transformer)

        # Mirror the directory layout under the output root.
        target_path = out_dir.joinpath(source_path.relative_to(in_dir))
        target_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate the updated source file at the corresponding path.
        with open(target_path, 'w') as handle:
            handle.write(fixed_module.code)
if __name__ == '__main__':
    # CLI entry point: validate arguments, then hand off to fix_files.
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the kms client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Enforce fix_files' preconditions: both paths must be directories and
    # the output directory must be empty.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(input_dir, output_dir)
|
googleapis/python-kms
|
scripts/fixup_kms_v1_keywords.py
|
Python
|
apache-2.0
| 8,355
|
[
"VisIt"
] |
4159793087504aff34959c2afed9e8ab7fcc66343d4453d8f32e959dbaadd117
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012 Michal Kalewski <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
This package provides a collection of speed distribution classes.
Speed is a scalar quantity that describes the rate of change of a node position
in a simulation area (see: :mod:`sim2net.area`).
.. note::
In all speed distribution classes the quantity of speed should be
considered as simulation area units per one *simulation time* unit (see:
:mod:`sim2net._time`).
For example, the value of speed equal to :math:`5` would mean *five units
of simulation area per one unit of simulation time*.
"""
__docformat__ = 'reStructuredText'  # docstring markup used across sim2net
__all__ = ['constant', 'normal', 'uniform']  # available speed distributions
|
mkalewski/sim2net
|
sim2net/speed/__init__.py
|
Python
|
mit
| 1,175
|
[
"VisIt"
] |
5db82b400bb7f81c14cfd2be5f8c8ab7f5b384f2270460711712a14820a41656
|
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
from ansible import context
from ansible.errors import AnsibleError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
SUPPORTED_SCMS = set(['git', 'hg'])
META_MAIN = (os.path.join('meta', 'main.yml'), os.path.join('meta', 'main.yaml'))
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
META_REQUIREMENTS = (os.path.join('meta', 'requirements.yml'), os.path.join('meta', 'requirements.yaml'))
ROLE_DIRS = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')
    def __init__(self, galaxy, api, name, src=None, version=None, scm=None, path=None):
        """Set up a role descriptor; nothing is downloaded or installed here.

        galaxy supplies roles_paths; api is the Galaxy API client; name is
        the role name; src is the source spec (defaults to name); version,
        scm and path are optional install constraints.
        """
        # Lazily-populated caches backing the metadata / requirements /
        # install_info properties.
        self._metadata = None
        self._requirements = None
        self._install_info = None
        self._validate_certs = not context.CLIARGS['ignore_certs']

        display.debug('Validate TLS certificates: %s' % self._validate_certs)

        self.galaxy = galaxy
        self.api = api

        self.name = name
        self.version = version
        self.src = src or name
        self.scm = scm
        # Candidate install locations: one per configured roles path.
        self.paths = [os.path.join(x, self.name) for x in galaxy.roles_paths]

        if path is not None:
            if not path.endswith(os.path.join(os.path.sep, self.name)):
                path = os.path.join(path, self.name)
            else:
                # Look for a meta/main.ya?ml inside the potential role dir in case
                # the role name is the same as parent directory of the role.
                #
                # Example:
                #   ./roles/testing/testing/meta/main.yml
                for meta_main in self.META_MAIN:
                    if os.path.exists(os.path.join(path, name, meta_main)):
                        path = os.path.join(path, self.name)
                        break
            self.path = path
        else:
            # use the first path by default
            self.path = os.path.join(galaxy.roles_paths[0], self.name)
def __repr__(self):
"""
Returns "rolename (version)" if version is not null
Returns "rolename" otherwise
"""
if self.version:
return "%s (%s)" % (self.name, self.version)
else:
return self.name
def __eq__(self, other):
return self.name == other.name
    @property
    def metadata(self):
        """Role metadata loaded from a meta/main.ya?ml under the role paths.

        Returns False if a metadata file exists but cannot be parsed; None
        if no metadata file was found at all.

        NOTE(review): the inner ``break`` only exits the meta-file loop, so
        the scan continues over the remaining role paths and a later path's
        metadata can overwrite an earlier one — confirm this is intended.
        """
        if self._metadata is None:
            for path in self.paths:
                for meta_main in self.META_MAIN:
                    meta_path = os.path.join(path, meta_main)
                    if os.path.isfile(meta_path):
                        try:
                            with open(meta_path, 'r') as f:
                                self._metadata = yaml.safe_load(f)
                        except Exception:
                            display.vvvvv("Unable to load metadata for %s" % self.name)
                            return False
                        break

        return self._metadata
@property
def install_info(self):
"""
Returns role install info
"""
if self._install_info is None:
info_path = os.path.join(self.path, self.META_INSTALL)
if os.path.isfile(info_path):
try:
f = open(info_path, 'r')
self._install_info = yaml.safe_load(f)
except Exception:
display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
return False
finally:
f.close()
return self._install_info
@property
def _exists(self):
for path in self.paths:
if os.path.isdir(path):
return True
return False
    def _write_galaxy_install_info(self):
        """
        Writes a YAML-formatted file to the role's meta/ directory
        (named .galaxy_install_info) which contains some information
        we can use later for commands like 'list' and 'info'.

        Returns True on success, False if the dump fails.
        """
        info = dict(
            version=self.version,
            install_date=datetime.datetime.utcnow().strftime("%c"),
        )
        if not os.path.exists(os.path.join(self.path, 'meta')):
            os.makedirs(os.path.join(self.path, 'meta'))
        info_path = os.path.join(self.path, self.META_INSTALL)
        with open(info_path, 'w+') as f:
            try:
                # NOTE(review): yaml.safe_dump writes to `f` and returns
                # None, so this assignment clears the cached install info
                # rather than storing `info` — confirm whether intended.
                self._install_info = yaml.safe_dump(info, f)
            except Exception:
                return False

        return True
def remove(self):
"""
Removes the specified role from the roles path.
There is a sanity check to make sure there's a meta/main.yml file at this
path so the user doesn't blow away random directories.
"""
if self.metadata:
try:
rmtree(self.path)
return True
except Exception:
pass
return False
    def fetch(self, role_data):
        """Download the role archive to a temporary file.

        role_data is the dict returned by the Galaxy API — or, when the
        role src was a plain URL, that URL string. Returns the temp file
        path on success, False on failure or falsy role_data.
        """
        if role_data:
            # first grab the file and save it to a temp location
            if "github_user" in role_data and "github_repo" in role_data:
                archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
            else:
                archive_url = self.src

            display.display("- downloading role from %s" % archive_url)

            try:
                url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent())
                # delete=False: the caller reopens/extracts this file later
                # and is responsible for removing it.
                temp_file = tempfile.NamedTemporaryFile(delete=False)
                data = url_file.read()
                while data:
                    temp_file.write(data)
                    data = url_file.read()
                temp_file.close()
                return temp_file.name
            except Exception as e:
                display.error(u"failed to download the file: %s" % to_text(e))

        return False
def install(self):
    """
    Install this role under the configured roles path.

    Resolves the role to a tar archive (from SCM, a local file, a direct
    URL, or the Galaxy API), validates that the archive contains a
    meta/main.yml, then extracts the role's files under ``self.path``.

    :return: True on success, False if no archive could be obtained.
    :raises AnsibleError: on lookup, download, validation or extraction
        problems.
    """
    if self.scm:
        # create tar file from scm url
        tmp_file = RoleRequirement.scm_archive_role(keep_scm_meta=context.CLIARGS['keep_scm_meta'], **self.spec)
    elif self.src:
        if os.path.isfile(self.src):
            # Installing from a local archive file.
            tmp_file = self.src
        elif '://' in self.src:
            # Installing from a direct URL.
            role_data = self.src
            tmp_file = self.fetch(role_data)
        else:
            # Look the role up by name on the Galaxy API.
            role_data = self.api.lookup_role_by_name(self.src)
            if not role_data:
                raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.api.api_server))

            if role_data.get('role_type') == 'APP':
                # Container Role
                display.warning("%s is a Container App role, and should only be installed using Ansible "
                                "Container" % self.name)

            role_versions = self.api.fetch_role_related('versions', role_data['id'])
            if not self.version:
                # convert the version names to LooseVersion objects
                # and sort them to get the latest version. If there
                # are no versions in the list, we'll grab the head
                # of the master branch
                if len(role_versions) > 0:
                    loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
                    try:
                        loose_versions.sort()
                    except TypeError:
                        raise AnsibleError(
                            'Unable to compare role versions (%s) to determine the most recent version due to incompatible version formats. '
                            'Please contact the role author to resolve versioning conflicts, or specify an explicit role version to '
                            'install.' % ', '.join([v.vstring for v in loose_versions])
                        )
                    self.version = to_text(loose_versions[-1])
                elif role_data.get('github_branch', None):
                    self.version = role_data['github_branch']
                else:
                    self.version = 'master'
            elif self.version != 'master':
                # An explicit version was requested — verify it exists.
                if role_versions and to_text(self.version) not in [a.get('name', None) for a in role_versions]:
                    raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
                                                                                                                                    self.name,
                                                                                                                                    role_versions))

            # check if there's a source link for our role_version
            for role_version in role_versions:
                if role_version['name'] == self.version and 'source' in role_version:
                    self.src = role_version['source']

            tmp_file = self.fetch(role_data)

    else:
        raise AnsibleError("No valid role data found")

    if tmp_file:

        display.debug("installing from %s" % tmp_file)

        if not tarfile.is_tarfile(tmp_file):
            raise AnsibleError("the downloaded file does not appear to be a valid tar archive.")
        else:
            role_tar_file = tarfile.open(tmp_file, "r")
            # verify the role's meta file
            meta_file = None
            members = role_tar_file.getmembers()
            # next find the metadata file
            for member in members:
                for meta_main in self.META_MAIN:
                    if meta_main in member.name:
                        # Look for parent of meta/main.yml
                        # Due to possibility of sub roles each containing meta/main.yml
                        # look for shortest length parent
                        meta_parent_dir = os.path.dirname(os.path.dirname(member.name))
                        if not meta_file:
                            archive_parent_dir = meta_parent_dir
                            meta_file = member
                        else:
                            if len(meta_parent_dir) < len(archive_parent_dir):
                                archive_parent_dir = meta_parent_dir
                                meta_file = member
            if not meta_file:
                raise AnsibleError("this role does not appear to have a meta/main.yml file.")
            else:
                try:
                    self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
                except Exception:
                    raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")

            # we strip off any higher-level directories for all of the files contained within
            # the tar file here. The default is 'github_repo-target'. Gerrit instances, on the other
            # hand, does not have a parent directory at all.
            # Retry loop: on a permission error we may fall through to the
            # next configured roles path (see the OSError handler below).
            installed = False
            while not installed:
                display.display("- extracting %s to %s" % (self.name, self.path))
                try:
                    if os.path.exists(self.path):
                        if not os.path.isdir(self.path):
                            raise AnsibleError("the specified roles path exists and is not a directory.")
                        elif not context.CLIARGS.get("force", False):
                            raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
                        else:
                            # using --force, remove the old path
                            if not self.remove():
                                raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really "
                                                   "want to put the role here." % self.path)
                    else:
                        os.makedirs(self.path)

                    # now we do the actual extraction to the path
                    for member in members:
                        # we only extract files, and remove any relative path
                        # bits that might be in the file for security purposes
                        # and drop any containing directory, as mentioned above
                        if member.isreg() or member.issym():
                            n_member_name = to_native(member.name)
                            n_archive_parent_dir = to_native(archive_parent_dir)
                            n_parts = n_member_name.replace(n_archive_parent_dir, "", 1).split(os.sep)
                            n_final_parts = []
                            for n_part in n_parts:
                                # TODO if the condition triggers it produces a broken installation.
                                # It will create the parent directory as an empty file and will
                                # explode if the directory contains valid files.
                                # Leaving this as is since the whole module needs a rewrite.
                                if n_part != '..' and not n_part.startswith('~') and '$' not in n_part:
                                    n_final_parts.append(n_part)
                            member.name = os.path.join(*n_final_parts)
                            role_tar_file.extract(member, to_native(self.path))

                    # write out the install info file for later use
                    self._write_galaxy_install_info()
                    installed = True
                except OSError as e:
                    error = True
                    # EACCES on this path: try the next roles path, if any.
                    if e.errno == errno.EACCES and len(self.paths) > 1:
                        current = self.paths.index(self.path)
                        if len(self.paths) > current:
                            self.path = self.paths[current + 1]
                            error = False
                    if error:
                        raise AnsibleError("Could not update files in %s: %s" % (self.path, to_native(e)))

            # return the parsed yaml metadata
            display.display("- %s was installed successfully" % str(self))
            # Clean up the downloaded temp archive (but never a user-supplied
            # local archive file).
            if not (self.src and os.path.isfile(self.src)):
                try:
                    os.unlink(tmp_file)
                except (OSError, IOError) as e:
                    display.warning(u"Unable to remove tmp file (%s): %s" % (tmp_file, to_text(e)))
            return True

    return False
@property
def spec(self):
    """
    Returns role spec info

    {
        'scm': 'git',
        'src': 'http://git.example.com/repos/repo.git',
        'version': 'v1.0',
        'name': 'repo'
    }
    """
    return {
        'scm': self.scm,
        'src': self.src,
        'version': self.version,
        'name': self.name,
    }
@property
def requirements(self):
    """
    Returns role requirements, lazily loaded from the first
    meta requirements file (self.META_REQUIREMENTS) found under the
    role's path. Defaults to an empty list when no file exists or it
    cannot be parsed.
    """
    if self._requirements is None:
        self._requirements = []
        for meta_requirements in self.META_REQUIREMENTS:
            meta_path = os.path.join(self.path, meta_requirements)
            if os.path.isfile(meta_path):
                try:
                    # Fix: use a context manager. The previous code did
                    # f = open(...) inside try with finally: f.close(); if
                    # open() itself raised, 'f' was unbound and the finally
                    # clause raised NameError, masking the original error.
                    with open(meta_path, 'r') as f:
                        self._requirements = yaml.safe_load(f)
                except Exception:
                    display.vvvvv("Unable to load requirements for %s" % self.name)
                break

    return self._requirements
|
s-hertel/ansible
|
lib/ansible/galaxy/role.py
|
Python
|
gpl-3.0
| 17,649
|
[
"Brian",
"Galaxy"
] |
b54bd525015a6ba5b9e9db92e3d17adde7db9834f45eed13d9a2311fa188f7e1
|
# Generated from antlr4-python3-runtime-4.7.2/src/autogen/Grammar.g4 by ANTLR 4.7.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .GrammarParser import GrammarParser
else:
from GrammarParser import GrammarParser
# This class defines a complete generic visitor for a parse tree produced by GrammarParser.
'''
COMO RESGATAR INFORMAÇÕES DA ÁRVORE
Observe o seu Grammar.g4. Cada regra sintática gera uma função com o nome corespondente no Visitor e na ordem em que está na gramática.
Se for utilizar sua gramática do projeto 1, por causa de conflitos com Python, substitua as regras file por fiile e type por tyype. Use prints temporários para ver se está no caminho certo.
"make tree" agora desenha a árvore sintática, se quiser vê-la para qualquer input, enquanto "make" roda este visitor sobre o a árvore gerada a partir de Grammar.g4 alimentada pelo input.
Exemplos:
# Obs.: Os exemplos abaixo utilizam nós 'expression', mas servem apra qualquer tipo de nó
self.visitChildren(ctx) # visita todos os filhos do nó atual
expr = self.visit(ctx.expression()) # visita a subárvore do nó expression e retorna o valor retornado na função "visitRegra"
for i in range(len(ctx.expression())): # para cada expressão que este nó possui...
ident = ctx.expression(i) # ...pegue a i-ésima expressão
if ctx.FLOAT() != None: # se houver um FLOAT (em vez de INT ou VOID) neste nó (parser)
return Type.FLOAT # retorne tipo float
ctx.identifier().getText() # Obtém o texto contido no nó (neste caso, será obtido o nome do identifier)
token = ctx.identifier(i).IDENTIFIER().getPayload() # Obtém o token referente à uma determinada regra léxica (neste caso, IDENTIFIER)
token.line # variável com a linha do token
token.column # variável com a coluna do token
'''
# Dica: Retorne Type.INT, Type.FLOAT, etc. Nos nós e subnós das expressões para fazer a checagem de tipos enquanto percorre a expressão.
class Type:
    """String constants naming the primitive types of the checked language."""
    VOID = "void"
    INT = "int"
    FLOAT = "float"
    # Strings are represented as C character pointers.
    STRING = "char *"
class GrammarCheckerVisitor(ParseTreeVisitor):
    """Semantic-checking visitor over parse trees produced by GrammarParser.

    Most visit methods are the ANTLR-generated defaults that simply descend
    into their children; only function definitions are recorded so far.
    """

    # Maps each defined identifier to the data needed for checking.
    # For functions the value is the tuple (return type, params, None) —
    # see visitFunction_definition.
    # NOTE(review): this is a class-level dict, shared by all instances;
    # confirm the visitor is only ever used once per run.
    ids_defined = {}
    # Name of the function currently being visited, so nested visits can
    # look up data about the enclosing function.
    inside_what_function = ""

    # Visit a parse tree produced by GrammarParser#fiile.
    def visitFiile(self, ctx:GrammarParser.FiileContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#function_definition.
    def visitFunction_definition(self, ctx:GrammarParser.Function_definitionContext):
        # Record the function's signature *before* visiting its body, so the
        # body can reference the function (e.g. recursion).
        tyype = ctx.tyype().getText()
        name = ctx.identifier().getText()
        params = self.visit(ctx.arguments())
        self.ids_defined[name] = tyype, params, None
        self.inside_what_function = name
        self.visit(ctx.body())
        return

    # Visit a parse tree produced by GrammarParser#body.
    def visitBody(self, ctx:GrammarParser.BodyContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#statement.
    def visitStatement(self, ctx:GrammarParser.StatementContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#if_statement.
    def visitIf_statement(self, ctx:GrammarParser.If_statementContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#else_statement.
    def visitElse_statement(self, ctx:GrammarParser.Else_statementContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#for_loop.
    def visitFor_loop(self, ctx:GrammarParser.For_loopContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#for_initializer.
    def visitFor_initializer(self, ctx:GrammarParser.For_initializerContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#for_condition.
    def visitFor_condition(self, ctx:GrammarParser.For_conditionContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#for_step.
    def visitFor_step(self, ctx:GrammarParser.For_stepContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#variable_definition.
    def visitVariable_definition(self, ctx:GrammarParser.Variable_definitionContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#variable_assignment.
    def visitVariable_assignment(self, ctx:GrammarParser.Variable_assignmentContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#expression.
    def visitExpression(self, ctx:GrammarParser.ExpressionContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#array.
    def visitArray(self, ctx:GrammarParser.ArrayContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#array_literal.
    def visitArray_literal(self, ctx:GrammarParser.Array_literalContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#function_call.
    def visitFunction_call(self, ctx:GrammarParser.Function_callContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#arguments.
    def visitArguments(self, ctx:GrammarParser.ArgumentsContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#tyype.
    def visitTyype(self, ctx:GrammarParser.TyypeContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#integer.
    def visitInteger(self, ctx:GrammarParser.IntegerContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#floating.
    def visitFloating(self, ctx:GrammarParser.FloatingContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#string.
    def visitString(self, ctx:GrammarParser.StringContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by GrammarParser#identifier.
    def visitIdentifier(self, ctx:GrammarParser.IdentifierContext):
        return self.visitChildren(ctx)
|
damorim/compilers-cin
|
2020_3/projeto2/GrammarCheckerVisitor.py
|
Python
|
mit
| 6,465
|
[
"VisIt"
] |
23ecee7a205bba68d87700c4d8e923e6adb51fe70c6b548f69c2e77463ea987e
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import absolute_import
from numpy.testing import (
dec,
assert_,
assert_equal,
)
from nose.plugins.attrib import attr
import MDAnalysis as mda
from MDAnalysisTests import parser_not_found, make_Universe
from MDAnalysis.tests.datafiles import PSF, DCD
class TestSegment(object):
    """Checks on Segment behaviour using a minimal synthetic Universe."""

    def setUp(self):
        # Universe with segid topology attributes; keep the second segment.
        self.universe = make_Universe(('segids',))
        self.sB = self.universe.segments[1]

    def test_type(self):
        segment = self.sB
        assert_(isinstance(segment, mda.core.groups.Segment))
        assert_equal(segment.segid, "SegB")

    def test_index(self):
        single = self.sB.residues[3]
        assert_(isinstance(single, mda.core.groups.Residue))

    def test_slicing(self):
        sliced = self.sB.residues[:3]
        assert_equal(len(sliced), 3)
        assert_(isinstance(sliced, mda.core.groups.ResidueGroup))

    def test_advanced_slicing(self):
        fancy = self.sB.residues[[2, 1, 0, 2]]
        assert_equal(len(fancy), 4)
        assert_(isinstance(fancy, mda.core.groups.ResidueGroup))

    def test_atom_order(self):
        indices = self.universe.segments[0].atoms.indices
        assert_equal(indices, sorted(indices))
@attr("issue")
@dec.skipif(parser_not_found('DCD'),
            'DCD parser not available. Are you using python 3?')
def test_generated_residueselection():
    """Test that a generated residue group always returns a ResidueGroup (Issue 47)
    unless there is a single residue (Issue 363 change)"""
    u = mda.Universe(PSF, DCD)

    # AdK contains exactly one cysteine, so the selection collapses to a
    # single Residue (Issue 363).
    assert_(isinstance(u.s4AKE.CYS, mda.core.groups.Residue),
            "Single Cys77 is NOT returned as a single Residue (Issue 47)")

    # There are several methionines, so this selection stays a ResidueGroup.
    assert_(isinstance(u.s4AKE.MET, mda.core.groups.ResidueGroup),
            "Met selection does not return a ResidueGroup")

    del u
|
kain88-de/mdanalysis
|
testsuite/MDAnalysisTests/core/test_segment.py
|
Python
|
gpl-2.0
| 2,949
|
[
"MDAnalysis"
] |
44ef8ad2357f7e436a968688c0f897d9cbcb5632f12cbdf230936eef60d908ea
|
"""
================
Neuropop Example
================

A demonstration of Neuropop using simulated data
"""
########################################################

import numpy as np
import matplotlib.pyplot as plt

from spykes.ml.neuropop import NeuroPop
from spykes.utils import train_test_split

########################################################
# Create a NeuroPop object
# -----------------------------

# Population of 10 neurons whose tuning curves follow the 'glm' model.
n_neurons = 10
pop = NeuroPop(n_neurons=n_neurons, tunemodel='glm')

########################################################
# Simulate a population of neurons
# -----------------------------

# x: stimulus feature; Y: simulated population activity; the remaining
# values are the ground-truth tuning parameters used by the simulator.
n_samples = 1000
x, Y, mu, k0, k, g, b = pop.simulate(pop.tunemodel, n_samples=n_samples,
                                     winsize=400.0)

########################################################
# Split into training and testing sets
# -----------------------------

# Fixed seed for reproducibility; percent=0.5 gives a 50/50 split.
np.random.seed(42)
(Y_train, Y_test), (x_train, x_test) = train_test_split(Y, x, percent=0.5)

########################################################
# Fit the tuning curves with gradient descent
# -----------------------------

pop.fit(x_train, Y_train)

########################################################
# Predict the population activity with the fit tuning curves
# -----------------------------

Yhat_test = pop.predict(x_test)

########################################################
# Score the prediction
# -----------------------------

# Null model: the mean training activity per neuron.
Ynull = np.mean(Y_train, axis=0)

pseudo_R2 = pop.score(Y_test, Yhat_test, Ynull, method='pseudo_R2')
print(pseudo_R2)

########################################################
# Plot the simulated and fit tuning curves
# -----------------------------

plt.figure(figsize=[15, 15])
for neuron in range(pop.n_neurons):
    plt.subplot(4, 3, neuron + 1)
    pop.display(x_test, Y_test[:, neuron], neuron=neuron,
                ylim=[0.8 * np.min(Y_test[:, neuron]), 1.2 *
                      np.max(Y_test[:, neuron])])
plt.show()

########################################################
# Decode feature from the population activity
# -----------------------------

xhat_test = pop.decode(Y_test)

########################################################
# Visualize ground truth vs. decoded estimates
# -----------------------------

plt.figure(figsize=[6, 5])
plt.plot(x_test, xhat_test, 'k.', alpha=0.5)
plt.xlim([-1.2 * np.pi, 1.2 * np.pi])
plt.ylim([-1.2 * np.pi, 1.2 * np.pi])
plt.xlabel('Ground truth [radians]')
plt.ylabel('Decoded [radians]')
plt.tick_params(axis='y', right='off')
plt.tick_params(axis='x', top='off')
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)

plt.figure(figsize=[15, 5])
# Jitter the radial coordinate so overlapping points remain visible.
jitter = 0.2 * np.random.rand(x_test.shape[0])
plt.subplot(132, polar=True)
plt.plot(x_test, np.ones(x_test.shape[0]) + jitter, 'ko', alpha=0.5)
plt.title('Ground truth')
plt.subplot(133, polar=True)
plt.plot(xhat_test, np.ones(xhat_test.shape[0]) + jitter, 'co', alpha=0.5)
plt.title('Decoded')
plt.show()

########################################################
# Score decoding performance
# -----------------------------

# Circular correlation
circ_corr = pop.score(x_test, xhat_test, method='circ_corr')
print('Circular correlation: %f' % (circ_corr))

########################################################

# Cosine distance
cosine_dist = pop.score(x_test, xhat_test, method='cosine_dist')
print('Cosine distance: %f' % (cosine_dist))
|
codekansas/spykes
|
examples/plot_neuropop_simul_example.py
|
Python
|
mit
| 3,434
|
[
"NEURON"
] |
19d6ca371847bd64a76ee9cf3d9dae6f16715df07b39eaea7ec7b9f6ed89172d
|
from __future__ import division
import copy
import numpy
import random
import re
# Each entry is [name, one-mile time as "H:MM:SS", gender flag "M"/"F"].
# Commented-out runners are kept in place for easy re-inclusion.
RUNNERS = [
    ["Adam", "0:05:47", "M"],
    ["Ali", "0:06:38", "F"],
    ["Andrew", "0:06:55", "M"],
    ["Blonde Megan", "0:08:38", "F"],
    # ["Brian", "0:06:43", "M"],
    # ["Brian Tall", "0:06:24", "M"],
    # ["Collin", "0:06:14", "M"],
    # ["Dave", "0:06:01", "M"],
    # ["Dylan", "0:06:43", "M"],
    # ["Jake", "0:06:55", "M"],
    ["James", "0:07:51", "M"],
    ["Jason", "0:07:27", "M"],
    ["Jdao", "0:07:34", "M"],
    ["Jimbo", "0:07:51", "M"],
    ["Joe", "0:07:03", "M"],
    ["John", "0:07:15", "M"],
    ["Lauren", "0:08:09", "F"],
    ["Mark", "0:07:18", "M"],
    ["Matt", "0:05:54", "M"],
    ["Meaghan Creamer", "0:06:53", "F"],
    ["Ncik", "0:06:24", "M"],
    ["Nick", "0:06:21", "M"],
    ["Nicole", "0:08:38", "F"],
    ["Olivia", "0:07:25", "F"],
    ["Parks", "0:07:44", "M"],
    ["Poot", "0:07:19", "M"],
    ["Sam Tall", "0:05:23", "M"],
    ["Shaundry", "0:08:30", "F"],
    ["Tommy Doug", "0:05:48", "M"],
    ["Tucker", "0:07:10", "M"]
]

# Number of teams to split the runners into.
NUM_TEAMS = 4
# Number of random-mutation iterations the optimizer performs.
NUM_RUNS = 5000

# Statistic weights
# Relative importance of balancing total time, team size and gender
# in Solution.fitness_score (higher weight = balanced more aggressively).
TIME_WEIGHT = 1
TEAM_SIZE_WEIGHT = 10
GENDER_WEIGHT = 100
class Player(object):
    """A runner with a name, a mile time string, and a gender flag ("M"/"F")."""

    def __init__(self, name, time_string, gender):
        self.name = name
        self.time_string = time_string
        self.gender = gender

    @staticmethod
    def _get_numeric_time(time):
        """time is of the form "H:MM:SS". We convert to total seconds.

        Fix: the hours field used to be discarded (`_, minutes, seconds`),
        contradicting this docstring; it is now included. All current data
        is sub-hour, so results for existing RUNNERS are unchanged.
        """
        hours, minutes, seconds = map(int, re.split(":", time))
        return hours * 3600 + minutes * 60 + seconds

    def get_time(self):
        """Return this player's mile time in total seconds."""
        return self._get_numeric_time(self.time_string)

    def is_male(self):
        """True if the player's gender flag is "M"."""
        return self.gender == "M"
class Team(set):
def __str__(self):
return(" Time: {}. {} players, {} male: {}".format(
self.total_time(), len(self), self.num_males(),
", ".join([player.name for player in self])))
def total_time(self):
return sum([player.get_time() for player in self])
def num_males(self):
return len([player for player in self if player.is_male()])
class Solution(object):
    """A candidate assignment of every player to one of NUM_TEAMS teams."""

    def __init__(self):
        self.teams = []
        for _ in range(NUM_TEAMS):
            self.teams.append(Team())

    def fitness_score(self):
        """Lower is better; 0 would mean perfectly balanced teams."""
        # We use the measures of three variances to determine our score:
        #  - total team mile time
        #  - num males on the team
        #  - team size
        #
        # The lowest score will be the one that does the best at minimizing the
        # difference between teams in these catagories.
        time_variance = numpy.var([team.total_time() for team in self.teams])
        team_size_variance = numpy.var([len(team) for team in self.teams])
        gender_variance = numpy.var([team.num_males() for team in self.teams])
        return(time_variance * TIME_WEIGHT + team_size_variance * TEAM_SIZE_WEIGHT
               + gender_variance * GENDER_WEIGHT)

    def add_player_to_random_team(self, player):
        """Place player on a uniformly random team."""
        random.choice(self.teams).add(player)

    def change_random_player(self):
        """Move one random player to a (possibly identical) random team."""
        old_team = random.choice(self.teams)
        if len(old_team) == 0:
            # Fix: was a Python 2 `print` statement, a SyntaxError under
            # Python 3 (the rest of this file already uses print()).
            print("empty team")
            return
        # Fix: random.sample() on a set raises TypeError on Python >= 3.11;
        # picking uniformly from a materialized list is equivalent.
        player = random.choice(list(old_team))
        old_team.remove(player)
        new_team = random.choice(self.teams)
        new_team.add(player)

    def __str__(self):
        return "\n".join(map(str, self.teams))
def main():
    """Randomized hill-climb toward evenly balanced teams, printing progress."""
    best = Solution()
    for name, time, gender in RUNNERS:
        best.add_player_to_random_team(Player(name, time, gender))

    num_males = len([runner for runner in RUNNERS if runner[2] == "M"])
    print("Splitting {} players ({} male) into {} teams".format(
        len(RUNNERS), num_males, NUM_TEAMS))
    print("Starting with solution score {:.2f}:\n{}".format(
        best.fitness_score(), best))

    for run_num in range(NUM_RUNS):
        if run_num % 1000 == 0:
            print("Current best solution with score {:.2f}:\n{}".format(
                best.fitness_score(), best))
        # Mutate a deep copy; keep it only if it scores strictly better.
        candidate = copy.deepcopy(best)
        for _ in range(random.randint(1, 10)):
            candidate.change_random_player()
        if candidate.fitness_score() < best.fitness_score():
            best = candidate

    print("Best solution found, with solution score {:.2f}:\n{}".format(
        best.fitness_score(), best))


if __name__ == "__main__":
    main()
|
topher200/balanced-teams
|
balanced_teams.py
|
Python
|
mit
| 4,353
|
[
"Brian"
] |
5fdc6dafb0601337fc225d826dd51594537293ce9153e98087c31ad435dfc672
|
# -*- coding: utf-8 -*-
"""Specifies static assets (CSS, JS) required by the CATMAID front-end.
This module specifies all the static files that are required by the
CATMAID front-end. The configuration is separated in libraries and CATMAID's
own files:
Libraries: To add a new library, add a new entry into the libraries_js
dictionary and, if needed, add the libraries CSS files to sourcefiles
tuple of the 'library' entry in the ``STYLESHEETS`` dictionary.
CATMAID files: By default all CSS files in the ``static/css`` directory are
included as well as all JavaScript files in ``static/js`` and CATMAID's
subdirectories in it. However, if you want to add new files explicitly, add
CSS to the source_filenames tuple in the 'catmaid' entry of the ``STYLESHEETS``
dictionary. JavaScript files go into the 'catmaid' entry of the ``JAVASCRIPT``
dictonary at the end of this file.
"""
from collections import OrderedDict
from importlib import import_module
# python module names of CATMAID extensions which could potentially be installed
KNOWN_EXTENSIONS = (
'synapsesuggestor',
'autoproofreader',
'circuitmap',
)
class PipelineSpecUpdater(object):
    """Merges pipeline spec dicts contributed by CATMAID extensions into a
    base dict, guarding against ID and output-file collisions."""

    def __init__(self, input_dict=None):
        # Mutate the caller's dict in place when one is given.
        self.result = OrderedDict() if input_dict is None else input_dict
        self.existing_output_files = set()

    def update(self, other_dict, key_prefix='catmaid-ext-'):
        """Include items from other_dict in the input dict, ensuring that no data will be overwritten and the result
        will not cause multiple libraries to create static files of the same name. key_prefix will be prepended to
        the keys in other_dict when they are inserted into the input dict (default 'catmaid-ext-')."""
        for key, value in other_dict.items():
            prefixed = key_prefix + str(key)
            assert prefixed not in self.result, 'Extension static file IDs must not overwrite existing static file IDs'
            out_name = value['output_filename']
            assert out_name not in self.existing_output_files, \
                'Extension static files must not overwrite existing static files ({})'.format(out_name)
            self.existing_output_files.add(out_name)
            self.result[prefixed] = value
STYLESHEETS = OrderedDict()
STYLESHEETS['libraries'] = {
'source_filenames': (
'libs/jquery/themes/smoothness/jquery-ui.css',
'libs/jquery/datatable/css/demo_table.css',
'libs/jquery/datatable/extras/Buttons/css/buttons.dataTables.css',
'libs/jquery/jquery.growl.css',
'libs/jquery/jquery-ui.combobox.css',
'libs/jsTree/themes/default/style.css',
),
'output_filename': 'css/libraries.css',
'extra_context': {
'media': 'screen,projection',
}
}
STYLESHEETS['catmaid'] = {
'source_filenames': (
'css/*.css',
),
'output_filename': 'css/catmaid.css',
'extra_context': {
'media': 'screen,projection',
}
}
libraries_js = OrderedDict([
('modernizr', ['*.js']),
('jquery', ['jquery-2.1.3.min.js',
'jquery-ui.min.js', 'jquery-ui.*.js',
'jquery.dataTables.min.js', 'jquery.*.js',
'dataTables.buttons.js', 'buttons.html5.min.js']),
('jszip', ['*.js']),
('jsTree', ['jstree.js']),
('colorpicker', ['colors.js', 'colorPicker.data.js', 'colorPicker.js',
'jqColor.js']),
('fabric.js', ['all.modified.js']),
('raphael', ['raphael.js', 'g.raphael.js', 'g.pie-min.js', 'g.line.altered.js',
'raphael-custom.js', 'colorwheel.js', 'raphael.export.js']),
('d3', ['d3.v3.js', 'venn.js', 'mds.js', 'colorbrewer.js']),
('sylvester', ['sylvester.js']),
('msgpack-lite', ['msgpack.min.js']),
('numeric', ['numeric-1.2.6.js']),
('numjs', ['numjs.min.js']),
('three.js', ['three.js', 'controls/TrackballControls.js',
'camera/CombinedCamera.js', 'WebGL.js',
'lines/LineSegmentsGeometry.js', 'lines/LineGeometry.js',
'lines/LineSegments2.js', 'lines/Line2.js',
'lines/LineMaterial.js', 'loaders/VRMLLoader.js',
'lines/Wireframe.js', 'lines.WireframeGeometry2',
'loaders/VRMLLoader.js', 'renderer/Projector.js',
'renderer/SVGRenderer.js', 'exporters/OBJExporter.js',
'math/Lut.js', 'modifiers/*.js']),
('threex', ['*.js']),
('plotly', ['*.js']),
('pixi.js', ['*.js']),
('pointyjs', ['*.js']),
('cytoscapejs', ['cytoscape.js', 'cytoscape-spread.js',
'arbor.js', 'cytoscape-arbor.js',
'cola.js', 'cytoscape-cola.js',
'dagre.js', 'cytoscape-dagre.js',
'springy.js', 'cytoscape-springy.js']),
('jsnetworkx', ['*.js']),
('filesaver', ['*.js']),
('screw-filereader', ['*.js']),
('streamsaver', ['StreamSaver.js', 'polyfill.min.js']),
('webm-writer.js', ['*.js']),
('blazy', ['blazy.min.js']),
('geometry', ['geometry.js', 'intersects.js']), # order matters
('catmaid', ['namespace.js', 'error.js', 'events.js', 'request.js',
'tools.js', 'lru-cache.js', 'CATMAID.js', 'state.js',
'command.js', 'models/*.js', 'skeleton_source.js',
'datastores.js', 'settings-manager.js', '*.js']),
])
JAVASCRIPT = OrderedDict()

# Each library listed in libraries_js becomes one '<name>-lib' bundle built
# from its files under static/libs/<name>/.
for k, v in libraries_js.items():
    JAVASCRIPT[k + '-lib'] = {
        'source_filenames': ['libs/%s/%s' % (k, f) for f in v],
        'output_filename': 'js/libs/%s-lib.js' % k,
    }

# Some libraries expect their own JavaScript files to be available under a
# particular name. Therefore, we can't use pipeline with them and include them
# separately. Entries follow the same pattern as above: key - path.
non_pipeline_js = {}

# Even non-pipeline files have to be made known to pipeline, because it takes
# care of collecting them into the STATIC_ROOT directory.
# Output path equals source path, so the file is copied through unchanged.
for k, v in non_pipeline_js.items():
    JAVASCRIPT[k] = {
        'source_filenames': [v],
        'output_filename': v
    }
# Like non_pipeline_js, these files aren't compressed. They are however only
# copied to the output directory and are not supposed to be imported/loaded by
# the front-end.
copy_only_files = {
'streamsaver-worker-1': 'libs/streamsaver/worker/mitm.html',
'streamsaver-worker-2': 'libs/streamsaver/worker/ping.html',
'streamsaver-worker-3': 'libs/streamsaver/worker/ping.js',
'streamsaver-worker-4': 'libs/streamsaver/worker/sw.js',
'neuroglancer-worker': 'libs/neuroglancer/chunk_worker.bundle.js',
'neuroglancer-draco': 'libs/neuroglancer/draco.bundle.js',
'neuroglancer-tfjs-library': 'libs/neuroglancer/tfjs-library.bundle.js',
'neuroglancer-async-computation': 'libs/neuroglancer/async_computation.bundle.js',
'neuroglancer-main': 'libs/neuroglancer/main.bundle.js',
}
# Let pipeline know about copy-only files.
# As with non_pipeline_js above, output equals source so these are collected
# into STATIC_ROOT verbatim, without compression.
for k, v in copy_only_files.items():
    JAVASCRIPT[k] = {
        'source_filenames': [v],
        'output_filename': v
    }
# Regular CATMAID front-end files
JAVASCRIPT['catmaid'] = {
'source_filenames': (
'js/CATMAID.js',
'js/dom.js',
'js/extensions.js',
'js/data-view.js',
'js/action.js',
'js/settings-manager.js',
'js/helpers/*.js',
'js/init.js',
'js/network-api.js',
'js/project.js',
'js/stack.js',
'js/reoriented-stack.js',
'js/stack-viewer.js',
'js/tile-source.js',
'js/treelines.js',
'js/ui.js',
'js/layout.js',
'js/user.js',
'js/remote.js',
'js/WindowMaker.js',
'js/command.js',
'js/skeleton-model.js',
'js/skeleton-group.js',
'js/time-series.js',
'js/tools/navigator.js',
'js/tools/box-selection-tool.js',
'js/tools/roi-tool.js',
'js/tools/*.js',
'js/layers/stack-layer.js',
'js/layers/tile-layer.js',
'js/layers/pixi-layer.js',
'js/layers/pixi-tile-layer.js',
'js/layers/*.js',
'js/widgets/detail-dialog.js',
'js/widgets/options-dialog.js',
'js/3d/*.js',
'js/image-block.js',
'js/label-annotations.js',
'js/widgets/*.js',
),
'output_filename': 'js/catmaid.js',
}
# Names of the known extensions that were actually importable in this
# environment.
installed_extensions = []

stylesheet_updater = PipelineSpecUpdater(STYLESHEETS)
non_pipeline_js_updater = PipelineSpecUpdater(non_pipeline_js)
javascript_updater = PipelineSpecUpdater(JAVASCRIPT)

for app_name in KNOWN_EXTENSIONS:
    # Skip extensions that aren't installed (either the package itself or
    # its pipelinefiles module fails to import).
    try:
        app = import_module(app_name)
        installed_extensions.append(app_name)
        app_pipelinefiles = import_module(app_name + '.pipelinefiles')
    except ImportError:
        continue

    # Each of the three spec groups is optional per extension, hence the
    # individual AttributeError guards.
    try:
        stylesheet_updater.update(app_pipelinefiles.STYLESHEETS)
    except AttributeError:
        pass

    try:
        non_pipeline_js_updater.update(app_pipelinefiles.non_pipeline_js)
    except AttributeError:
        pass

    try:
        javascript_updater.update(app_pipelinefiles.JAVASCRIPT)
    except AttributeError:
        pass
|
tomka/CATMAID
|
django/projects/mysite/pipelinefiles.py
|
Python
|
gpl-3.0
| 9,206
|
[
"Cytoscape"
] |
6c474875998aa5b49296a4eee5a00dd669a5b984c3cdec4482b7ab918619b6e5
|
from ase.structure import molecule
from gpaw import GPAW
from gpaw import dscf
from gpaw.test import equal

# Ground state calculation
calc = GPAW(mode='lcao',
            basis='dzp',
            nbands=8,
            h=0.2,
            xc='PBE',
            spinpol=True,
            convergence={'energy': 100,
                         'density': 1e-3,
                         'bands': -1})

CO = molecule('CO')
CO.center(vacuum=3)
CO.set_calculator(calc)
E_gs = CO.get_potential_energy()

# Excited state calculation (delta-SCF with one electron promoted to the
# CO LUMO, constructed as an antisymmetric combination of the two
# molecular-orbital weights below)
calc_es = GPAW(mode='lcao',
               basis='dzp',
               nbands=8,
               h=0.2,
               xc='PBE',
               spinpol=True,
               convergence={'energy': 100,
                            'density': 1e-3,
                            'bands': -1})

CO.set_calculator(calc_es)
lumo = dscf.MolecularOrbital(calc,
                             weights={0: [0, 0, 0, 1], 1: [0, 0, 0, -1]})
dscf.dscf_calculation(calc_es, [[1.0, lumo, 1]], CO)
E_es = CO.get_potential_energy()

# Excitation energy = excited-state minus ground-state total energy.
dE = E_es - E_gs
# Fix: `print dE` was Python 2-only syntax (SyntaxError under Python 3);
# print(dE) produces identical output on both interpreters.
print(dE)
equal(dE, 5.7595110076, 0.011)
|
robwarm/gpaw-symm
|
gpaw/test/dscf_lcao.py
|
Python
|
gpl-3.0
| 1,095
|
[
"ASE",
"GPAW"
] |
77346e218b24dc7e954f723a93bb44fbbdc7d46a3305ddf3c89e5470a64be9d0
|
""" :mod: GFAL2_GSIFTPStorage
=================
.. module: python
:synopsis: GSIFTP module based on the GFAL2_StorageBase class.
"""
# from DIRAC
from DIRAC.Resources.Storage.GFAL2_StorageBase import GFAL2_StorageBase
from DIRAC import gLogger
from DIRAC.Core.Utilities.Pfn import pfnparse, pfnunparse
class GFAL2_GSIFTPStorage( GFAL2_StorageBase ):
  """ .. class:: GFAL2_GSIFTPStorage

  GSIFTP interface to StorageElement using gfal2.

  All URL-producing base-class methods are overridden to re-insert the
  double slash between host(:port) and path that the generic pfn
  utilities drop for non-SRM protocols.
  """

  # Protocols this plugin accepts as input and produces as output.
  _INPUT_PROTOCOLS = ['file', 'gsiftp']
  _OUTPUT_PROTOCOLS = ['gsiftp']

  def __init__( self, storageName, parameters ):
    """ c'tor

    :param storageName: name of the storage element
    :param parameters: storage configuration parameters
    """
    # # init base class
    super( GFAL2_GSIFTPStorage, self ).__init__( storageName, parameters )
    # gsiftp URLs are parsed with the generic (non-SRM) pfn parser.
    self.srmSpecificParse = False

    self.log = gLogger.getSubLogger( "GFAL2_GSIFTPStorage" )

    self.pluginName = 'GFAL2_GSIFTP'

    # We don't need extended attributes for metadata
    self._defaultExtendedAttributes = None

  def __addDoubleSlash( self, res ):
    """ Utilities to add the double slash between the host(:port) and the path

    :param res: DIRAC return structure which contains an URL if S_OK
    :return: DIRAC structure with corrected URL
    """
    if not res['OK']:
      return res
    url = res['Value']
    # Re-parse the URL and prefix the path with '/' so that unparsing
    # yields gsiftp://host//path instead of gsiftp://host/path.
    res = pfnparse( url, srmSpecific = self.srmSpecificParse )
    if not res['OK']:
      return res
    urlDict = res['Value']
    urlDict['Path'] = '/' + urlDict['Path']
    return pfnunparse( urlDict, srmSpecific = self.srmSpecificParse )

  def getURLBase( self, withWSUrl = False ):
    """ Overwrite to add the double slash """
    return self.__addDoubleSlash( super( GFAL2_GSIFTPStorage, self ).getURLBase( withWSUrl = withWSUrl ) )

  def constructURLFromLFN( self, lfn, withWSUrl = False ):
    """ Overwrite to add the double slash """
    return self.__addDoubleSlash( super( GFAL2_GSIFTPStorage, self ).constructURLFromLFN( lfn = lfn, withWSUrl = withWSUrl ) )

  def getCurrentURL( self, fileName ):
    """ Overwrite to add the double slash """
    return self.__addDoubleSlash( super( GFAL2_GSIFTPStorage, self ).getCurrentURL( fileName ) )
|
Andrew-McNab-UK/DIRAC
|
Resources/Storage/GFAL2_GSIFTPStorage.py
|
Python
|
gpl-3.0
| 2,107
|
[
"DIRAC"
] |
3343e8b44ea2a6c11381e3f582d6236ec5049549013788c57ea24075f1a8fd49
|
from ovito import *
import sys
# Exit with a non-zero status; given the test-suite location, presumably
# this checks that OVITO's script runner propagates SystemExit codes —
# TODO confirm against the harness.
sys.exit(2)
|
srinath-chakravarthy/ovito
|
tests/scripts/test_suite/system_exit.py
|
Python
|
gpl-3.0
| 43
|
[
"OVITO"
] |
ebea722074486e35c9197c95b00a4e54da47fbbd0a10a4c1453d8d812006ff9c
|
#!/usr/bin/env python
"Check binaries configuration"
#
# Copyright (C) 2012-2021 ABINIT Group (Yann Pouillon)
#
# This file is part of the ABINIT software package. For license information,
# please see the COPYING file in the top-level directory of the ABINIT source
# distribution.
#
from __future__ import unicode_literals, division, print_function, absolute_import
from abirules_tools import find_abinit_toplevel_directory
try:
from ConfigParser import ConfigParser,NoOptionError
except ImportError:
from configparser import ConfigParser, NoOptionError
import os
import re
import sys
class MyConfigParser(ConfigParser):
    """ConfigParser variant that keeps option names case-sensitive."""

    def optionxform(self, option):
        # The stock implementation lower-cases option names; return the
        # name verbatim (coerced to str) so case is preserved.
        return str(option)
# ---------------------------------------------------------------------------- #
#
# Auxiliary data
#
# Ordering level of each registered dependency. main() uses these to
# verify that each binary's "dependencies" list in binaries.conf is
# given in non-increasing level order (a higher level following a lower
# one is reported as disorder).
dep_levels = {
    "bigdft":10,
    "fft":8,
    "levmar":9,
    "libpsml":9,
    "libxc":9,
    "libxml2":0,
    "linalg":7,
    "mpi":1,
    "gpu":2,
    "hdf5":4,
    "netcdf":5,
    "netcdf_fortran":6,
    "papi":3,
    "triqs":3,
    "wannier90":9,
    "xmlf90":3,
}
def main():
    """Check dependency/library ordering in config/specs/binaries.conf.

    Returns the number of misordered entries found (0 on success) and
    prints a report of any disorder to stderr.
    """
    home_dir = find_abinit_toplevel_directory()

    # Init
    cnf_bin = MyConfigParser()
    cnf_fname = os.path.join(home_dir, "config/specs/binaries.conf")
    assert os.path.exists(cnf_fname)
    cnf_bin.read(cnf_fname)
    bin_list = cnf_bin.sections()
    bin_list.sort()

    # Maps program name -> list of out-of-order dependencies/libraries.
    dep_order = {}
    lib_order = {}
    # Library names are expected to start with a two-digit level prefix,
    # e.g. "42_libpaw".
    re_num = re.compile("[0-9][0-9]_")

    # Check order of dependencies and libraries
    for prg in bin_list:
        if cnf_bin.has_option(prg,"dependencies"):
            bin_deps = cnf_bin.get(prg,"dependencies").split()
        else:
            bin_deps = []
        dep_old = 100
        dep_new = 100
        for dep in bin_deps:
            if dep in dep_levels:
                dep_new = dep_levels[dep]
            else:
                # Unknown dependency name: abort immediately.
                sys.stderr.write("Error: unregistered dependency '%s'\n" % dep)
                sys.exit(10)
            # A level increase means the list is not in non-increasing
            # order; record the offending dependency.
            if dep_new > dep_old:
                if prg not in dep_order:
                    dep_order[prg] = list()
                dep_order[prg].append(dep)
            dep_old = dep_new
        if cnf_bin.has_option(prg,"libraries"):
            bin_libs = cnf_bin.get(prg,"libraries").split()
        else:
            bin_libs = list()
        lib_old = 100
        lib_new = 100
        for lib in bin_libs:
            # Only libraries with a numeric "NN_" prefix participate in
            # the ordering check.
            if re_num.match(lib):
                lib_new = int(re.sub("_.*","",lib))
                if lib_new > lib_old:
                    if prg not in lib_order:
                        lib_order[prg] = list()
                    lib_order[prg].append(lib)
                lib_old = lib_new

    # Report any disorder
    nerr = len(dep_order) + len(lib_order)
    if nerr > 0 :
        sys.stderr.write("%s: reporting disordered libraries\n\n" % (os.path.basename(sys.argv[0])))
        sys.stderr.write("X: D=Dependency / L=Library\n\n")
        sys.stderr.write("%s %-24s %-24s\n" % ("X","Program","Dependency/Library"))
        sys.stderr.write("%s %s %s\n" % ("-","-" * 24,"-" * 24))
        dep_keys = list(dep_order.keys())
        dep_keys.sort()
        for prg in dep_keys:
            for dep in dep_order[prg]:
                sys.stderr.write("%s %-24s %-24s\n" % ("D",prg,dep))
        lib_keys = list(lib_order.keys())
        lib_keys.sort()
        for prg in lib_keys:
            for lib in lib_order[prg]:
                sys.stderr.write("%s %-24s %-24s\n" % ("L",prg,lib))
        sys.stderr.write("\n")
    return nerr

if __name__ == "__main__":
    sys.exit(main())
|
abinit/abinit
|
abichecks/scripts/check-binaries-conf.py
|
Python
|
gpl-3.0
| 3,229
|
[
"ABINIT",
"NetCDF",
"Wannier90"
] |
b90bb834904d8568fc7c042b1f626f645d4b02abebb0eb977b37e0d6160f3735
|
from contextlib import contextmanager
from django.utils.crypto import get_random_string
from ..ga_helpers import GaccoTestMixin, SUPER_USER_INFO
from ...pages.biz.ga_contract import BizContractDetailPage, BizContractPage
from ...pages.biz.ga_dashboard import DashboardPage
from ...pages.biz.ga_invitation import BizInvitationPage, BizInvitationConfirmPage
from ...pages.biz.ga_w2ui import remove_grid_row_index
from ...pages.lms.ga_dashboard import DashboardPage as GaDashboardPage
# Fixture credentials for the various biz roles exercised by the tests.
PLATFORMER_USER_INFO = {
    'username': 'plat_platformer',
    'password': 'platPlatformer3',
    'email': 'plat_platformer@example.com',
}
AGGREGATOR_USER_INFO = {
    'username': 'owner_aggregator',
    'password': 'ownerAggregator3',
    'email': 'owner_aggregator@example.com',
}
A_DIRECTOR_USER_INFO = {
    'username': 'acom_director',
    'password': 'acomDirector3',
    'email': 'acom_director@example.com',
}
A_MANAGER_USER_INFO = {
    'username': 'acom_manager',
    'password': 'acomManager3',
    'email': 'acom_manager@example.com',
}
B_DIRECTOR_USER_INFO = {
    'username': 'bcom_director',
    'password': 'bcomDirector3',
    'email': 'bcom_director@example.com',
}
B_MANAGER_USER_INFO = {
    'username': 'bcom_manager',
    'password': 'bcomManager3',
    'email': 'bcom_manager@example.com',
}
C_DIRECTOR_USER_INFO = {
    'username': 'ccom_director',
    'password': 'ccomDirector3',
    'email': 'ccom_director@example.com',
}
C_MANAGER_USER_INFO = {
    'username': 'ccom_manager',
    'password': 'ccomManager3',
    'email': 'ccom_manager@example.com',
}

# Seeded organization ids, display names and codes — presumably matching
# rows created by the test data setup; confirm against the fixtures.
PLAT_COMPANY = 1
PLAT_COMPANY_NAME = 'plat org'
PLAT_COMPANY_CODE = 'plat'
OWNER_COMPANY = 2
OWNER_COMPANY_NAME = 'owner company'
OWNER_COMPANY_CODE = 'owner'
A_COMPANY = 3
A_COMPANY_NAME = 'A company'
A_COMPANY_CODE = 'acom'
B_COMPANY = 4
B_COMPANY_NAME = 'B company'
B_COMPANY_CODE = 'bcom'
C_COMPANY = 5
C_COMPANY_NAME = 'C company'
C_COMPANY_CODE = 'ccom'
@contextmanager
def visit_page_on_new_window(page_object):
    """Open *page_object*'s URL in a new browser window and yield the page.

    On exit — even if the ``with`` body raises — the extra window is
    closed and focus is restored to the original window, so a failing
    test does not leave the browser pointing at the wrong window.
    """
    current_handle = page_object.browser.current_window_handle
    page_object.browser.execute_script('''
        window.open("{}", "_blank");
    '''.format(page_object.url))
    page_object.browser.switch_to_window(page_object.browser.window_handles[-1])
    try:
        yield page_object.wait_for_page()
    finally:
        # Cleanup previously ran only on normal exit; a raised exception
        # skipped both close() and the switch back.
        page_object.browser.close()
        page_object.browser.switch_to_window(current_handle)
class GaccoBizTestMixin(GaccoTestMixin):
    """
    Mixin for gacco biz tests
    """

    def assert_grid_row(self, grid_row, assert_dict):
        """Assert that *grid_row* contains every key/value in *assert_dict*."""
        for assert_key, assert_value in assert_dict.items():
            self.assertIn(assert_key, grid_row)
            self.assertEqual(assert_value, grid_row[assert_key])

    def assert_grid_row_in(self, grid_row, grid_rows):
        """Assert *grid_row* occurs in *grid_rows*, ignoring row indices."""
        self.assertIn(
            remove_grid_row_index(grid_row),
            [remove_grid_row_index(r) for r in grid_rows]
        )

    def assert_grid_row_not_in(self, grid_row, grid_rows):
        """Assert *grid_row* does not occur in *grid_rows*, ignoring row indices."""
        self.assertNotIn(
            remove_grid_row_index(grid_row),
            [remove_grid_row_index(r) for r in grid_rows]
        )

    def assert_grid_row_equal(self, grid_rows_a, grid_rows_b):
        """Assert both row lists are equal, ignoring row indices."""
        self.assertEqual(
            [remove_grid_row_index(r) for r in grid_rows_a],
            [remove_grid_row_index(r) for r in grid_rows_b]
        )

    def create_contract(self, biz_contract_page, contract_type, start_date, end_date, contractor_organization='', contractor_organization_name=None,
                        detail_info=None, register_type='ERS'):
        """
        Register a contract via the contract detail page and return its
        grid row after verifying the registration succeeded.
        """
        biz_contract_page.click_register_button()
        biz_contract_detail_page = BizContractDetailPage(self.browser).wait_for_page()
        # Contract name and invitation code are derived from the test's
        # unique id so concurrent runs do not collide.
        contract_name = 'test_contract_' + self.unique_id[0:8]
        invitation_code = self.unique_id[0:8]
        biz_contract_detail_page.input(contract_name=contract_name, contract_type=contract_type, register_type=register_type,
                                       invitation_code=invitation_code, start_date=start_date,
                                       end_date=end_date, contractor_organization=contractor_organization)
        if contractor_organization_name:
            biz_contract_detail_page.select_contractor_organization(contractor_organization_name)
        if detail_info:
            for i, course_id in enumerate(detail_info):
                biz_contract_detail_page.add_detail_info(course_id, i + 1)
        biz_contract_detail_page.click_register_button()
        BizContractPage(self.browser).wait_for_page()
        self.assertIn("The new contract has been added.", biz_contract_page.messages)
        self.assert_grid_row(
            biz_contract_page.get_row({'Contract Name': contract_name}),
            {
                'Contract Name': contract_name,
                'Invitation Code': invitation_code,
                'Contract Start Date': start_date,
                'Contract End Date': end_date
            }
        )
        return biz_contract_page.get_row({'Contract Name': contract_name})

    def create_aggregator(self, with_contract_count=1):
        """Create a user, an organization and owner-type contracts, grant
        the user the aggregator role, and return (user, org, contracts)."""
        new_aggregator = self.register_user()
        new_org_info = self.register_organization(PLATFORMER_USER_INFO)
        new_contracts = []
        for i in range(with_contract_count):
            new_contracts.append(self.register_contract(PLATFORMER_USER_INFO, new_org_info['Organization Name'], 'O'))
        self.grant(PLATFORMER_USER_INFO, new_org_info['Organization Name'], 'aggregator', new_aggregator)
        return new_aggregator, new_org_info, new_contracts

    @property
    def new_password(self):
        # Random password satisfying the upper/lower/digit policy.
        return 'Aa0' + get_random_string(12)

    @property
    def new_user_info(self):
        # Fresh random credentials for a new throwaway test user.
        username = 'test_' + get_random_string(12)
        return {
            'username': username,
            'password': self.new_password,
            'email': username + '@example.com',
        }

    def grant(self, operator, organization_name, permission, grant_to_user_info):
        """As *operator*, grant *permission* on *organization_name* to the
        given user and verify it sticks across a page refresh."""
        self.switch_to_user(operator)
        biz_manager_page = DashboardPage(self.browser).visit().click_biz().click_manager().select(organization_name, permission)
        self.assertNotIn(grant_to_user_info['username'], biz_manager_page.names)
        self.assertNotIn(grant_to_user_info['email'], biz_manager_page.emails)
        biz_manager_page.input_user(grant_to_user_info['username']).click_grant()
        self.assertIn(grant_to_user_info['username'], biz_manager_page.names)
        self.assertIn(grant_to_user_info['email'], biz_manager_page.emails)
        biz_manager_page.refresh_page().select(organization_name, permission)
        self.assertIn(grant_to_user_info['username'], biz_manager_page.names)
        self.assertIn(grant_to_user_info['email'], biz_manager_page.emails)

    def register_contract(self, operator, contractor_organization_name, contract_type='PF', register_type='ERS',
                          start_date='2000/01/01', end_date='2100/12/31', detail_info=None):
        """As *operator*, register a contract for the named organization
        and return its grid row."""
        self.switch_to_user(operator)
        return self.create_contract(
            DashboardPage(self.browser).visit().click_biz().click_contract(),
            contract_type,
            start_date,
            end_date,
            contractor_organization_name=contractor_organization_name,
            detail_info=detail_info,
            register_type=register_type,
        )

    def register_organization(self, operator):
        """As *operator*, register a new organization with a random code
        and return its grid row."""
        self.switch_to_user(operator)
        biz_organization_page = DashboardPage(self.browser).visit().click_biz().click_organization()
        org_code = 'test_' + self.unique_id[0:8]
        org_name = 'org name ' + org_code
        biz_organization_page.click_add().input(org_name, org_code).click_register()
        self.assertIn('The new organization has been added.', biz_organization_page.messages)
        new_organization = biz_organization_page.get_row({
            'Organization Name': org_name,
            'Organization Code': org_code,
        })
        self.assertIsNotNone(new_organization)
        return new_organization

    def register_invitation(self, invitation_code, additional_info):
        """
        Register invitation code
        """
        BizInvitationPage(self.browser).visit().input_invitation_code(invitation_code).click_register_button()
        invitation_confirm_page = BizInvitationConfirmPage(self.browser, invitation_code).wait_for_page()
        if additional_info:
            for i, additional_name in enumerate(additional_info):
                invitation_confirm_page.input_additional_info(additional_name, i)
        invitation_confirm_page.click_register_button()
        return GaDashboardPage(self.browser).wait_for_page()
|
nttks/edx-platform
|
common/test/acceptance/tests/biz/__init__.py
|
Python
|
agpl-3.0
| 8,798
|
[
"VisIt"
] |
f32ce8d5f97112eb8a621f711a5dba680cdb880bdf7204a492dedc45752f5cae
|
# -*- coding: utf-8 -*-
#
# recording_demo.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Recording examples
------------------
This script demonstrates how to select different recording backends
and read the result data back in. The simulated network itself is
rather boring, with only a single Poisson generator stimulating a
single neuron, so we get some data.
"""
import nest
import numpy as np
def setup(record_to, time_in_steps):
    """Build the toy network and return its spike recorder.

    A high-rate Poisson generator drives a single iaf_psc_exp neuron
    whose spikes are captured by a spike_recorder configured with the
    given backend and time representation.
    """
    nest.ResetKernel()
    nest.SetKernelStatus({'overwrite_files': True})

    neuron = nest.Create('iaf_psc_exp')
    generator = nest.Create('poisson_generator', 1, {'rate': 1000000.})
    recorder = nest.Create('spike_recorder', 1,
                           {'record_to': record_to,
                            'time_in_steps': time_in_steps})

    nest.Connect(generator, neuron, syn_spec={'weight': 10.})
    nest.Connect(neuron, recorder)
    return recorder
def get_data(sr):
    """Return the events recorded by *sr*, read back via its backend.

    Returns None implicitly for any backend other than 'ascii' or
    'memory', matching the recorders created in setup().
    """
    backend = sr.record_to
    if backend == 'memory':
        return sr.get('events')
    if backend == 'ascii':
        return np.loadtxt(f'{sr.filenames[0]}', dtype=object)
# Just loop through some recording backends and settings, simulate
# briefly, and print the data each backend produced.
for time_in_steps in (True, False):
    for record_to in ('ascii', 'memory'):
        sr = setup(record_to, time_in_steps)
        nest.Simulate(30.0)
        data = get_data(sr)
        print(f"simulation resolution in ms: {nest.GetKernelStatus('resolution')}")
        print(f"data recorded by recording backend {record_to} (time_in_steps={time_in_steps})")
        print(data)
|
stinebuu/nest-simulator
|
pynest/examples/recording_demo.py
|
Python
|
gpl-2.0
| 2,234
|
[
"NEURON"
] |
057c7271fa4f253260a1bf290aada572a531776ed2b840fea7a585f7970ef492
|
import os
from Bio.Nexus import Nexus
def check_taxa(matrices):
    '''Checks that nexus instances in a list [(name, instance)...] have
    the same taxa, provides useful error if not and returns None if
    everything matches

    Raises ``Nexus.NexusError`` naming the mismatched matrix and taxa.

    From: http://biopython.org/wiki/Concatenate_nexus
    '''
    # Taxa of the first matrix are the reference set.
    first_taxa = matrices[0][1].taxlabels
    for name, matrix in matrices[1:]:
        first_only = [t for t in first_taxa if t not in matrix.taxlabels]
        new_only = [t for t in matrix.taxlabels if t not in first_taxa]
        if first_only:
            missing = ', '.join(first_only)
            # Fixed typo in the error message ("martix" -> "matrix").
            msg = '%s taxa %s not in matrix %s' % (matrices[0][0], missing, name)
            raise Nexus.NexusError(msg)
        elif new_only:
            missing = ', '.join(new_only)
            msg = '%s taxa %s not in all matrices' % (name, missing)
            raise Nexus.NexusError(msg)
    return None  # will only get here if it hasn't thrown an exception
def concat(mypath, same_taxa):
    ''' Combine multiple nexus data matrices in one partitioned file.

    By default this will only work if the same taxa are present in each file
    use same_taxa=False if you are not concerned by this

    From: http://biopython.org/wiki/Concatenate_nexus
    small change: added onlyfiles block to remove hidden files

    Fixes: files are now opened relative to *mypath* (previously they
    were opened relative to the CWD, which only worked because the
    caller chdir'd first), and each file handle is closed after parsing.
    '''
    onlyfiles = []
    for item in os.listdir(mypath):
        if not item.startswith('.') and os.path.isfile(os.path.join(mypath, item)):
            onlyfiles.append(item)

    nexi = []
    for nex in onlyfiles:
        with open(os.path.join(mypath, nex), 'r') as nex_open:
            nexi.append((nex, Nexus.Nexus(nex_open)))

    if same_taxa:
        # check_taxa returns None on success (and raises on mismatch),
        # so the combine happens only when the taxa sets all agree.
        if not check_taxa(nexi):
            return Nexus.combine(nexi)
    else:
        return Nexus.combine(nexi)
def output_conc_nex(mypath, outfilename, same_taxa=False):
    """Concatenate every nexus file in *mypath* and write the combined
    matrix to ``<outfilename>.nex`` inside that directory.

    NOTE: chdir's into *mypath* as a side effect (kept for backward
    compatibility with existing callers).
    """
    os.chdir(mypath)
    combined = concat(mypath, same_taxa)
    # Use a context manager so the output file is flushed and closed
    # (the handle was previously leaked).
    with open('%s.nex' % (outfilename), 'w') as out:
        combined.write_nexus_data(filename=out)
    return None
### how to use it (below)
# NOTE(review): runs at import time with a hard-coded local path —
# presumably left in as an example invocation; adjust before reuse.
mypath = '/Users/Tagliacollo/Downloads/sate-gblocks-clean-min-52-taxa'
outfilename = 'Harrington_2016'
output_conc_nex(mypath, outfilename)
|
Tagliacollo/Phyloworks
|
py_scripts/conc_nexus_aln.py
|
Python
|
gpl-3.0
| 2,208
|
[
"Biopython"
] |
c32c049c06a85980d81e9bac7d1245648bd65cc11097cdd2dc2e23da6161961d
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.