#!/usr/bin/env python
# Copyright 2017-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: James D. McClain
# Timothy Berkelbach <tim.berkelbach@gmail.com>
#
#import numpy as np
from itertools import product
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc.lib import kpts_helper
import numpy
from pyscf.lib.parameters import LARGE_DENOM # noqa
from pyscf.pbc.mp.kmp2 import (get_frozen_mask, get_nocc, get_nmo,
padded_mo_coeff, padding_k_idx) # noqa
from pyscf.pbc.cc.kccsd_rhf import _get_epq
from pyscf.pbc.cc.kccsd_t_rhf import _get_epqr
#einsum = numpy.einsum
einsum = lib.einsum
#################################################
# FOLLOWING:                                    #
# J. Gauss and J. F. Stanton,                   #
# J. Chem. Phys. 103, 3561 (1995) Table III     #
#################################################
### Section (a)
def make_tau(cc, t2, t1, t1p, kconserv, fac=1., out=None):
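    '''tau intermediate, cf. Gauss & Stanton Table III (a):
        tau[ij,ab] = t2[ij,ab] + fac * 1/2 * P(ij) P(ab) t1[i,a] * t1p[j,b]
    where P(ij)/P(ab) antisymmetrize the orbital indices together with their
    k-point labels.  With t1p = t1, fac=1.0 gives tau and fac=0.5 gives the
    tau-tilde used in the F intermediates below.
    '''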
nkpts, nocc, nvir = t1.shape
tau1 = numpy.ndarray(t2.shape, dtype=t2.dtype, buffer=out)
tau1[:] = t2
for ki in range(nkpts):
for ka in range(nkpts):
for kj in range(nkpts):
kb = kconserv[ki,ka,kj]
tmp = numpy.zeros((nocc,nocc,nvir,nvir),dtype=t2.dtype)
if ki == ka and kj == kb:
tmp += einsum('ia,jb->ijab',t1[ki],t1p[kj])
if ki == kb and kj == ka:
tmp -= einsum('ib,ja->ijab',t1[ki],t1p[kj])
if kj == ka and ki == kb:
tmp -= einsum('ja,ib->ijab',t1[kj],t1p[ki])
if kj == kb and ki == ka:
tmp += einsum('jb,ia->ijab',t1[kj],t1p[ki])
tau1[ki,kj,ka] += fac*0.5*tmp
return tau1
def cc_Fvv(cc,t1,t2,eris,kconserv):
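    '''Fvv intermediate, cf. Gauss & Stanton Table III (a):
        Fae = fvv - 1/2 fov(m,e) t1(m,a) + t1(m,f) <am||ef>
              - 1/2 tau~(mn,af) <mn||ef>
    '''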
nkpts, nocc, nvir = t1.shape
fov = eris.fock[:,:nocc,nocc:].copy()
fvv = eris.fock[:,nocc:,nocc:].copy()
# <o(k1)v(k2)||v(k3)v(k4)> = <v(k2)o(k1)||v(k4)v(k3)> = -<v(k2)o(k1)||v(k3)v(k4)>
eris_vovv = -eris.ovvv.transpose(1,0,2,4,3,5,6)
tau_tilde = make_tau(cc,t2,t1,t1,kconserv,fac=0.5)
Fae = numpy.zeros(fvv.shape, t1.dtype)
#kconserv = kpts_helper.get_kconserv(cc._scf.cell, cc.kpts)
for ka in range(nkpts):
Fae[ka] += fvv[ka]
Fae[ka] += -0.5*einsum('me,ma->ae',fov[ka],t1[ka])
for km in range(nkpts):
Fae[ka] += einsum('mf,amef->ae',t1[km],eris_vovv[ka,km,ka])
for kn in range(nkpts):
#kb = kconserv[km,ka,kn]
Fae[ka] += -0.5*einsum('mnaf,mnef->ae',tau_tilde[km,kn,ka],
eris.oovv[km,kn,ka])
return Fae
def cc_Foo(cc,t1,t2,eris,kconserv):
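    '''Foo intermediate, cf. Gauss & Stanton Table III (a):
        Fmi = foo + 1/2 fov(m,e) t1(i,e) + t1(n,e) <mn||ie>
              + 1/2 tau~(in,ef) <mn||ef>
    '''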
nkpts, nocc, nvir = t1.shape
fov = eris.fock[:,:nocc,nocc:].copy()
foo = eris.fock[:,:nocc,:nocc].copy()
tau_tilde = make_tau(cc,t2,t1,t1,kconserv,fac=0.5)
Fmi = numpy.zeros(foo.shape, t1.dtype)
for km in range(nkpts):
Fmi[km] += foo[km]
Fmi[km] += 0.5*einsum('me,ie->mi',fov[km],t1[km])
for kn in range(nkpts):
Fmi[km] += einsum('ne,mnie->mi',t1[kn],eris.ooov[km,kn,km])
for ke in range(nkpts):
Fmi[km] += 0.5*einsum('inef,mnef->mi',tau_tilde[km,kn,ke],
eris.oovv[km,kn,ke])
return Fmi
def cc_Fov(cc,t1,t2,eris,kconserv):
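    '''Fov intermediate, cf. Gauss & Stanton Table III (a):
        Fme = fov + t1(n,f) <mn||ef>
    '''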
nkpts, nocc, nvir = t1.shape
fov = eris.fock[:,:nocc,nocc:].copy()
Fme = numpy.zeros(fov.shape, t1.dtype)
for km in range(nkpts):
Fme[km] += fov[km]
for kf in range(nkpts):
kn = kf
Fme[km] -= einsum('nf,mnfe->me',t1[kf],eris.oovv[km,kn,kf])
return Fme
def cc_Woooo(cc,t1,t2,eris,kconserv):
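    '''Woooo intermediate, cf. Gauss & Stanton Table III (a):
        Wmnij = <mn||ij> + P(ij) t1(j,e) <mn||ie> + 1/4 tau(ij,ef) <mn||ef>
    '''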
nkpts, nocc, nvir = t1.shape
tau = make_tau(cc,t2,t1,t1,kconserv)
Wmnij = eris.oooo.copy()
for km in range(nkpts):
for kn in range(nkpts):
            # It is not enough to swap just i and j; the k-point labels must be
            # carried along so that P(ij) exchanges both (i, j) and (k_i, k_j).
            # t1[ k_j, j, e ] * v[ k_m, k_n, k_i, m, n, i, e ] -> tmp[ k_i, k_j, m, n, i, j ]
            # Here, x = k_j and y = k_i.
tmp = einsum('xje,ymnie->yxmnij',t1,eris.ooov[km,kn])
tmp = tmp - tmp.transpose(1,0,2,3,5,4)
ki = numpy.arange(nkpts)
kj = kconserv[km,ki,kn]
kij = (ki,kj)
Wmnij[km,kn,:] += 0.25*einsum('yxijef,xmnef->ymnij',tau[kij],eris.oovv[km,kn])
for ki in range(nkpts):
kj = kconserv[km,ki,kn]
Wmnij[km,kn,ki] += tmp[ki,kj]
return Wmnij
def cc_Wvvvv(cc,t1,t2,eris,kconserv):
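    '''Wvvvv intermediate, cf. Gauss & Stanton Table III (a):
        Wabef = <ab||ef> - P(ab) t1(m,b) <am||ef> + 1/4 tau(mn,ab) <mn||ef>
    '''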
nkpts, nocc, nvir = t1.shape
eris_vovv = - eris.ovvv.transpose(1,0,2,4,3,5,6)
tau = make_tau(cc,t2,t1,t1,kconserv)
Wabef = eris.vvvv.copy()
for ka in range(nkpts):
for kb in range(nkpts):
km = numpy.arange(nkpts).tolist()
            # km + kn - ka = kb  =>  kn = ka - km + kb
            kn = kconserv[ka,km,kb].tolist()
kmn = tuple([km,kn])
Wabef[ka,kb] += 0.25*einsum('xmnab,xymnef->yabef',tau.transpose(2,0,1,3,4,5,6)[ka][kmn],eris.oovv[kmn])
for ke in range(nkpts):
km = kb
tmp = einsum('mb,amef->abef',t1[kb],eris_vovv[ka,km,ke])
km = ka
tmp -= einsum('ma,bmef->abef',t1[ka],eris_vovv[kb,km,ke])
Wabef[ka,kb,ke] += -tmp
return Wabef
def cc_Wovvo(cc,t1,t2,eris,kconserv):
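    '''Wovvo intermediate, cf. Gauss & Stanton Table III (a):
        Wmbej = <mb||ej> + t1(j,f) <mb||ef> - t1(n,b) <mn||ej>
                - (1/2 t2(jn,fb) + t1(j,f) t1(n,b)) <mn||ef>
    '''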
nkpts, nocc, nvir = t1.shape
eris_ovvo = numpy.zeros(shape=(nkpts,nkpts,nkpts,nocc,nvir,nvir,nocc),dtype=t2.dtype)
eris_oovo = numpy.zeros(shape=(nkpts,nkpts,nkpts,nocc,nocc,nvir,nocc),dtype=t2.dtype)
for km in range(nkpts):
for kb in range(nkpts):
for ke in range(nkpts):
kj = kconserv[km,ke,kb]
# <mb||je> -> -<mb||ej>
eris_ovvo[km,kb,ke] = -eris.ovov[km,kb,kj].transpose(0,1,3,2)
# <mn||je> -> -<mn||ej>
# let kb = kn as a dummy variable
eris_oovo[km,kb,ke] = -eris.ooov[km,kb,kj].transpose(0,1,3,2)
Wmbej = eris_ovvo.copy()
for km in range(nkpts):
for kb in range(nkpts):
for ke in range(nkpts):
kj = kconserv[km,ke,kb]
Wmbej[km,kb,ke] += einsum('jf,mbef->mbej',t1[kj,:,:],eris.ovvv[km,kb,ke])
Wmbej[km,kb,ke] += -einsum('nb,mnej->mbej',t1[kb,:,:],eris_oovo[km,kb,ke])
temp = numpy.zeros([nkpts, nocc, nocc, nvir, nvir], dtype=t2.dtype)
for kn in range(nkpts):
kf = kconserv[km,ke,kn]
temp[kn] = -0.5*t2[kj,kn,kf].copy()
if kn == kb and kf == kj:
temp[kn] -= einsum('jf,nb->jnfb', t1[kj], t1[kn])
Wmbej[km,kb,ke] += einsum('xjnfb, xmnef->mbej', temp, eris.oovv[km,:,ke])
return Wmbej
def cc_Wovvo_jk(cc, t1, t2, eris, kconserv):
nkpts, nocc, nvir = t1.shape
Wmbej = eris.ovvo.copy()
for km in range(nkpts):
for kb in range(nkpts):
for ke in range(nkpts):
kj = kconserv[km,ke,kb]
Wmbej[km,kb,ke] += einsum('jf,mbef->mbej',t1[kj,:,:],eris.ovvv[km,kb,ke])
Wmbej[km,kb,ke] += -einsum('nb,mnej->mbej',t1[kb,:,:],eris.oovo[km,kb,ke])
for kn in range(nkpts):
kf = kconserv[km,ke,kn]
Wmbej[km,kb,ke] += -0.5*einsum('jnfb,mnef->mbej',t2[kj,kn,kf],
eris.oovv[km,kn,ke])
if kn == kb and kf == kj:
Wmbej[km,kb,ke] += -einsum('jf,nb,mnef->mbej',t1[kj],t1[kn],
eris.oovv[km,kn,ke])
return Wmbej
### Section (b)
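# The Section (b) intermediates are assembled from the Section (a) ones, e.g.
#     Fae <- cc_Fvv - 1/2 t1(m,a) cc_Fov
#     Fmi <- cc_Foo + 1/2 t1(i,e) cc_Fov
# following Gauss & Stanton Table III (b).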
def Fvv(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
ccFov = cc_Fov(cc,t1,t2,eris,kconserv)
Fae = cc_Fvv(cc,t1,t2,eris,kconserv)
for km in range(nkpts):
Fae[km] -= 0.5*einsum('ma,me->ae', t1[km], ccFov[km])
return Fae
def Foo(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
ccFov = cc_Fov(cc,t1,t2,eris,kconserv)
Fmi = cc_Foo(cc,t1,t2,eris,kconserv)
for km in range(nkpts):
Fmi[km] += 0.5*einsum('ie,me->mi',t1[km],ccFov[km])
return Fmi
def Fov(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
Fme = cc_Fov(cc,t1,t2,eris,kconserv)
return Fme
def Woooo(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
tau = make_tau(cc,t2,t1,t1,kconserv)
Wmnij = cc_Woooo(cc,t1,t2,eris,kconserv)
for km in range(nkpts):
for kn in range(nkpts):
for ki in range(nkpts):
kj = kconserv[km ,ki, kn]
Wmnij[km, kn, ki] += 0.25*einsum('xijef,xmnef->mnij',tau[ki, kj, :],
eris.oovv[km, kn, :])
return Wmnij
def Wvvvv(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
tau = make_tau(cc,t2,t1,t1,kconserv)
Wabef = cc_Wvvvv(cc,t1,t2,eris,kconserv)
for ka, kb, ke in kpts_helper.loop_kkk(nkpts):
#kf = kconserv[ka, ke, kb]
for km in range(nkpts):
kn = kconserv[ka, km, kb]
Wabef[ka, kb, ke] += 0.25*einsum('mnab,mnef->abef',tau[km, kn, ka],
eris.oovv[km, kn, ke])
return Wabef
def Wovvo(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
Wmbej = cc_Wovvo(cc,t1,t2,eris,kconserv)
for km, kb, ke in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ke, kb]
for kn in range(nkpts):
kf = kconserv[km, ke, kn]
Wmbej[km, kb, ke] -= 0.5*einsum('jnfb,mnef->mbej',t2[kj, kn, kf],
eris.oovv[km, kn, ke])
return Wmbej
# Indices in the following can be safely permuted.
def Wooov(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
Wmnie = eris.ooov.copy()
for km, kn, ki in kpts_helper.loop_kkk(nkpts):
kf = ki
Wmnie[km, kn, ki] += einsum('if,mnfe->mnie',t1[ki], eris.oovv[km, kn, kf])
return Wmnie
def Wvovv(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
Wamef = numpy.empty((nkpts, nkpts, nkpts, nvir, nocc, nvir, nvir), dtype=eris.ovvv.dtype)
for ka, km, ke in kpts_helper.loop_kkk(nkpts):
kn = ka
Wamef[ka, km, ke] = -eris.ovvv[km, ka, ke].transpose(1, 0, 2, 3)
Wamef[ka, km, ke] -= einsum('na,nmef->amef',t1[kn],eris.oovv[kn, km, ke])
return Wamef
def Wovoo(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
Wmbij = eris.ovoo.copy()
for km, kb, ki in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ki, kb]
for kn in range(nkpts):
Wmbij[km, kb, ki] += einsum('mnie,jnbe->mbij', eris.ooov[km, kn, ki], t2[kj, kn, kb])
Wmbij[km, kb, ki] += einsum('ie,mbej->mbij', t1[ki], -eris.ovov[km, kb, kj].transpose(0, 1, 3, 2))
for kf in range(nkpts):
kn = kconserv[kb, kj, kf]
Wmbij[km, kb, ki] -= einsum('ie,njbf,mnef->mbij', t1[ki], t2[kn, kj, kb], eris.oovv[km, kn, ki])
# P(ij)
for kn in range(nkpts):
Wmbij[km, kb, ki] -= einsum('mnje,inbe->mbij', eris.ooov[km, kn, kj], t2[ki, kn, kb])
Wmbij[km, kb, ki] -= einsum('je,mbei->mbij', t1[kj], -eris.ovov[km, kb, ki].transpose(0, 1, 3, 2))
for kf in range(nkpts):
kn = kconserv[kb, ki, kf]
Wmbij[km, kb, ki] += einsum('je,nibf,mnef->mbij', t1[kj], t2[kn, ki, kb], eris.oovv[km, kn, kj])
FFov = Fov(cc,t1,t2,eris,kconserv)
WWoooo = Woooo(cc,t1,t2,eris,kconserv)
tau = make_tau(cc,t2,t1,t1,kconserv)
for km, kb, ki in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ki, kb]
Wmbij[km, kb, ki] -= einsum('me,ijbe->mbij', FFov[km], t2[ki, kj, kb])
Wmbij[km, kb, ki] -= einsum('nb,mnij->mbij', t1[kb], WWoooo[km, kb, ki])
for km, kb, ki in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ki, kb]
Wmbij[km, kb, ki] += 0.5 * einsum('xmbef,xijef->mbij', eris.ovvv[km, kb, :], tau[ki, kj, :])
return Wmbij
def Wvvvo(cc,t1,t2,eris,kconserv,WWvvvv=None):
nkpts, nocc, nvir = t1.shape
FFov = Fov(cc,t1,t2,eris,kconserv)
if WWvvvv is None:
WWvvvv = Wvvvv(cc,t1,t2,eris,kconserv)
eris_ovvo = numpy.zeros(shape=(nkpts,nkpts,nkpts,nocc,nvir,nvir,nocc),dtype=t2.dtype)
for km in range(nkpts):
for kb in range(nkpts):
for ke in range(nkpts):
kj = kconserv[km,ke,kb]
eris_ovvo[km,kb,ke] = -eris.ovov[km,kb,kj].transpose(0,1,3,2)
tmp1 = numpy.zeros((nkpts, nkpts, nkpts, nvir, nvir, nvir, nocc),dtype=t2.dtype)
tmp2 = numpy.zeros((nkpts, nkpts, nkpts, nvir, nvir, nvir, nocc),dtype=t2.dtype)
for ka, kb, ke in kpts_helper.loop_kkk(nkpts):
ki = kconserv[ka,ke,kb]
tmp2[ka,kb,ke] += einsum('ma,mbei->abei',t1[ka],eris_ovvo[ka,kb,ke])
for kn in range(nkpts):
tmp2[ka,kb,ke] -= einsum('ma,nibf,mnef->abei',t1[ka],t2[kn,ki,kb],eris.oovv[ka,kn,ke])
for km in range(nkpts):
tmp1[ka,kb,ke] += einsum('mbef,miaf->abei',eris.ovvv[km,kb,ke],t2[km,ki,ka])
tau = make_tau(cc,t2,t1,t1,kconserv)
Wabei = -tmp1 + tmp1.transpose(1,0,2,4,3,5,6)
Wabei -= tmp2 - tmp2.transpose(1,0,2,4,3,5,6)
for ka, kb, ke in kpts_helper.loop_kkk(nkpts):
ki = kconserv[ka, ke, kb]
Wabei[ka, kb, ke] += eris.ovvv[ki, ke, kb].conj().transpose(3, 2, 1, 0)
Wabei[ka, kb, ke] += einsum('if,abef->abei',t1[ki],WWvvvv[ka, kb, ke])
Wabei[ka, kb, ke] -= einsum('me,miab->abei',FFov[ke],t2[ke, ki, ka])
for km in range(nkpts):
kn = kconserv[ka, km, kb]
Wabei[ka, kb, ke] += 0.5 * einsum('nmie,mnab->abei',
eris.ooov[kn, km, ki],
tau[km, kn, ka])
return Wabei
def get_full_t3p2(mycc, t1, t2, eris):
'''Build the entire T3[2] array in memory.
'''
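    # Sketch of the construction below:
    #     t3[2] = P(ijk) P(abc) W(ijk,abc) / D(ijk,abc)
    # where W contracts t2 with the <ov||vv> and <oo||ov> integrals (see
    # get_wijkabc) and D is the occupied-minus-virtual energy denominator.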
nkpts = mycc.nkpts
nocc = mycc.nocc
nmo = mycc.nmo
nvir = nmo - nocc
kconserv = mycc.khelper.kconserv
def get_wijkabc(ki, kj, kk, ka, kb, kc):
'''Build T3[2] for `ijkabc` at a given set of k-points'''
km = kconserv[kc, kk, kb]
kf = kconserv[kk, kc, kj]
ret = einsum('kjcf,ifab->ijkabc', t2[kk,kj,kc], eris.ovvv[ki,kf,ka].conj())
ret = ret - einsum('jima,mkbc->ijkabc', eris.ooov[kj,ki,km].conj(), t2[km,kk,kb])
return ret
#fock = eris.fock
#fov = fock[:, :nocc, nocc:]
#foo = numpy.array([fock[ikpt, :nocc, :nocc].diagonal() for ikpt in range(nkpts)])
#fvv = numpy.array([fock[ikpt, nocc:, nocc:].diagonal() for ikpt in range(nkpts)])
mo_energy_occ = numpy.array([eris.mo_energy[ki][:nocc] for ki in range(nkpts)])
mo_energy_vir = numpy.array([eris.mo_energy[ki][nocc:] for ki in range(nkpts)])
mo_e_o = mo_energy_occ
mo_e_v = mo_energy_vir
# Get location of padded elements in occupied and virtual space
nonzero_opadding, nonzero_vpadding = padding_k_idx(mycc, kind="split")
t3 = numpy.empty((nkpts,nkpts,nkpts,nkpts,nkpts,nocc,nocc,nocc,nvir,nvir,nvir),
dtype=t2.dtype)
for ki, kj, kk, ka, kb in product(range(nkpts), repeat=5):
kc = kpts_helper.get_kconserv3(mycc._scf.cell, mycc.kpts,
[ki, kj, kk, ka, kb])
# Perform P(abc)
t3[ki,kj,kk,ka,kb] = get_wijkabc(ki,kj,kk,ka,kb,kc)
t3[ki,kj,kk,ka,kb] += get_wijkabc(ki,kj,kk,kb,kc,ka).transpose(0,1,2,5,3,4)
t3[ki,kj,kk,ka,kb] += get_wijkabc(ki,kj,kk,kc,ka,kb).transpose(0,1,2,4,5,3)
# Perform P(ijk)
t3 = (t3.transpose(0,1,2,3,4,5,6,7,8,9,10) +
t3.transpose(1,2,0,3,4,6,7,5,8,9,10) +
t3.transpose(2,0,1,3,4,7,5,6,8,9,10))
for ki, kj, kk in product(range(nkpts), repeat=3):
eijk = _get_epqr([0,nocc,ki,mo_e_o,nonzero_opadding],
[0,nocc,kj,mo_e_o,nonzero_opadding],
[0,nocc,kk,mo_e_o,nonzero_opadding])
for ka, kb in product(range(nkpts), repeat=2):
kc = kpts_helper.get_kconserv3(mycc._scf.cell, mycc.kpts,
[ki, kj, kk, ka, kb])
eabc = _get_epqr([0,nvir,ka,mo_e_v,nonzero_vpadding],
[0,nvir,kb,mo_e_v,nonzero_vpadding],
[0,nvir,kc,mo_e_v,nonzero_vpadding],
fac=[-1.,-1.,-1.])
eijkabc = eijk[:, :, :, None, None, None] + eabc[None, None, None, :, :, :]
t3[ki,kj,kk,ka,kb] /= eijkabc
return t3
def get_t3p2_imds_slow(cc, t1, t2, eris=None, t3p2_ip_out=None, t3p2_ea_out=None):
"""Calculates T1, T2 amplitudes corrected by second-order T3 contribution
and intermediates used in IP/EA-CCSD(T)a
Args:
cc (:obj:`KGCCSD`):
Object containing coupled-cluster results.
t1 (:obj:`ndarray`):
T1 amplitudes.
t2 (:obj:`ndarray`):
T2 amplitudes from which the T3[2] amplitudes are formed.
eris (:obj:`_PhysicistsERIs`):
Antisymmetrized electron-repulsion integrals in physicist's notation.
t3p2_ip_out (:obj:`ndarray`):
Store results of the intermediate used in IP-EOM-CCSD(T)a.
t3p2_ea_out (:obj:`ndarray`):
Store results of the intermediate used in EA-EOM-CCSD(T)a.
Returns:
delta_ccsd (float):
Difference of perturbed and unperturbed CCSD ground-state energy,
energy(T1 + T1[2], T2 + T2[2]) - energy(T1, T2)
pt1 (:obj:`ndarray`):
Perturbatively corrected T1 amplitudes.
pt2 (:obj:`ndarray`):
Perturbatively corrected T2 amplitudes.
Reference:
D. A. Matthews, J. F. Stanton "A new approach to approximate..."
JCP 145, 124102 (2016); DOI:10.1063/1.4962910, Equation 14
Shavitt and Bartlett "Many-body Methods in Physics and Chemistry"
2009, Equation 10.33
"""
if eris is None:
eris = cc.ao2mo()
fock = eris.fock
nkpts, nocc, nvir = t1.shape
kconserv = cc.khelper.kconserv
fov = [fock[ikpt, :nocc, nocc:] for ikpt in range(nkpts)]
#foo = [fock[ikpt, :nocc, :nocc].diagonal() for ikpt in range(nkpts)]
#fvv = [fock[ikpt, nocc:, nocc:].diagonal() for ikpt in range(nkpts)]
mo_energy_occ = numpy.array([eris.mo_energy[ki][:nocc] for ki in range(nkpts)])
mo_energy_vir = numpy.array([eris.mo_energy[ki][nocc:] for ki in range(nkpts)])
# Get location of padded elements in occupied and virtual space
nonzero_opadding, nonzero_vpadding = padding_k_idx(cc, kind="split")
mo_e_o = mo_energy_occ
mo_e_v = mo_energy_vir
ccsd_energy = cc.energy(t1, t2, eris)
dtype = numpy.result_type(t1, t2)
if t3p2_ip_out is None:
t3p2_ip_out = numpy.zeros((nkpts,nkpts,nkpts,nocc,nvir,nocc,nocc), dtype=dtype)
Wmcik = t3p2_ip_out
if t3p2_ea_out is None:
t3p2_ea_out = numpy.zeros((nkpts,nkpts,nkpts,nvir,nvir,nvir,nocc), dtype=dtype)
Wacek = t3p2_ea_out
t3 = get_full_t3p2(cc, t1, t2, eris)
pt1 = numpy.zeros((nkpts, nocc, nvir), dtype=dtype)
for ki in range(nkpts):
ka = ki
for km, kn, ke in product(range(nkpts), repeat=3):
pt1[ki] += 0.25 * lib.einsum('mnef,imnaef->ia', eris.oovv[km,kn,ke], t3[ki,km,kn,ka,ke])
eia = _get_epq([0,nocc,ki,mo_e_o,nonzero_opadding],
[0,nvir,ka,mo_e_v,nonzero_vpadding],
fac=[1.0,-1.0])
pt1[ki] /= eia
pt2 = numpy.zeros((nkpts, nkpts, nkpts, nocc, nocc, nvir, nvir), dtype=dtype)
for ki, kj, ka in product(range(nkpts), repeat=3):
kb = kconserv[ki,ka,kj]
for km in range(nkpts):
pt2[ki,kj,ka] += lib.einsum('ijmabe,me->ijab', t3[ki,kj,km,ka,kb], fov[km])
for ke in range(nkpts):
kf = kconserv[km,ke,kb]
pt2[ki,kj,ka] += 0.5 * lib.einsum('ijmaef,mbfe->ijab', t3[ki,kj,km,ka,ke], eris.ovvv[km,kb,kf])
kf = kconserv[km,ke,ka]
pt2[ki,kj,ka] -= 0.5 * lib.einsum('ijmbef,mafe->ijab', t3[ki,kj,km,kb,ke], eris.ovvv[km,ka,kf])
for kn in range(nkpts):
pt2[ki,kj,ka] -= 0.5 * lib.einsum('inmabe,nmje->ijab', t3[ki,kn,km,ka,kb], eris.ooov[kn,km,kj])
pt2[ki,kj,ka] += 0.5 * lib.einsum('jnmabe,nmie->ijab', t3[kj,kn,km,ka,kb], eris.ooov[kn,km,ki])
eia = _get_epq([0,nocc,ki,mo_e_o,nonzero_opadding],
[0,nvir,ka,mo_e_v,nonzero_vpadding],
fac=[1.0,-1.0])
ejb = _get_epq([0,nocc,kj,mo_e_o,nonzero_opadding],
[0,nvir,kb,mo_e_v,nonzero_vpadding],
fac=[1.0,-1.0])
eijab = eia[:, None, :, None] + ejb[:, None, :]
pt2[ki,kj,ka] /= eijab
pt1 += t1
pt2 += t2
for ki, kj, kk, ka, kb in product(range(nkpts), repeat=5):
kc = kpts_helper.get_kconserv3(cc._scf.cell, cc.kpts,
[ki, kj, kk, ka, kb])
tmp = t3[ki,kj,kk,ka,kb]
km = kconserv[ki,kc,kk]
ke = kconserv[ka,kk,kc]
Wmcik[km,kc,ki] += 0.5*lib.einsum('ijkabc,mjab->mcik', tmp, eris.oovv[km,kj,ka])
Wacek[ka,kc,ke] += -0.5*lib.einsum('ijkabc,ijeb->acek', tmp, eris.oovv[ki,kj,ke])
delta_ccsd_energy = cc.energy(pt1, pt2, eris) - ccsd_energy
logger.info(cc, 'CCSD energy T3[2] correction : %14.8e', delta_ccsd_energy)
return delta_ccsd_energy, pt1, pt2, Wmcik, Wacek
# ===========================================================================
# Source: sunqm/pyscf, pyscf/pbc/cc/kintermediates.py (Python, Apache-2.0,
# 22,280 bytes; keyword: PySCF).
# The test module below comes from a separate source file.
# ===========================================================================
import gc
import io
import random
import re
import string
import tempfile
from os import environ as env
import h5py
import netCDF4
import numpy as np
import pytest
from packaging import version
from pytest import raises
import h5netcdf
from h5netcdf import legacyapi
from h5netcdf.core import NOT_A_VARIABLE, CompatibilityError
try:
import h5pyd
without_h5pyd = False
except ImportError:
without_h5pyd = True
remote_h5 = ("http:", "hdf5:")
@pytest.fixture()
def restapi(pytestconfig):
return pytestconfig.getoption("restapi")
@pytest.fixture
def tmp_local_netcdf(tmpdir):
return str(tmpdir.join("testfile.nc"))
@pytest.fixture(params=["testfile.nc", "hdf5://testfile"])
def tmp_local_or_remote_netcdf(request, tmpdir, restapi):
if request.param.startswith(remote_h5):
if not restapi:
pytest.skip("Do not test with HDF5 REST API")
elif without_h5pyd:
pytest.skip("h5pyd package not available")
if any([env.get(v) is None for v in ("HS_USERNAME", "HS_PASSWORD")]):
pytest.skip("HSDS username and/or password missing")
rnd = "".join(random.choice(string.ascii_uppercase) for _ in range(5))
return (
env["HS_ENDPOINT"]
+ env["H5PYD_TEST_FOLDER"]
+ "/"
+ "testfile"
+ rnd
+ ".nc"
)
else:
return str(tmpdir.join(request.param))
@pytest.fixture(params=[True, False])
def decode_vlen_strings(request):
if version.parse(h5py.__version__) >= version.parse("3.0.0"):
return dict(decode_vlen_strings=request.param)
else:
return {}
@pytest.fixture(params=[netCDF4, legacyapi])
def netcdf_write_module(request):
return request.param
def get_hdf5_module(resource):
"""Return the correct h5py module based on the input resource."""
if isinstance(resource, str) and resource.startswith(remote_h5):
return h5pyd
else:
return h5py
def string_to_char(arr):
"""Like nc4.stringtochar, but faster and more flexible."""
# ensure the array is contiguous
arr = np.array(arr, copy=False, order="C")
kind = arr.dtype.kind
if kind not in ["U", "S"]:
raise ValueError("argument must be a string")
return arr.reshape(arr.shape + (1,)).view(kind + "1")
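# For example (a sketch of the semantics): a (2,)-shaped "S2" array becomes a
# (2, 2)-shaped "S1" array,
#     string_to_char(np.array(["ab", "cd"], dtype="S2"))
#     # -> [[b"a", b"b"], [b"c", b"d"]]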
def array_equal(a, b):
a, b = map(np.array, (a[...], b[...]))
if a.shape != b.shape:
return False
try:
return np.allclose(a, b)
except TypeError:
return (a == b).all()
_char_array = string_to_char(np.array(["a", "b", "c", "foo", "bar", "baz"], dtype="S"))
_string_array = np.array(
[["foobar0", "foobar1", "foobar3"], ["foofoofoo", "foofoobar", "foobarbar"]]
)
_vlen_string = "foo"
def is_h5py_char_working(tmp_netcdf, name):
h5 = get_hdf5_module(tmp_netcdf)
# https://github.com/Unidata/netcdf-c/issues/298
with h5.File(tmp_netcdf, "r") as ds:
v = ds[name]
try:
assert array_equal(v, _char_array)
return True
except Exception as e:
if re.match("^Can't read data", e.args[0]):
return False
else:
raise
def write_legacy_netcdf(tmp_netcdf, write_module):
ds = write_module.Dataset(tmp_netcdf, "w")
ds.setncattr("global", 42)
ds.other_attr = "yes"
ds.createDimension("x", 4)
ds.createDimension("y", 5)
ds.createDimension("z", 6)
ds.createDimension("empty", 0)
ds.createDimension("string3", 3)
ds.createDimension("unlimited", None)
v = ds.createVariable("foo", float, ("x", "y"), chunksizes=(4, 5), zlib=True)
v[...] = 1
v.setncattr("units", "meters")
v = ds.createVariable("y", int, ("y",), fill_value=-1)
v[:4] = np.arange(4)
v = ds.createVariable("z", "S1", ("z", "string3"), fill_value=b"X")
v[...] = _char_array
v = ds.createVariable("scalar", np.float32, ())
v[...] = 2.0
    # test creating a scalar with a compression option (which should be ignored)
v = ds.createVariable("intscalar", np.int64, (), zlib=6, fill_value=None)
v[...] = 2
v = ds.createVariable("foo_unlimited", float, ("x", "unlimited"))
v[...] = 1
with raises((h5netcdf.CompatibilityError, TypeError)):
ds.createVariable("boolean", np.bool_, ("x"))
g = ds.createGroup("subgroup")
v = g.createVariable("subvar", np.int32, ("x",))
v[...] = np.arange(4.0)
g.createDimension("y", 10)
g.createVariable("y_var", float, ("y",))
ds.createDimension("mismatched_dim", 1)
ds.createVariable("mismatched_dim", int, ())
v = ds.createVariable("var_len_str", str, ("x"))
v[0] = "foo"
ds.close()
def write_h5netcdf(tmp_netcdf):
ds = h5netcdf.File(tmp_netcdf, "w")
ds.attrs["global"] = 42
ds.attrs["other_attr"] = "yes"
ds.dimensions = {"x": 4, "y": 5, "z": 6, "empty": 0, "unlimited": None}
v = ds.create_variable(
"foo", ("x", "y"), float, chunks=(4, 5), compression="gzip", shuffle=True
)
v[...] = 1
v.attrs["units"] = "meters"
v = ds.create_variable("y", ("y",), int, fillvalue=-1)
v[:4] = np.arange(4)
v = ds.create_variable("z", ("z", "string3"), data=_char_array, fillvalue=b"X")
v = ds.create_variable("scalar", data=np.float32(2.0))
v = ds.create_variable("intscalar", data=np.int64(2))
v = ds.create_variable("foo_unlimited", ("x", "unlimited"), float)
v[...] = 1
with raises((h5netcdf.CompatibilityError, TypeError)):
ds.create_variable("boolean", data=True)
g = ds.create_group("subgroup")
v = g.create_variable("subvar", ("x",), np.int32)
v[...] = np.arange(4.0)
with raises(AttributeError):
v.attrs["_Netcdf4Dimid"] = -1
g.dimensions["y"] = 10
g.create_variable("y_var", ("y",), float)
g.flush()
ds.dimensions["mismatched_dim"] = 1
ds.create_variable("mismatched_dim", dtype=int)
ds.flush()
dt = h5py.special_dtype(vlen=str)
v = ds.create_variable("var_len_str", ("x",), dtype=dt)
v[0] = _vlen_string
ds.close()
def read_legacy_netcdf(tmp_netcdf, read_module, write_module):
ds = read_module.Dataset(tmp_netcdf, "r")
assert ds.ncattrs() == ["global", "other_attr"]
assert ds.getncattr("global") == 42
if write_module is not netCDF4:
# skip for now: https://github.com/Unidata/netcdf4-python/issues/388
assert ds.other_attr == "yes"
with pytest.raises(AttributeError):
ds.does_not_exist
assert set(ds.dimensions) == set(
["x", "y", "z", "empty", "string3", "mismatched_dim", "unlimited"]
)
assert set(ds.variables) == set(
[
"foo",
"y",
"z",
"intscalar",
"scalar",
"var_len_str",
"mismatched_dim",
"foo_unlimited",
]
)
assert set(ds.groups) == set(["subgroup"])
assert ds.parent is None
v = ds.variables["foo"]
assert array_equal(v, np.ones((4, 5)))
assert v.dtype == float
assert v.dimensions == ("x", "y")
assert v.ndim == 2
assert v.ncattrs() == ["units"]
if write_module is not netCDF4:
assert v.getncattr("units") == "meters"
assert tuple(v.chunking()) == (4, 5)
assert v.filters() == {
"complevel": 4,
"fletcher32": False,
"shuffle": True,
"zlib": True,
}
v = ds.variables["y"]
assert array_equal(v, np.r_[np.arange(4), [-1]])
assert v.dtype == int
assert v.dimensions == ("y",)
assert v.ndim == 1
assert v.ncattrs() == ["_FillValue"]
assert v.getncattr("_FillValue") == -1
assert v.chunking() == "contiguous"
assert v.filters() == {
"complevel": 0,
"fletcher32": False,
"shuffle": False,
"zlib": False,
}
ds.close()
    # Check the behavior of h5py. We cannot expect h5netcdf to overcome these
    # errors:
if is_h5py_char_working(tmp_netcdf, "z"):
ds = read_module.Dataset(tmp_netcdf, "r")
v = ds.variables["z"]
assert array_equal(v, _char_array)
assert v.dtype == "S1"
assert v.ndim == 2
assert v.dimensions == ("z", "string3")
assert v.ncattrs() == ["_FillValue"]
assert v.getncattr("_FillValue") == b"X"
else:
ds = read_module.Dataset(tmp_netcdf, "r")
v = ds.variables["scalar"]
assert array_equal(v, np.array(2.0))
assert v.dtype == "float32"
assert v.ndim == 0
assert v.dimensions == ()
assert v.ncattrs() == []
v = ds.variables["intscalar"]
assert array_equal(v, np.array(2))
assert v.dtype == "int64"
assert v.ndim == 0
assert v.dimensions == ()
assert v.ncattrs() == []
v = ds.variables["var_len_str"]
assert v.dtype == str
assert v[0] == _vlen_string
v = ds.groups["subgroup"].variables["subvar"]
assert ds.groups["subgroup"].parent is ds
assert array_equal(v, np.arange(4.0))
assert v.dtype == "int32"
assert v.ndim == 1
assert v.dimensions == ("x",)
assert v.ncattrs() == []
v = ds.groups["subgroup"].variables["y_var"]
assert v.shape == (10,)
assert "y" in ds.groups["subgroup"].dimensions
ds.close()
def read_h5netcdf(tmp_netcdf, write_module, decode_vlen_strings):
remote_file = isinstance(tmp_netcdf, str) and tmp_netcdf.startswith(remote_h5)
ds = h5netcdf.File(tmp_netcdf, "r", **decode_vlen_strings)
assert ds.name == "/"
assert list(ds.attrs) == ["global", "other_attr"]
assert ds.attrs["global"] == 42
if write_module is not netCDF4:
# skip for now: https://github.com/Unidata/netcdf4-python/issues/388
assert ds.attrs["other_attr"] == "yes"
assert set(ds.dimensions) == set(
["x", "y", "z", "empty", "string3", "mismatched_dim", "unlimited"]
)
assert set(ds.variables) == set(
[
"foo",
"y",
"z",
"intscalar",
"scalar",
"var_len_str",
"mismatched_dim",
"foo_unlimited",
]
)
assert set(ds.groups) == set(["subgroup"])
assert ds.parent is None
v = ds["foo"]
assert v.name == "/foo"
assert array_equal(v, np.ones((4, 5)))
assert v.dtype == float
assert v.dimensions == ("x", "y")
assert v.ndim == 2
assert list(v.attrs) == ["units"]
if write_module is not netCDF4:
assert v.attrs["units"] == "meters"
assert v.chunks == (4, 5)
assert v.compression == "gzip"
assert v.compression_opts == 4
assert not v.fletcher32
assert v.shuffle
v = ds["y"]
assert array_equal(v, np.r_[np.arange(4), [-1]])
assert v.dtype == int
assert v.dimensions == ("y",)
assert v.ndim == 1
assert list(v.attrs) == ["_FillValue"]
assert v.attrs["_FillValue"] == -1
if not remote_file:
assert v.chunks is None
assert v.compression is None
assert v.compression_opts is None
assert not v.fletcher32
assert not v.shuffle
ds.close()
if is_h5py_char_working(tmp_netcdf, "z"):
ds = h5netcdf.File(tmp_netcdf, "r")
v = ds["z"]
assert array_equal(v, _char_array)
assert v.dtype == "S1"
assert v.ndim == 2
assert v.dimensions == ("z", "string3")
assert list(v.attrs) == ["_FillValue"]
assert v.attrs["_FillValue"] == b"X"
else:
ds = h5netcdf.File(tmp_netcdf, "r", **decode_vlen_strings)
v = ds["scalar"]
assert array_equal(v, np.array(2.0))
assert v.dtype == "float32"
assert v.ndim == 0
assert v.dimensions == ()
assert list(v.attrs) == []
v = ds.variables["intscalar"]
assert array_equal(v, np.array(2))
assert v.dtype == "int64"
assert v.ndim == 0
assert v.dimensions == ()
assert list(v.attrs) == []
v = ds["var_len_str"]
assert h5py.check_dtype(vlen=v.dtype) == str
if getattr(ds, "decode_vlen_strings", True):
assert v[0] == _vlen_string
else:
assert v[0] == _vlen_string.encode("utf_8")
v = ds["/subgroup/subvar"]
assert v is ds["subgroup"]["subvar"]
assert v is ds["subgroup/subvar"]
assert v is ds["subgroup"]["/subgroup/subvar"]
assert v.name == "/subgroup/subvar"
assert ds["subgroup"].name == "/subgroup"
assert ds["subgroup"].parent is ds
assert array_equal(v, np.arange(4.0))
assert v.dtype == "int32"
assert v.ndim == 1
assert v.dimensions == ("x",)
assert list(v.attrs) == []
assert ds["/subgroup/y_var"].shape == (10,)
assert ds["/subgroup"].dimensions["y"].size == 10
ds.close()
def roundtrip_legacy_netcdf(tmp_netcdf, read_module, write_module):
write_legacy_netcdf(tmp_netcdf, write_module)
read_legacy_netcdf(tmp_netcdf, read_module, write_module)
def test_write_legacyapi_read_netCDF4(tmp_local_netcdf):
roundtrip_legacy_netcdf(tmp_local_netcdf, netCDF4, legacyapi)
def test_roundtrip_h5netcdf_legacyapi(tmp_local_netcdf):
roundtrip_legacy_netcdf(tmp_local_netcdf, legacyapi, legacyapi)
def test_write_netCDF4_read_legacyapi(tmp_local_netcdf):
roundtrip_legacy_netcdf(tmp_local_netcdf, legacyapi, netCDF4)
def test_write_h5netcdf_read_legacyapi(tmp_local_netcdf):
write_h5netcdf(tmp_local_netcdf)
read_legacy_netcdf(tmp_local_netcdf, legacyapi, h5netcdf)
def test_write_h5netcdf_read_netCDF4(tmp_local_netcdf):
write_h5netcdf(tmp_local_netcdf)
read_legacy_netcdf(tmp_local_netcdf, netCDF4, h5netcdf)
def test_roundtrip_h5netcdf(tmp_local_or_remote_netcdf, decode_vlen_strings):
write_h5netcdf(tmp_local_or_remote_netcdf)
read_h5netcdf(tmp_local_or_remote_netcdf, h5netcdf, decode_vlen_strings)
def test_write_netCDF4_read_h5netcdf(tmp_local_netcdf, decode_vlen_strings):
write_legacy_netcdf(tmp_local_netcdf, netCDF4)
read_h5netcdf(tmp_local_netcdf, netCDF4, decode_vlen_strings)
def test_write_legacyapi_read_h5netcdf(tmp_local_netcdf, decode_vlen_strings):
write_legacy_netcdf(tmp_local_netcdf, legacyapi)
read_h5netcdf(tmp_local_netcdf, legacyapi, decode_vlen_strings)
def test_fileobj(decode_vlen_strings):
if version.parse(h5py.__version__) < version.parse("2.9.0"):
pytest.skip("h5py > 2.9.0 required to test file-like objects")
fileobj = tempfile.TemporaryFile()
write_h5netcdf(fileobj)
read_h5netcdf(fileobj, h5netcdf, decode_vlen_strings)
fileobj = io.BytesIO()
write_h5netcdf(fileobj)
read_h5netcdf(fileobj, h5netcdf, decode_vlen_strings)
def test_repr(tmp_local_or_remote_netcdf):
write_h5netcdf(tmp_local_or_remote_netcdf)
f = h5netcdf.File(tmp_local_or_remote_netcdf, "a")
assert "h5netcdf.File" in repr(f)
assert "subgroup" in repr(f)
assert "foo" in repr(f)
assert "other_attr" in repr(f)
assert "h5netcdf.attrs.Attributes" in repr(f.attrs)
assert "global" in repr(f.attrs)
d = f.dimensions
assert "h5netcdf.Dimensions" in repr(d)
assert "x=<h5netcdf.Dimension 'x': size 4>" in repr(d)
g = f["subgroup"]
assert "h5netcdf.Group" in repr(g)
assert "subvar" in repr(g)
v = f["foo"]
assert "h5netcdf.Variable" in repr(v)
assert "float" in repr(v)
assert "units" in repr(v)
f.dimensions["temp"] = None
assert "temp: <h5netcdf.Dimension 'temp': size 0 (unlimited)>" in repr(f)
f.resize_dimension("temp", 5)
assert "temp: <h5netcdf.Dimension 'temp': size 5 (unlimited)>" in repr(f)
f.close()
assert "Closed" in repr(f)
assert "Closed" in repr(d)
assert "Closed" in repr(g)
assert "Closed" in repr(v)
def test_attrs_api(tmp_local_or_remote_netcdf):
with h5netcdf.File(tmp_local_or_remote_netcdf, "w") as ds:
ds.attrs["conventions"] = "CF"
ds.attrs["empty_string"] = h5py.Empty(dtype=np.dtype("|S1"))
ds.dimensions["x"] = 1
v = ds.create_variable("x", ("x",), "i4")
v.attrs.update({"units": "meters", "foo": "bar"})
assert ds._closed
with h5netcdf.File(tmp_local_or_remote_netcdf, "r") as ds:
assert len(ds.attrs) == 2
assert dict(ds.attrs) == {"conventions": "CF", "empty_string": b""}
assert list(ds.attrs) == ["conventions", "empty_string"]
assert dict(ds["x"].attrs) == {"units": "meters", "foo": "bar"}
assert len(ds["x"].attrs) == 2
assert sorted(ds["x"].attrs) == ["foo", "units"]
def test_optional_netcdf4_attrs(tmp_local_or_remote_netcdf):
h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
with h5.File(tmp_local_or_remote_netcdf, "w") as f:
foo_data = np.arange(50).reshape(5, 10)
f.create_dataset("foo", data=foo_data)
f.create_dataset("x", data=np.arange(5))
f.create_dataset("y", data=np.arange(10))
if version.parse(h5py.__version__) < version.parse("2.10.0"):
f["foo"].dims.create_scale(f["x"])
f["foo"].dims.create_scale(f["y"])
else:
f["x"].make_scale()
f["y"].make_scale()
f["foo"].dims[0].attach_scale(f["x"])
f["foo"].dims[1].attach_scale(f["y"])
with h5netcdf.File(tmp_local_or_remote_netcdf, "r") as ds:
assert ds["foo"].dimensions == ("x", "y")
assert ds.dimensions.keys() == {"x", "y"}
assert ds.dimensions["x"].size == 5
assert ds.dimensions["y"].size == 10
assert array_equal(ds["foo"], foo_data)
def test_error_handling(tmp_local_or_remote_netcdf):
with h5netcdf.File(tmp_local_or_remote_netcdf, "w") as ds:
ds.dimensions["x"] = 1
with raises(ValueError):
ds.dimensions["x"] = 2
with raises(ValueError):
ds.dimensions = {"x": 2}
with raises(ValueError):
ds.dimensions = {"y": 3}
ds.create_variable("x", ("x",), dtype=float)
with raises(ValueError):
ds.create_variable("x", ("x",), dtype=float)
ds.create_group("subgroup")
with raises(ValueError):
ds.create_group("subgroup")
@pytest.mark.skipif(
version.parse(h5py.__version__) < version.parse("3.0.0"),
reason="not needed with h5py < 3.0",
)
def test_decode_string_warning(tmp_local_or_remote_netcdf):
write_h5netcdf(tmp_local_or_remote_netcdf)
with pytest.warns(FutureWarning):
with h5netcdf.File(tmp_local_or_remote_netcdf, "r") as ds:
assert ds.name == "/"
@pytest.mark.skipif(
version.parse(h5py.__version__) < version.parse("3.0.0"),
reason="not needed with h5py < 3.0",
)
def test_decode_string_error(tmp_local_or_remote_netcdf):
write_h5netcdf(tmp_local_or_remote_netcdf)
with pytest.raises(TypeError):
with h5netcdf.legacyapi.Dataset(
tmp_local_or_remote_netcdf, "r", decode_vlen_strings=True
) as ds:
assert ds.name == "/"
def test_mode_warning(tmp_local_or_remote_netcdf):
with pytest.warns(FutureWarning):
with h5netcdf.File(tmp_local_or_remote_netcdf):
pass
def test_unlimited_chunk_warning(tmp_local_or_remote_netcdf):
with pytest.warns(FutureWarning):
with h5netcdf.File(tmp_local_or_remote_netcdf, "w") as ds:
ds.dimensions = {"x": None}
ds.create_variable("foo", ("x",), float)
def create_invalid_netcdf_data():
foo_data = np.arange(125).reshape(5, 5, 5)
bar_data = np.arange(625).reshape(25, 5, 5)
var = {"foo1": foo_data, "foo2": bar_data, "foo3": foo_data, "foo4": bar_data}
var2 = {"x": 5, "y": 5, "z": 5, "x1": 25, "y1": 5, "z1": 5}
return var, var2
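# The data above requires exactly four distinct phony dimensions per group
# (three of size 5 from the cube axes plus one of size 25), hence the
# ``i * 4`` strides asserted in check_invalid_netcdf4 below.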
def check_invalid_netcdf4(var, i):
pdim = "phony_dim_{}"
assert var["foo1"].dimensions[0] == pdim.format(i * 4)
assert var["foo1"].dimensions[1] == pdim.format(1 + i * 4)
assert var["foo1"].dimensions[2] == pdim.format(2 + i * 4)
assert var["foo2"].dimensions[0] == pdim.format(3 + i * 4)
assert var["foo2"].dimensions[1] == pdim.format(0 + i * 4)
assert var["foo2"].dimensions[2] == pdim.format(1 + i * 4)
assert var["foo3"].dimensions[0] == pdim.format(i * 4)
assert var["foo3"].dimensions[1] == pdim.format(1 + i * 4)
assert var["foo3"].dimensions[2] == pdim.format(2 + i * 4)
assert var["foo4"].dimensions[0] == pdim.format(3 + i * 4)
assert var["foo4"].dimensions[1] == pdim.format(i * 4)
assert var["foo4"].dimensions[2] == pdim.format(1 + i * 4)
assert var["x"].dimensions[0] == pdim.format(i * 4)
assert var["y"].dimensions[0] == pdim.format(i * 4)
assert var["z"].dimensions[0] == pdim.format(i * 4)
assert var["x1"].dimensions[0] == pdim.format(3 + i * 4)
assert var["y1"].dimensions[0] == pdim.format(i * 4)
assert var["z1"].dimensions[0] == pdim.format(i * 4)
def test_invalid_netcdf4(tmp_local_or_remote_netcdf):
h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
with h5.File(tmp_local_or_remote_netcdf, "w") as f:
var, var2 = create_invalid_netcdf_data()
grps = ["bar", "baz"]
for grp in grps:
fx = f.create_group(grp)
for k, v in var.items():
fx.create_dataset(k, data=v)
for k, v in var2.items():
fx.create_dataset(k, data=np.arange(v))
with h5netcdf.File(tmp_local_or_remote_netcdf, "r", phony_dims="sort") as dsr:
for i, grp in enumerate(grps):
var = dsr[grp].variables
check_invalid_netcdf4(var, i)
with h5netcdf.File(tmp_local_or_remote_netcdf, "r", phony_dims="access") as dsr:
for i, grp in enumerate(grps):
var = dsr[grp].variables
check_invalid_netcdf4(var, i)
with netCDF4.Dataset(tmp_local_or_remote_netcdf, "r") as dsr:
for i, grp in enumerate(grps):
var = dsr[grp].variables
check_invalid_netcdf4(var, i)
with h5netcdf.File(tmp_local_or_remote_netcdf, "r") as ds:
with raises(ValueError):
ds["bar"].variables["foo1"].dimensions
with raises(ValueError):
with h5netcdf.File(tmp_local_or_remote_netcdf, "r", phony_dims="srt") as ds:
pass
def check_invalid_netcdf4_mixed(var, i):
pdim = "phony_dim_{}".format(i)
assert var["foo1"].dimensions[0] == "y1"
assert var["foo1"].dimensions[1] == "z1"
assert var["foo1"].dimensions[2] == pdim
assert var["foo2"].dimensions[0] == "x1"
assert var["foo2"].dimensions[1] == "y1"
assert var["foo2"].dimensions[2] == "z1"
assert var["foo3"].dimensions[0] == "y1"
assert var["foo3"].dimensions[1] == "z1"
assert var["foo3"].dimensions[2] == pdim
assert var["foo4"].dimensions[0] == "x1"
assert var["foo4"].dimensions[1] == "y1"
assert var["foo4"].dimensions[2] == "z1"
assert var["x"].dimensions[0] == "y1"
assert var["y"].dimensions[0] == "y1"
assert var["z"].dimensions[0] == "y1"
assert var["x1"].dimensions[0] == "x1"
assert var["y1"].dimensions[0] == "y1"
assert var["z1"].dimensions[0] == "z1"
def test_invalid_netcdf4_mixed(tmp_local_or_remote_netcdf):
h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
with h5.File(tmp_local_or_remote_netcdf, "w") as f:
var, var2 = create_invalid_netcdf_data()
for k, v in var.items():
f.create_dataset(k, data=v)
for k, v in var2.items():
f.create_dataset(k, data=np.arange(v))
if version.parse(h5py.__version__) < version.parse("2.10.0"):
f["foo2"].dims.create_scale(f["x1"])
f["foo2"].dims.create_scale(f["y1"])
f["foo2"].dims.create_scale(f["z1"])
else:
f["x1"].make_scale()
f["y1"].make_scale()
f["z1"].make_scale()
f["foo2"].dims[0].attach_scale(f["x1"])
f["foo2"].dims[1].attach_scale(f["y1"])
f["foo2"].dims[2].attach_scale(f["z1"])
with h5netcdf.File(tmp_local_or_remote_netcdf, "r", phony_dims="sort") as ds:
var = ds.variables
check_invalid_netcdf4_mixed(var, 3)
with h5netcdf.File(tmp_local_or_remote_netcdf, "r", phony_dims="access") as ds:
var = ds.variables
check_invalid_netcdf4_mixed(var, 0)
with netCDF4.Dataset(tmp_local_or_remote_netcdf, "r") as ds:
var = ds.variables
check_invalid_netcdf4_mixed(var, 3)
with h5netcdf.File(tmp_local_or_remote_netcdf, "r") as ds:
with raises(ValueError):
ds.variables["foo1"].dimensions
def test_invalid_netcdf_malformed_dimension_scales(tmp_local_or_remote_netcdf):
h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
with h5.File(tmp_local_or_remote_netcdf, "w") as f:
foo_data = np.arange(125).reshape(5, 5, 5)
f.create_dataset("foo1", data=foo_data)
f.create_dataset("x", data=np.arange(5))
f.create_dataset("y", data=np.arange(5))
f.create_dataset("z", data=np.arange(5))
if version.parse(h5py.__version__) < version.parse("2.10.0"):
f["foo1"].dims.create_scale(f["x"])
f["foo1"].dims.create_scale(f["y"])
f["foo1"].dims.create_scale(f["z"])
else:
f["x"].make_scale()
f["y"].make_scale()
f["z"].make_scale()
f["foo1"].dims[0].attach_scale(f["x"])
with raises(ValueError):
with h5netcdf.File(tmp_local_or_remote_netcdf, "r", phony_dims="sort") as ds:
assert ds
def test_hierarchical_access_auto_create(tmp_local_or_remote_netcdf):
ds = h5netcdf.File(tmp_local_or_remote_netcdf, "w")
ds.create_variable("/foo/bar", data=1)
g = ds.create_group("foo/baz")
g.create_variable("/foo/hello", data=2)
assert set(ds) == set(["foo"])
assert set(ds["foo"]) == set(["bar", "baz", "hello"])
ds.close()
ds = h5netcdf.File(tmp_local_or_remote_netcdf, "r")
assert set(ds) == set(["foo"])
assert set(ds["foo"]) == set(["bar", "baz", "hello"])
ds.close()
def test_Netcdf4Dimid(tmp_local_netcdf):
# regression test for https://github.com/h5netcdf/h5netcdf/issues/53
with h5netcdf.File(tmp_local_netcdf, "w") as f:
f.dimensions["x"] = 1
g = f.create_group("foo")
g.dimensions["x"] = 2
g.dimensions["y"] = 3
with h5py.File(tmp_local_netcdf, "r") as f:
# all dimension IDs should be present exactly once
dim_ids = {f[name].attrs["_Netcdf4Dimid"] for name in ["x", "foo/x", "foo/y"]}
assert dim_ids == {0, 1, 2}
def test_reading_str_array_from_netCDF4(tmp_local_netcdf, decode_vlen_strings):
# This tests reading string variables created by netCDF4
with netCDF4.Dataset(tmp_local_netcdf, "w") as ds:
ds.createDimension("foo1", _string_array.shape[0])
ds.createDimension("foo2", _string_array.shape[1])
ds.createVariable("bar", str, ("foo1", "foo2"))
ds.variables["bar"][:] = _string_array
ds = h5netcdf.File(tmp_local_netcdf, "r", **decode_vlen_strings)
v = ds.variables["bar"]
if getattr(ds, "decode_vlen_strings", True):
assert array_equal(v, _string_array)
else:
assert array_equal(v, np.char.encode(_string_array))
ds.close()
def test_nc_properties_new(tmp_local_or_remote_netcdf):
with h5netcdf.File(tmp_local_or_remote_netcdf, "w"):
pass
h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
with h5.File(tmp_local_or_remote_netcdf, "r") as f:
assert b"h5netcdf" in f.attrs["_NCProperties"]
def test_failed_read_open_and_clean_delete(tmpdir):
    # A file that does not exist but is opened for reading should raise only
    # an IOError, and no AttributeError at garbage collection.
path = str(tmpdir.join("this_file_does_not_exist.nc"))
try:
with h5netcdf.File(path, "r") as ds:
assert ds
except IOError:
pass
    # Check garbage collection: a simple gc.collect() does not raise an
    # exception, so we must find the File object and imitate its __del__
    # behavior by forcing it to close.
obj_list = gc.get_objects()
for obj in obj_list:
try:
is_h5netcdf_File = isinstance(obj, h5netcdf.File)
except AttributeError:
is_h5netcdf_File = False
if is_h5netcdf_File:
obj.close()
def test_create_variable_matching_saved_dimension(tmp_local_or_remote_netcdf):
h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
if h5 is not h5py:
pytest.xfail("https://github.com/h5netcdf/h5netcdf/issues/48")
with h5netcdf.File(tmp_local_or_remote_netcdf, "w") as f:
f.dimensions["x"] = 2
f.create_variable("y", data=[1, 2], dimensions=("x",))
with h5.File(tmp_local_or_remote_netcdf, "r") as f:
dimlen = f"{f['y'].dims[0].values()[0].size:10}"
assert f["y"].dims[0].keys() == [NOT_A_VARIABLE.decode("ascii") + dimlen]
with h5netcdf.File(tmp_local_or_remote_netcdf, "a") as f:
f.create_variable("x", data=[0, 1], dimensions=("x",))
with h5.File(tmp_local_or_remote_netcdf, "r") as f:
assert f["y"].dims[0].keys() == ["x"]
def test_invalid_netcdf_error(tmp_local_or_remote_netcdf):
with h5netcdf.File(tmp_local_or_remote_netcdf, "w", invalid_netcdf=False) as f:
# valid
f.create_variable(
"lzf_compressed", data=[1], dimensions=("x"), compression="lzf"
)
# invalid
with pytest.raises(h5netcdf.CompatibilityError):
f.create_variable("complex", data=1j)
with pytest.raises(h5netcdf.CompatibilityError):
f.attrs["complex_attr"] = 1j
with pytest.raises(h5netcdf.CompatibilityError):
f.create_variable("scaleoffset", data=[1], dimensions=("x",), scaleoffset=0)
def test_invalid_netcdf_okay(tmp_local_or_remote_netcdf):
if tmp_local_or_remote_netcdf.startswith(remote_h5):
pytest.skip("h5pyd does not support NumPy complex dtype yet")
with h5netcdf.File(tmp_local_or_remote_netcdf, "w", invalid_netcdf=True) as f:
f.create_variable(
"lzf_compressed", data=[1], dimensions=("x"), compression="lzf"
)
f.create_variable("complex", data=1j)
f.attrs["complex_attr"] = 1j
f.create_variable("scaleoffset", data=[1], dimensions=("x",), scaleoffset=0)
with h5netcdf.File(tmp_local_or_remote_netcdf, "r") as f:
np.testing.assert_equal(f["lzf_compressed"][:], [1])
assert f["complex"][...] == 1j
assert f.attrs["complex_attr"] == 1j
np.testing.assert_equal(f["scaleoffset"][:], [1])
h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
with h5.File(tmp_local_or_remote_netcdf, "r") as f:
assert "_NCProperties" not in f.attrs
def test_reopen_file_different_dimension_sizes(tmp_local_netcdf):
# regression test for https://github.com/h5netcdf/h5netcdf/issues/55
with h5netcdf.File(tmp_local_netcdf, "w") as f:
f.create_variable("/one/foo", data=[1], dimensions=("x",))
with h5netcdf.File(tmp_local_netcdf, "a") as f:
f.create_variable("/two/foo", data=[1, 2], dimensions=("x",))
with netCDF4.Dataset(tmp_local_netcdf, "r") as f:
assert f.groups["one"].variables["foo"][...].shape == (1,)
def test_invalid_then_valid_no_ncproperties(tmp_local_or_remote_netcdf):
with h5netcdf.File(tmp_local_or_remote_netcdf, "w", invalid_netcdf=True):
pass
with h5netcdf.File(tmp_local_or_remote_netcdf, "a"):
pass
h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
with h5.File(tmp_local_or_remote_netcdf, "r") as f:
# still not a valid netcdf file
assert "_NCProperties" not in f.attrs
def test_creating_and_resizing_unlimited_dimensions(tmp_local_or_remote_netcdf):
with h5netcdf.File(tmp_local_or_remote_netcdf, "w") as f:
f.dimensions["x"] = None
f.dimensions["y"] = 15
f.dimensions["z"] = None
f.resize_dimension("z", 20)
with pytest.raises(ValueError) as e:
f.resize_dimension("y", 20)
assert e.value.args[0] == (
"Dimension 'y' is not unlimited and thus cannot be resized."
)
h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
# Assert some behavior observed by using the C netCDF bindings.
with h5.File(tmp_local_or_remote_netcdf, "r") as f:
assert f["x"].shape == (0,)
assert f["x"].maxshape == (None,)
assert f["y"].shape == (15,)
assert f["y"].maxshape == (15,)
assert f["z"].shape == (20,)
assert f["z"].maxshape == (None,)
def test_creating_variables_with_unlimited_dimensions(tmp_local_or_remote_netcdf):
with h5netcdf.File(tmp_local_or_remote_netcdf, "w") as f:
f.dimensions["x"] = None
f.dimensions["y"] = 2
# Creating a variable without data will initialize an array with zero
# length.
f.create_variable("dummy", dimensions=("x", "y"), dtype=np.int64)
assert f.variables["dummy"].shape == (0, 2)
assert f.variables["dummy"]._h5ds.maxshape == (None, 2)
# Trying to create a variable while the current size of the dimension
# is still zero will fail.
with pytest.raises(ValueError) as e:
f.create_variable(
"dummy2", data=np.array([[1, 2], [3, 4]]), dimensions=("x", "y")
)
assert e.value.args[0] == "Shape tuple is incompatible with data"
# Creating a coordinate variable
f.create_variable("x", dimensions=("x",), dtype=np.int64)
# Resize data.
assert f.variables["dummy"].shape == (0, 2)
f.resize_dimension("x", 3)
        # This will also force a resize of the existing variables, which will
        # be padded with zeros.
assert f.dimensions["x"].size == 3
np.testing.assert_allclose(f.variables["dummy"], np.zeros((3, 2)))
# Creating another variable with no data will now also take the shape
# of the current dimensions.
f.create_variable("dummy3", dimensions=("x", "y"), dtype=np.int64)
assert f.variables["dummy3"].shape == (3, 2)
assert f.variables["dummy3"]._h5ds.maxshape == (None, 2)
np.testing.assert_allclose(f.variables["dummy3"], np.zeros((3, 2)))
        # Writing to a variable beyond its current (resized) shape raises
with pytest.raises(TypeError) as e:
f.variables["dummy3"][:] = np.ones((5, 2))
assert e.value.args[0] == "Can't broadcast (5, 2) -> (3, 2)"
assert f.variables["dummy3"].shape == (3, 2)
assert f.variables["dummy3"]._h5ds.maxshape == (None, 2)
assert f["x"].shape == (3,)
assert f.dimensions["x"].size == 3
np.testing.assert_allclose(f.variables["dummy3"], np.zeros((3, 2)))
# Close and read again to also test correct parsing of unlimited
# dimensions.
with h5netcdf.File(tmp_local_or_remote_netcdf, "r") as f:
assert f.dimensions["x"].isunlimited()
assert f.dimensions["x"].size == 3
assert f._h5file["x"].maxshape == (None,)
assert f._h5file["x"].shape == (3,)
assert f.dimensions["y"].size == 2
assert f._h5file["y"].maxshape == (2,)
assert f._h5file["y"].shape == (2,)
def test_writing_to_an_unlimited_dimension(tmp_local_or_remote_netcdf):
with h5netcdf.File(tmp_local_or_remote_netcdf, "w") as f:
# Two dimensions, only one is unlimited.
f.dimensions["x"] = None
f.dimensions["y"] = 3
f.dimensions["z"] = None
# Cannot create it without first resizing it.
with pytest.raises(ValueError) as e:
f.create_variable(
"dummy1", data=np.array([[1, 2, 3]]), dimensions=("x", "y")
)
assert e.value.args[0] == "Shape tuple is incompatible with data"
# Without data.
f.create_variable("dummy1", dimensions=("x", "y"), dtype=np.int64)
f.create_variable("dummy2", dimensions=("x", "y"), dtype=np.int64)
f.create_variable("dummy3", dimensions=("x", "y"), dtype=np.int64)
f.create_variable("dummyX", dimensions=("x", "y", "z"), dtype=np.int64)
g = f.create_group("test")
g.create_variable("dummy4", dimensions=("y", "x", "x"), dtype=np.int64)
g.create_variable("dummy5", dimensions=("y", "y"), dtype=np.int64)
assert f.variables["dummy1"].shape == (0, 3)
assert f.variables["dummy2"].shape == (0, 3)
assert f.variables["dummy3"].shape == (0, 3)
assert f.variables["dummyX"].shape == (0, 3, 0)
assert g.variables["dummy4"].shape == (3, 0, 0)
assert g.variables["dummy5"].shape == (3, 3)
# resize dimensions and all connected variables
f.resize_dimension("x", 2)
assert f.variables["dummy1"].shape == (2, 3)
assert f.variables["dummy2"].shape == (2, 3)
assert f.variables["dummy3"].shape == (2, 3)
assert f.variables["dummyX"].shape == (2, 3, 0)
assert g.variables["dummy4"].shape == (3, 2, 2)
assert g.variables["dummy5"].shape == (3, 3)
# broadcast writing
f.variables["dummy3"][...] = [[1, 2, 3]]
np.testing.assert_allclose(f.variables["dummy3"], [[1, 2, 3], [1, 2, 3]])
def test_c_api_can_read_unlimited_dimensions(tmp_local_netcdf):
with h5netcdf.File(tmp_local_netcdf, "w") as f:
# Three dimensions, only one is limited.
f.dimensions["x"] = None
f.dimensions["y"] = 3
f.dimensions["z"] = None
f.create_variable("dummy1", dimensions=("x", "y"), dtype=np.int64)
f.create_variable("dummy2", dimensions=("y", "x", "x"), dtype=np.int64)
g = f.create_group("test")
g.create_variable("dummy3", dimensions=("y", "y"), dtype=np.int64)
g.create_variable("dummy4", dimensions=("z", "z"), dtype=np.int64)
f.resize_dimension("x", 2)
with netCDF4.Dataset(tmp_local_netcdf, "r") as f:
assert f.dimensions["x"].size == 2
assert f.dimensions["x"].isunlimited() is True
assert f.dimensions["y"].size == 3
assert f.dimensions["y"].isunlimited() is False
assert f.dimensions["z"].size == 0
assert f.dimensions["z"].isunlimited() is True
assert f.variables["dummy1"].shape == (2, 3)
assert f.variables["dummy2"].shape == (3, 2, 2)
g = f.groups["test"]
assert g.variables["dummy3"].shape == (3, 3)
assert g.variables["dummy4"].shape == (0, 0)
def test_reading_unlimited_dimensions_created_with_c_api(tmp_local_netcdf):
with netCDF4.Dataset(tmp_local_netcdf, "w") as f:
f.createDimension("x", None)
f.createDimension("y", 3)
f.createDimension("z", None)
dummy1 = f.createVariable("dummy1", float, ("x", "y"))
f.createVariable("dummy2", float, ("y", "x", "x"))
g = f.createGroup("test")
g.createVariable("dummy3", float, ("y", "y"))
g.createVariable("dummy4", float, ("z", "z"))
# Assign something to trigger a resize.
dummy1[:] = [[1, 2, 3], [4, 5, 6]]
# Create another variable with same dimensions
f.createVariable("dummy5", float, ("x", "y"))
with h5netcdf.File(tmp_local_netcdf, "r") as f:
assert f.dimensions["x"].isunlimited()
assert f.dimensions["y"].size == 3
assert f.dimensions["z"].isunlimited()
# This is parsed correctly due to h5netcdf's init trickery.
assert f.dimensions["x"].size == 2
assert f.dimensions["y"].size == 3
assert f.dimensions["z"].size == 0
# But the actual data-set and arrays are not correct.
# assert f["dummy1"].shape == (2, 3)
# XXX: This array has some data with dimension x - netcdf does not
# appear to keep dimensions consistent.
# With https://github.com/h5netcdf/h5netcdf/pull/103 h5netcdf will
# return a padded array
assert f["dummy2"].shape == (3, 2, 2)
f.groups["test"]["dummy3"].shape == (3, 3)
f.groups["test"]["dummy4"].shape == (0, 0)
def test_reading_unused_unlimited_dimension(tmp_local_or_remote_netcdf):
"""Test reading a file with unused dimension of unlimited size"""
with h5netcdf.File(tmp_local_or_remote_netcdf, "w") as f:
f.dimensions = {"x": None}
f.resize_dimension("x", 5)
assert f.dimensions["x"].isunlimited()
assert f.dimensions["x"].size == 5
def test_reading_special_datatype_created_with_c_api(tmp_local_netcdf):
"""Test reading a file with unsupported Datatype"""
with netCDF4.Dataset(tmp_local_netcdf, "w") as f:
complex128 = np.dtype([("real", np.float64), ("imag", np.float64)])
f.createCompoundType(complex128, "complex128")
with h5netcdf.File(tmp_local_netcdf, "r") as f:
pass
def test_nc4_non_coord(tmp_local_netcdf):
    # track_order=True is the new default for versions after 0.12.0;
    # 0.12.0 defaults to `track_order=False`.
    # Ensure that the tests see the variables in their creation order,
    # not in alphabetical order.
with h5netcdf.File(tmp_local_netcdf, "w") as f:
f.dimensions = {"x": None, "y": 2}
f.create_variable("test", dimensions=("x",), dtype=np.int64)
f.create_variable("y", dimensions=("x",), dtype=np.int64)
with h5netcdf.File(tmp_local_netcdf, "r") as f:
assert list(f.dimensions) == ["x", "y"]
assert f.dimensions["x"].size == 0
assert f.dimensions["x"].isunlimited()
assert f.dimensions["y"].size == 2
assert list(f.variables) == ["y", "test"]
assert list(f._h5group.keys()) == ["_nc4_non_coord_y", "test", "x", "y"]
def test_overwrite_existing_file(tmp_local_netcdf):
# create file with _NCProperties attribute
with netCDF4.Dataset(tmp_local_netcdf, "w") as ds:
ds.createDimension("x", 10)
# check attribute
with h5netcdf.File(tmp_local_netcdf, "r") as ds:
assert ds.attrs._h5attrs.get("_NCProperties", False)
# overwrite file with legacyapi
with legacyapi.Dataset(tmp_local_netcdf, "w") as ds:
ds.createDimension("x", 10)
# check attribute
with h5netcdf.File(tmp_local_netcdf, "r") as ds:
assert ds.attrs._h5attrs.get("_NCProperties", False)
# overwrite file with new api
with h5netcdf.File(tmp_local_netcdf, "w") as ds:
ds.dimensions["x"] = 10
# check attribute
with h5netcdf.File(tmp_local_netcdf, "r") as ds:
assert ds.attrs._h5attrs.get("_NCProperties", False)
def test_scales_on_append(tmp_local_netcdf):
# create file with _NCProperties attribute
with netCDF4.Dataset(tmp_local_netcdf, "w") as ds:
ds.createDimension("x", 10)
# append file with netCDF4
with netCDF4.Dataset(tmp_local_netcdf, "r+") as ds:
ds.createVariable("test", "i4", ("x",))
# check scales
with h5netcdf.File(tmp_local_netcdf, "r") as ds:
assert ds.variables["test"].attrs._h5attrs.get("DIMENSION_LIST", False)
# append file with legacyapi
with legacyapi.Dataset(tmp_local_netcdf, "r+") as ds:
ds.createVariable("test1", "i4", ("x",))
# check scales
with h5netcdf.File(tmp_local_netcdf, "r") as ds:
assert ds.variables["test1"].attrs._h5attrs.get("DIMENSION_LIST", False)
def create_attach_scales(filename, append_module):
# create file with netCDF4
with netCDF4.Dataset(filename, "w") as ds:
ds.createDimension("x", 0)
ds.createDimension("y", 1)
ds.createVariable("test", "i4", ("x",))
ds.variables["test"] = np.ones((10,))
# append file with netCDF4
with append_module.Dataset(filename, "a") as ds:
ds.createVariable("test1", "i4", ("x",))
ds.createVariable("y", "i4", ("x", "y"))
# check scales
with h5netcdf.File(filename, "r") as ds:
refs = ds._h5group["x"].attrs.get("REFERENCE_LIST", False)
assert len(refs) == 3
for (ref, dim), name in zip(refs, ["/test", "/test1", "/_nc4_non_coord_y"]):
assert dim == 0
assert ds._root._h5file[ref].name == name
def test_create_attach_scales_netcdf4(tmp_local_netcdf):
create_attach_scales(tmp_local_netcdf, netCDF4)
def test_create_attach_scales_legacyapi(tmp_local_netcdf):
create_attach_scales(tmp_local_netcdf, legacyapi)
def test_detach_scale(tmp_local_netcdf):
with h5netcdf.File(tmp_local_netcdf, "w") as ds:
ds.dimensions["x"] = 2
ds.dimensions["y"] = 2
with h5netcdf.File(tmp_local_netcdf, "a") as ds:
ds.create_variable("test", dimensions=("x",), dtype=np.int64)
with h5netcdf.File(tmp_local_netcdf, "r") as ds:
refs = ds._h5group["x"].attrs.get("REFERENCE_LIST", False)
assert len(refs) == 1
for (ref, dim), name in zip(refs, ["/test"]):
assert dim == 0
assert ds._root._h5file[ref].name == name
with h5netcdf.File(tmp_local_netcdf, "a") as ds:
ds.dimensions["x"]._detach_scale()
with h5netcdf.File(tmp_local_netcdf, "r") as ds:
refs = ds._h5group["x"].attrs.get("REFERENCE_LIST", False)
assert not refs
def test_is_scale(tmp_local_netcdf):
with legacyapi.Dataset(tmp_local_netcdf, "w") as ds:
ds.createDimension("x", 10)
with legacyapi.Dataset(tmp_local_netcdf, "r") as ds:
assert ds.dimensions["x"]._isscale
def test_get_dim_scale_refs(tmp_local_netcdf):
with legacyapi.Dataset(tmp_local_netcdf, "w") as ds:
ds.createDimension("x", 10)
ds.createVariable("test0", "i8", ("x",))
ds.createVariable("test1", "i8", ("x",))
with legacyapi.Dataset(tmp_local_netcdf, "r") as ds:
refs = ds.dimensions["x"]._scale_refs
assert ds._h5file[refs[0][0]] == ds["test0"]._h5ds
assert ds._h5file[refs[1][0]] == ds["test1"]._h5ds
def create_netcdf_dimensions(ds, idx):
# dimension and variable setup is adapted from the blogpost at
# https://www.unidata.ucar.edu/blogs/developer/en/entry/netcdf4_shared_dimensions
g = ds.createGroup("dimtest" + str(idx))
g.createDimension("time", 0) # time
g.createDimension("nvec", 5 + idx) # nvec
g.createDimension("sample", 2 + idx) # sample
g.createDimension("ship", 3 + idx) # ship
g.createDimension("ship_strlen", 10) # ship_strlen
g.createDimension("collide", 7 + idx) # collide
time = g.createVariable("time", "f8", ("time",))
data = g.createVariable("data", "i8", ("ship", "sample", "time", "nvec"))
collide = g.createVariable("collide", "i8", ("nvec",))
non_collide = g.createVariable("non_collide", "i8", ("nvec",))
ship = g.createVariable("ship", "S1", ("ship", "ship_strlen"))
sample = g.createVariable("sample", "i8", ("time", "sample"))
time[:] = np.arange(10 + idx)
data[:] = np.ones((3 + idx, 2 + idx, 10 + idx, 5 + idx)) * 12.0
collide[...] = np.arange(5 + idx)
non_collide[...] = np.arange(5 + idx) + 10
sample[0 : 2 + idx, : 2 + idx] = np.ones((2 + idx, 2 + idx))
if version.parse(h5py.__version__) >= version.parse("3.0.0"):
ship[0] = list("Skiff ")
else:
ship[0] = string_to_char(np.array("Skiff ", dtype="|S1"))
def create_h5netcdf_dimensions(ds, idx):
# dimension and variable setup is adapted from the blogpost at
# https://www.unidata.ucar.edu/blogs/developer/en/entry/netcdf4_shared_dimensions
g = ds.create_group("dimtest" + str(idx))
g.dimensions["time"] = 0 # time
g.dimensions["nvec"] = 5 + idx # nvec
g.dimensions["sample"] = 2 + idx # sample
g.dimensions["ship"] = 3 + idx # ship
g.dimensions["ship_strlen"] = 10 # ship_strlen
g.dimensions["collide"] = 7 + idx # collide
g.create_variable("time", dimensions=("time",), dtype=np.float64)
g.create_variable(
"data", dimensions=("ship", "sample", "time", "nvec"), dtype=np.int64
)
g.create_variable("collide", dimensions=("nvec",), dtype=np.int64)
g.create_variable("non_collide", dimensions=("nvec",), dtype=np.int64)
g.create_variable("sample", dimensions=("time", "sample"), dtype=np.int64)
g.create_variable("ship", dimensions=("ship", "ship_strlen"), dtype="S1")
g.resize_dimension("time", 10 + idx)
g.variables["time"][:] = np.arange(10 + idx)
g.variables["data"][:] = np.ones((3 + idx, 2 + idx, 10 + idx, 5 + idx)) * 12.0
g.variables["collide"][...] = np.arange(5 + idx)
g.variables["non_collide"][...] = np.arange(5 + idx) + 10
g.variables["sample"][0 : 2 + idx, : 2 + idx] = np.ones((2 + idx, 2 + idx))
if version.parse(h5py.__version__) >= version.parse("3.0.0"):
g.variables["ship"][0] = list("Skiff ")
else:
g.variables["ship"][0] = string_to_char(np.array("Skiff ", dtype="|S1"))
def check_netcdf_dimensions(tmp_netcdf, write_module, read_module):
if read_module in [legacyapi, netCDF4]:
opener = read_module.Dataset
else:
opener = h5netcdf.File
with opener(tmp_netcdf, "r") as ds:
for i, grp in enumerate(["dimtest0", "dimtest1"]):
g = ds.groups[grp]
assert set(g.dimensions) == {
"collide",
"ship_strlen",
"time",
"nvec",
"ship",
"sample",
}
if read_module in [legacyapi, h5netcdf]:
assert g.dimensions["time"].isunlimited()
assert g.dimensions["time"].size == 10 + i
assert not g.dimensions["nvec"].isunlimited()
assert g.dimensions["nvec"].size == 5 + i
assert not g.dimensions["sample"].isunlimited()
assert g.dimensions["sample"].size == 2 + i
assert not g.dimensions["collide"].isunlimited()
assert g.dimensions["collide"].size == 7 + i
assert not g.dimensions["ship"].isunlimited()
assert g.dimensions["ship"].size == 3 + i
assert not g.dimensions["ship_strlen"].isunlimited()
assert g.dimensions["ship_strlen"].size == 10
else:
assert g.dimensions["time"].isunlimited()
assert g.dimensions["time"].size == 10 + i
assert not g.dimensions["nvec"].isunlimited()
assert g.dimensions["nvec"].size == 5 + i
assert not g.dimensions["sample"].isunlimited()
assert g.dimensions["sample"].size == 2 + i
assert not g.dimensions["ship"].isunlimited()
assert g.dimensions["ship"].size == 3 + i
assert not g.dimensions["ship_strlen"].isunlimited()
assert g.dimensions["ship_strlen"].size == 10
assert not g.dimensions["collide"].isunlimited()
assert g.dimensions["collide"].size == 7 + i
assert set(g.variables) == {
"data",
"collide",
"non_collide",
"time",
"sample",
"ship",
}
assert g.variables["time"].shape == (10 + i,)
assert g.variables["data"].shape == (3 + i, 2 + i, 10 + i, 5 + i)
assert g.variables["collide"].shape == (5 + i,)
assert g.variables["non_collide"].shape == (5 + i,)
assert g.variables["sample"].shape == (10 + i, 2 + i)
assert g.variables["ship"].shape == (3 + i, 10)
def write_dimensions(tmp_netcdf, write_module):
if write_module in [legacyapi, netCDF4]:
with write_module.Dataset(tmp_netcdf, "w") as ds:
create_netcdf_dimensions(ds, 0)
create_netcdf_dimensions(ds, 1)
else:
with write_module.File(tmp_netcdf, "w") as ds:
create_h5netcdf_dimensions(ds, 0)
create_h5netcdf_dimensions(ds, 1)
@pytest.fixture(
params=[
[netCDF4, netCDF4],
[legacyapi, legacyapi],
[h5netcdf, h5netcdf],
[legacyapi, netCDF4],
[netCDF4, legacyapi],
[h5netcdf, netCDF4],
[netCDF4, h5netcdf],
[legacyapi, h5netcdf],
[h5netcdf, legacyapi],
]
)
def read_write_matrix(request):
print("write module:", request.param[0].__name__)
print("read_module:", request.param[1].__name__)
return request.param
def test_dimensions(tmp_local_netcdf, read_write_matrix):
write_dimensions(tmp_local_netcdf, read_write_matrix[0])
check_netcdf_dimensions(
tmp_local_netcdf, read_write_matrix[0], read_write_matrix[1]
)
def test_no_circular_references(tmp_local_netcdf):
# https://github.com/h5py/h5py/issues/2019
with h5netcdf.File(tmp_local_netcdf, "w") as ds:
ds.dimensions["x"] = 2
ds.dimensions["y"] = 2
gc.collect()
with h5netcdf.File(tmp_local_netcdf, "r") as ds:
refs = gc.get_referrers(ds)
for ref in refs:
print(ref)
assert len(refs) == 1
def test_expanded_variables_netcdf4(tmp_local_netcdf, netcdf_write_module):
with netcdf_write_module.Dataset(tmp_local_netcdf, "w") as ds:
f = ds.createGroup("test")
f.createDimension("x", None)
f.createDimension("y", 3)
dummy1 = f.createVariable("dummy1", float, ("x", "y"))
dummy2 = f.createVariable("dummy2", float, ("x", "y"))
dummy3 = f.createVariable("dummy3", float, ("x", "y"))
dummy4 = f.createVariable("dummy4", float, ("x", "y"))
dummy1[:] = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
dummy2[:] = [[1, 2, 3]]
dummy3[:] = [[1, 2, 3], [4, 5, 6]]
# don't mask, since h5netcdf doesn't do masking
if netcdf_write_module == netCDF4:
ds.set_auto_mask(False)
res1 = dummy1[:]
res2 = dummy2[:]
res3 = dummy3[:]
res4 = dummy4[:]
with netCDF4.Dataset(tmp_local_netcdf, "r") as ds:
# don't mask, since h5netcdf doesn't do masking
if netcdf_write_module == netCDF4:
ds.set_auto_mask(False)
f = ds["test"]
np.testing.assert_allclose(f.variables["dummy1"][:], res1)
assert f.variables["dummy1"].shape == (3, 3)
np.testing.assert_allclose(f.variables["dummy2"][:], res2)
assert f.variables["dummy2"].shape == (3, 3)
np.testing.assert_allclose(f.variables["dummy3"][:], res3)
assert f.variables["dummy3"].shape == (3, 3)
np.testing.assert_allclose(f.variables["dummy4"][:], res4)
assert f.variables["dummy4"].shape == (3, 3)
with legacyapi.Dataset(tmp_local_netcdf, "r") as ds:
f = ds["test"]
np.testing.assert_allclose(f.variables["dummy1"][:], res1)
assert f.variables["dummy1"].shape == (3, 3)
assert f.variables["dummy1"]._h5ds.shape == (3, 3)
np.testing.assert_allclose(f.variables["dummy2"][:], res2)
assert f.variables["dummy2"].shape == (3, 3)
assert f.variables["dummy2"]._h5ds.shape == (1, 3)
np.testing.assert_allclose(f.variables["dummy3"][:], res3)
assert f.variables["dummy3"].shape == (3, 3)
assert f.variables["dummy3"]._h5ds.shape == (2, 3)
np.testing.assert_allclose(f.variables["dummy4"][:], res4)
assert f.variables["dummy4"].shape == (3, 3)
assert f.variables["dummy4"]._h5ds.shape == (0, 3)
with h5netcdf.File(tmp_local_netcdf, "r") as ds:
f = ds["test"]
np.testing.assert_allclose(f.variables["dummy1"][:], res1)
assert f.variables["dummy1"].shape == (3, 3)
assert f.variables["dummy1"]._h5ds.shape == (3, 3)
np.testing.assert_allclose(f.variables["dummy2"][:], res2)
assert f.variables["dummy2"].shape == (3, 3)
assert f.variables["dummy2"]._h5ds.shape == (1, 3)
np.testing.assert_allclose(f.variables["dummy3"][:], res3)
assert f.variables["dummy3"].shape == (3, 3)
assert f.variables["dummy3"]._h5ds.shape == (2, 3)
np.testing.assert_allclose(f.variables["dummy4"][:], res4)
assert f.variables["dummy4"].shape == (3, 3)
assert f.variables["dummy4"]._h5ds.shape == (0, 3)
# https://github.com/h5netcdf/h5netcdf/issues/136
@pytest.mark.skip(reason="h5py bug with track_order prevents editing with netCDF4")
def test_creation_with_h5netcdf_edit_with_netcdf4(tmp_local_netcdf):
# In version 0.12.0, the wrong file creation attributes were used,
# making netCDF4 unable to open files created by h5netcdf
# https://github.com/h5netcdf/h5netcdf/issues/128
with h5netcdf.File(tmp_local_netcdf, "w") as the_file:
the_file.dimensions = {"x": 5}
variable = the_file.create_variable("hello", ("x",), float)
variable[...] = 5
with netCDF4.Dataset(tmp_local_netcdf, mode="a") as the_file:
variable = the_file["hello"]
np.testing.assert_array_equal(variable[...].data, 5)
# Edit an existing variable
variable[:3] = 2
# Create a new variable
variable = the_file.createVariable("goodbye", float, ("x",))
variable[...] = 10
with h5netcdf.File(tmp_local_netcdf, "a") as the_file:
# Ensure edited variable is consistent with the expected data
variable = the_file["hello"]
np.testing.assert_array_equal(variable[...].data, [2, 2, 2, 5, 5])
# Ensure new variable is accessible
variable = the_file["goodbye"]
np.testing.assert_array_equal(variable[...].data, 10)
# https://github.com/h5netcdf/h5netcdf/issues/136
@pytest.mark.skip(reason="h5py bug with track_order")
def test_track_order_false(tmp_local_netcdf):
# track_order must be specified as True or not specified at all
# https://github.com/h5netcdf/h5netcdf/issues/130
with pytest.raises(ValueError):
h5netcdf.File(tmp_local_netcdf, "w", track_order=False)
with h5netcdf.File(tmp_local_netcdf, "w", track_order=True):
pass
# This should always work with the default file opening settings
# https://github.com/h5netcdf/h5netcdf/issues/136#issuecomment-1017457067
def test_more_than_7_attr_creation(tmp_local_netcdf):
with h5netcdf.File(tmp_local_netcdf, "w") as h5file:
for i in range(100):
h5file.attrs[f"key{i}"] = i
h5file.attrs[f"key{i}"] = 0
# Add a test that is expected to fail in relation to issue #136.
# We monitor it in our test suite so we notice when h5py fixes the issue,
# which enhances maintainability.
# https://github.com/h5netcdf/h5netcdf/issues/136#issuecomment-1017457067
@pytest.mark.parametrize("track_order", [False, True])
def test_more_than_7_attr_creation_track_order(tmp_local_netcdf, track_order):
if track_order:
expected_errors = pytest.raises(KeyError)
else:
# We don't expect any errors. This is effectively a void context manager
expected_errors = memoryview(b"")
with expected_errors:
with h5netcdf.File(tmp_local_netcdf, "w", track_order=track_order) as h5file:
for i in range(100):
h5file.attrs[f"key{i}"] = i
h5file.attrs[f"key{i}"] = 0
def test_group_names(tmp_local_netcdf):
# https://github.com/h5netcdf/h5netcdf/issues/68
with netCDF4.Dataset(tmp_local_netcdf, mode="w") as ds:
for i in range(10):
ds = ds.createGroup(f"group{i:02d}")
with netCDF4.Dataset(tmp_local_netcdf, "r") as ds:
assert ds.name == "/"
name = ""
for i in range(10):
name = "/".join([name, f"group{i:02d}"])
assert ds[name].name == name.split("/")[-1]
with legacyapi.Dataset(tmp_local_netcdf, "r") as ds:
assert ds.name == "/"
name = ""
for i in range(10):
name = "/".join([name, f"group{i:02d}"])
assert ds[name].name == name.split("/")[-1]
with h5netcdf.File(tmp_local_netcdf, "r") as ds:
assert ds.name == "/"
name = ""
for i in range(10):
name = "/".join([name, f"group{i:02d}"])
assert ds[name].name == name
def test_legacyapi_endianess(tmp_local_netcdf):
# https://github.com/h5netcdf/h5netcdf/issues/15
big = legacyapi._check_return_dtype_endianess("big")
little = legacyapi._check_return_dtype_endianess("little")
native = legacyapi._check_return_dtype_endianess("native")
with legacyapi.Dataset(tmp_local_netcdf, "w") as ds:
ds.createDimension("x", 4)
# test creating variable using endian keyword argument
v = ds.createVariable("big", int, ("x"), endian="big")
v[...] = 65533
v = ds.createVariable("little", int, ("x"), endian="little")
v[...] = 65533
v = ds.createVariable("native", int, ("x"), endian="native")
v[...] = 65535
with h5py.File(tmp_local_netcdf, "r") as ds:
assert ds["big"].dtype.byteorder == big
assert ds["little"].dtype.byteorder == little
assert ds["native"].dtype.byteorder == native
with h5netcdf.File(tmp_local_netcdf, "r") as ds:
assert ds["big"].dtype.byteorder == big
assert ds["little"].dtype.byteorder == little
assert ds["native"].dtype.byteorder == native
with legacyapi.Dataset(tmp_local_netcdf, "r") as ds:
assert ds["big"].dtype.byteorder == big
assert ds["little"].dtype.byteorder == little
assert ds["native"].dtype.byteorder == native
with netCDF4.Dataset(tmp_local_netcdf, "r") as ds:
assert ds["big"].dtype.byteorder == big
assert ds["little"].dtype.byteorder == little
assert ds["native"].dtype.byteorder == native
def test_bool_slicing_length_one_dim(tmp_local_netcdf):
# see https://github.com/h5netcdf/h5netcdf/issues/23
with h5netcdf.File(tmp_local_netcdf, "w") as ds:
ds.dimensions = {"x": 1, "y": 2}
v = ds.create_variable("hello", ("x", "y"), "float")
v[:] = np.ones((1, 2))
bool_slice = np.array([1], dtype=bool)
# works for legacy API
with legacyapi.Dataset(tmp_local_netcdf, "a") as ds:
data = ds["hello"][bool_slice, :]
np.testing.assert_equal(data, np.ones((1, 2)))
ds["hello"][bool_slice, :] = np.zeros((1, 2))
data = ds["hello"][bool_slice, :]
np.testing.assert_equal(data, np.zeros((1, 2)))
# should raise for h5py >= 3.0.0
with h5netcdf.File(tmp_local_netcdf, "r") as ds:
if version.parse(h5py.__version__) >= version.parse("3.0.0"):
error = "Indexing arrays must have integer dtypes"
with pytest.raises(TypeError) as e:
ds["hello"][bool_slice, :]
assert error == str(e.value)
else:
ds["hello"][bool_slice, :]
def test_default_chunking(tmp_local_netcdf):
with h5netcdf.File(tmp_local_netcdf, "w") as ds:
ds.dimensions = {"x": 10, "y": 10, "z": 10, "t": None}
v = ds.create_variable(
"hello", ("x", "y", "z", "t"), "float", chunking_heuristic="h5py"
)
chunks_h5py = v.chunks
v = ds.create_variable(
"hello2", ("x", "y", "z", "t"), "float", chunking_heuristic=None
)
chunks_default = v.chunks
ds.resize_dimension("t", 4)
v = ds.create_variable(
"hello3", ("x", "y", "z", "t"), "float", chunking_heuristic="h5py"
)
chunks_resized = v.chunks
# cases above should be equivalent to a fixed dimension with appropriate size
with h5netcdf.File(tmp_local_netcdf, "w") as ds:
ds.dimensions = {"x": 10, "y": 10, "z": 10, "t": 1024}
v = ds.create_variable(
"hello",
("x", "y", "z", "t"),
"float",
chunks=True,
chunking_heuristic="h5py",
)
chunks_true = v.chunks
with h5netcdf.File(tmp_local_netcdf, "w") as ds:
ds.dimensions = {"x": 10, "y": 10, "z": 10, "t": 4}
v = ds.create_variable(
"hello",
("x", "y", "z", "t"),
"float",
chunks=True,
chunking_heuristic="h5py",
)
chunks_true_resized = v.chunks
assert chunks_h5py == chunks_true
assert chunks_default == chunks_true
assert chunks_resized == chunks_true_resized
def test_h5netcdf_chunking(tmp_local_netcdf):
# produces much smaller chunks for unsized dimensions
with h5netcdf.File(tmp_local_netcdf, "w") as ds:
ds.dimensions = {"x": 10, "y": 10, "z": 10, "t": None}
v = ds.create_variable(
"hello", ("x", "y", "z", "t"), "float", chunking_heuristic="h5netcdf"
)
chunks_h5netcdf = v.chunks
assert chunks_h5netcdf == (10, 10, 10, 1)
# should produce chunks > 1 for the unlimited dim when the fixed dims are small
with h5netcdf.File(tmp_local_netcdf, "w") as ds:
ds.dimensions = {"x": 10, "t": None}
v = ds.create_variable(
"hello", ("x", "t"), "float", chunking_heuristic="h5netcdf"
)
chunks_h5netcdf = v.chunks
assert chunks_h5netcdf == (10, 128)
# resized unlimited dimensions should be treated like fixed dims
with h5netcdf.File(tmp_local_netcdf, "w") as ds:
ds.dimensions = {"x": 10, "y": 10, "z": 10, "t": None}
ds.resize_dimension("t", 10)
v = ds.create_variable(
"hello", ("x", "y", "z", "t"), "float", chunking_heuristic="h5netcdf"
)
chunks_h5netcdf = v.chunks
assert chunks_h5netcdf == (5, 5, 5, 10)
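# Hedged sketch of the first rule asserted above (not h5netcdf's actual
# implementation): fixed axes are chunked over their whole extent, unsized
# (unlimited) axes start at 1. The real heuristic additionally grows
# unlimited axes toward a target chunk size, as the (10, 128) case shows.
def _sketch_chunks(dim_sizes):
    # None marks an unsized (unlimited) dimension
    return tuple(1 if size is None else size for size in dim_sizes)

# _sketch_chunks((10, 10, 10, None)) -> (10, 10, 10, 1)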
def test_create_invalid_netcdf_catch_error(tmp_local_netcdf):
# see https://github.com/h5netcdf/h5netcdf/issues/138
with h5netcdf.File("test.nc", "w") as f:
try:
f.create_variable("test", ("x", "y"), data=np.ones((10, 10), dtype="bool"))
except CompatibilityError:
pass
assert repr(f.dimensions) == "<h5netcdf.Dimensions: >"
def test_dimensions_in_parent_groups():
with netCDF4.Dataset("test_netcdf.nc", mode="w") as ds:
ds0 = ds
for i in range(10):
ds = ds.createGroup(f"group{i:02d}")
ds0.createDimension("x", 10)
ds0.createDimension("y", 20)
ds0["group00"].createVariable("test", float, ("x", "y"))
var = ds0["group00"].createVariable("x", float, ("x", "y"))
var[:] = np.ones((10, 20))
with legacyapi.Dataset("test_legacy.nc", mode="w") as ds:
ds0 = ds
for i in range(10):
ds = ds.createGroup(f"group{i:02d}")
ds0.createDimension("x", 10)
ds0.createDimension("y", 20)
ds0["group00"].createVariable("test", float, ("x", "y"))
var = ds0["group00"].createVariable("x", float, ("x", "y"))
var[:] = np.ones((10, 20))
with h5netcdf.File("test_netcdf.nc", mode="r") as ds0:
with h5netcdf.File("test_legacy.nc", mode="r") as ds1:
assert repr(ds0.dimensions["x"]) == repr(ds1.dimensions["x"])
assert repr(ds0.dimensions["y"]) == repr(ds1.dimensions["y"])
assert repr(ds0["group00"]) == repr(ds1["group00"])
assert repr(ds0["group00"]["test"]) == repr(ds1["group00"]["test"])
assert repr(ds0["group00"]["x"]) == repr(ds1["group00"]["x"])
def test_array_attributes(tmp_local_netcdf):
with h5netcdf.File(tmp_local_netcdf, "w") as ds:
dt = h5py.string_dtype("utf-8")
unicode = "unicodé"
ds.attrs["unicode"] = unicode
ds.attrs["unicode_0dim"] = np.array(unicode, dtype=dt)
ds.attrs["unicode_1dim"] = np.array([unicode], dtype=dt)
ds.attrs["unicode_arrary"] = np.array([unicode, "foobár"], dtype=dt)
ds.attrs["unicode_list"] = [unicode]
dt = h5py.string_dtype("ascii")
# if dtype is ascii, it's irrelevant whether the data is provided as bytes or string
ascii = "ascii"
ds.attrs["ascii"] = ascii
ds.attrs["ascii_0dim"] = np.array(ascii, dtype=dt)
ds.attrs["ascii_1dim"] = np.array([ascii], dtype=dt)
ds.attrs["ascii_array"] = np.array([ascii, "foobar"], dtype=dt)
ds.attrs["ascii_list"] = [ascii]
ascii = b"ascii"
ds.attrs["bytes"] = ascii
ds.attrs["bytes_0dim"] = np.array(ascii, dtype=dt)
ds.attrs["bytes_1dim"] = np.array([ascii], dtype=dt)
ds.attrs["bytes_array"] = np.array([ascii, b"foobar"], dtype=dt)
ds.attrs["bytes_list"] = [ascii]
dt = h5py.string_dtype("utf-8", 10)
# unicode needs to be encoded properly for fixed size string type
ds.attrs["unicode_fixed"] = np.array(unicode.encode("utf-8"), dtype=dt)
ds.attrs["unicode_fixed_0dim"] = np.array(unicode.encode("utf-8"), dtype=dt)
ds.attrs["unicode_fixed_1dim"] = np.array([unicode.encode("utf-8")], dtype=dt)
ds.attrs["unicode_fixed_arrary"] = np.array(
[unicode.encode("utf-8"), "foobár".encode("utf-8")], dtype=dt
)
dt = h5py.string_dtype("ascii", 10)
ascii = "ascii"
ds.attrs["ascii_fixed"] = np.array(ascii, dtype=dt)
ds.attrs["ascii_fixed_0dim"] = np.array(ascii, dtype=dt)
ds.attrs["ascii_fixed_1dim"] = np.array([ascii], dtype=dt)
ds.attrs["ascii_fixed_array"] = np.array([ascii, "foobar"], dtype=dt)
ascii = b"ascii"
ds.attrs["bytes_fixed"] = np.array(ascii, dtype=dt)
ds.attrs["bytes_fixed_0dim"] = np.array(ascii, dtype=dt)
ds.attrs["bytes_fixed_1dim"] = np.array([ascii], dtype=dt)
ds.attrs["bytes_fixed_array"] = np.array([ascii, b"foobar"], dtype=dt)
ds.attrs["int"] = 1
ds.attrs["intlist"] = [1]
ds.attrs["int_array"] = np.arange(10)
with h5netcdf.File(tmp_local_netcdf, mode="r") as ds:
assert ds.attrs["unicode"] == unicode
assert ds.attrs["unicode_0dim"] == unicode
assert ds.attrs["unicode_1dim"] == unicode
assert ds.attrs["unicode_arrary"] == [unicode, "foobár"]
assert ds.attrs["unicode_list"] == unicode
# bytes and strings are received as strings for h5py3
if version.parse(h5py.__version__) >= version.parse("3.0.0"):
ascii = "ascii"
foobar = "foobar"
# and bytes for h5py2
else:
ascii = b"ascii"
foobar = b"foobar"
assert ds.attrs["ascii"] == "ascii"
assert ds.attrs["ascii_0dim"] == ascii
assert ds.attrs["ascii_1dim"] == ascii
assert ds.attrs["ascii_array"] == [ascii, foobar]
# list is decoded for h5py2
assert ds.attrs["ascii_list"] == "ascii"
assert ds.attrs["bytes"] == ascii
assert ds.attrs["bytes_0dim"] == ascii
assert ds.attrs["bytes_1dim"] == ascii
assert ds.attrs["bytes_array"] == [ascii, foobar]
# list is decoded for h5py2
assert ds.attrs["bytes_list"] == "ascii"
assert ds.attrs["unicode_fixed"] == unicode
assert ds.attrs["unicode_fixed_0dim"] == unicode
assert ds.attrs["unicode_fixed_1dim"] == unicode
assert ds.attrs["unicode_fixed_arrary"] == [unicode, "foobár"]
ascii = "ascii"
assert ds.attrs["ascii_fixed"] == ascii
assert ds.attrs["ascii_fixed_0dim"] == ascii
assert ds.attrs["ascii_fixed_1dim"] == ascii
assert ds.attrs["ascii_fixed_array"] == [ascii, "foobar"]
assert ds.attrs["bytes_fixed"] == ascii
assert ds.attrs["bytes_fixed_0dim"] == ascii
assert ds.attrs["bytes_fixed_1dim"] == ascii
assert ds.attrs["bytes_fixed_array"] == [ascii, "foobar"]
assert ds.attrs["int"] == 1
assert ds.attrs["intlist"] == 1
np.testing.assert_equal(ds.attrs["int_array"], np.arange(10))
with legacyapi.Dataset(tmp_local_netcdf, mode="r") as ds:
assert ds.unicode == unicode
assert ds.unicode_0dim == unicode
assert ds.unicode_1dim == unicode
assert ds.unicode_array == [unicode, "foobár"]
assert ds.unicode_list == unicode
# bytes and strings are received as strings for h5py3
if version.parse(h5py.__version__) >= version.parse("3.0.0"):
ascii = "ascii"
foobar = "foobar"
# and bytes for h5py2
else:
ascii = b"ascii"
foobar = b"foobar"
assert ds.ascii == "ascii"
assert ds.ascii_0dim == ascii
assert ds.ascii_1dim == ascii
assert ds.ascii_array == [ascii, foobar]
# list is decoded for h5py2
assert ds.ascii_list == "ascii"
assert ds.bytes == ascii
assert ds.bytes_0dim == ascii
assert ds.bytes_1dim == ascii
assert ds.bytes_array == [ascii, foobar]
# list is decoded for h5py2
assert ds.bytes_list == "ascii"
assert ds.unicode_fixed == unicode
assert ds.unicode_fixed_0dim == unicode
assert ds.unicode_fixed_1dim == unicode
assert ds.unicode_fixed_array == [unicode, "foobár"]
ascii = "ascii"
assert ds.ascii_fixed == ascii
assert ds.ascii_fixed_0dim == ascii
assert ds.ascii_fixed_1dim == ascii
assert ds.ascii_fixed_array == [ascii, "foobar"]
assert ds.bytes_fixed == ascii
assert ds.bytes_fixed_0dim == ascii
assert ds.bytes_fixed_1dim == ascii
assert ds.bytes_fixed_array == [ascii, "foobar"]
assert ds.int == 1
assert ds.intlist == 1
np.testing.assert_equal(ds.int_array, np.arange(10))
with netCDF4.Dataset(tmp_local_netcdf, mode="r") as ds:
assert ds.unicode == unicode
assert ds.unicode_0dim == unicode
assert ds.unicode_1dim == unicode
assert ds.unicode_array == [unicode, "foobár"]
assert ds.unicode_list == unicode
ascii = "ascii"
assert ds.ascii == ascii
assert ds.ascii_0dim == ascii
assert ds.ascii_1dim == ascii
assert ds.ascii_array == [ascii, "foobar"]
assert ds.ascii_list == ascii
assert ds.bytes == ascii
assert ds.bytes_0dim == ascii
assert ds.bytes_1dim == ascii
assert ds.bytes_array == [ascii, "foobar"]
# writing/reading lists is broken with h5py2/netCDF4
if version.parse(h5py.__version__) >= version.parse("3.0.0"):
assert ds.bytes_list == ascii
assert ds.unicode_fixed == unicode
assert ds.unicode_fixed_0dim == unicode
assert ds.unicode_fixed_1dim == unicode
assert ds.unicode_fixed_array == [unicode, "foobár"]
assert ds.ascii_fixed == ascii
assert ds.ascii_fixed_0dim == ascii
assert ds.ascii_fixed_1dim == ascii
assert ds.ascii_fixed_array == [ascii, "foobar"]
assert ds.bytes_fixed == ascii
assert ds.bytes_fixed_0dim == ascii
assert ds.bytes_fixed_1dim == ascii
assert ds.bytes_fixed_array == [ascii, "foobar"]
assert ds.int == 1
assert ds.intlist == 1
np.testing.assert_equal(ds.int_array, np.arange(10))
|
shoyer/h5netcdf
|
h5netcdf/tests/test_h5netcdf.py
|
Python
|
bsd-3-clause
| 74,613
|
[
"NetCDF"
] |
c5c791c16ea70e8f6a0178969bafda9c7689ddfcb83bc45e7c9c105f6c7eea7f
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_applicationprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of ApplicationProfile Avi RESTful Object
description:
- This module is used to configure ApplicationProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
description:
description:
- User defined description for the object.
dns_service_profile:
description:
- Specifies various dns service related controls for virtual service.
dos_rl_profile:
description:
- Specifies various security related controls for virtual service.
http_profile:
description:
- Specifies the http application proxy profile parameters.
name:
description:
- The name of the application profile.
required: true
preserve_client_ip:
description:
- Specifies if client ip needs to be preserved for backend connection.
- Not compatible with connection multiplexing.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
tcp_app_profile:
description:
- Specifies the tcp application proxy profile parameters.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Specifies which application layer proxy is enabled for the virtual service.
- Enum options - APPLICATION_PROFILE_TYPE_L4, APPLICATION_PROFILE_TYPE_HTTP, APPLICATION_PROFILE_TYPE_SYSLOG, APPLICATION_PROFILE_TYPE_DNS,
- APPLICATION_PROFILE_TYPE_SSL.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the application profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create an Application Profile for HTTP application enabled for SSL traffic
avi_applicationprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
http_profile:
cache_config:
age_header: true
aggressive: false
date_header: true
default_expire: 600
enabled: false
heuristic_expire: false
max_cache_size: 0
max_object_size: 4194304
mime_types_group_refs:
- admin:System-Cacheable-Resource-Types
min_object_size: 100
query_cacheable: false
xcache_header: true
client_body_timeout: 0
client_header_timeout: 10000
client_max_body_size: 0
client_max_header_size: 12
client_max_request_size: 48
compression_profile:
compressible_content_ref: admin:System-Compressible-Content-Types
compression: false
remove_accept_encoding_header: true
type: AUTO_COMPRESSION
connection_multiplexing_enabled: true
hsts_enabled: false
hsts_max_age: 365
http_to_https: false
httponly_enabled: false
keepalive_header: false
keepalive_timeout: 30000
max_bad_rps_cip: 0
max_bad_rps_cip_uri: 0
max_bad_rps_uri: 0
max_rps_cip: 0
max_rps_cip_uri: 0
max_rps_unknown_cip: 0
max_rps_unknown_uri: 0
max_rps_uri: 0
post_accept_timeout: 30000
secure_cookie_enabled: false
server_side_redirect_to_https: false
spdy_enabled: false
spdy_fwd_proxy_mode: false
ssl_client_certificate_mode: SSL_CLIENT_CERTIFICATE_NONE
ssl_everywhere_enabled: false
websockets_enabled: true
x_forwarded_proto_enabled: false
xff_alternate_name: X-Forwarded-For
xff_enabled: true
name: System-HTTP
tenant_ref: admin
type: APPLICATION_PROFILE_TYPE_HTTP
"""
RETURN = '''
obj:
description: ApplicationProfile (api/applicationprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
description=dict(type='str',),
dns_service_profile=dict(type='dict',),
dos_rl_profile=dict(type='dict',),
http_profile=dict(type='dict',),
name=dict(type='str', required=True),
preserve_client_ip=dict(type='bool',),
tcp_app_profile=dict(type='dict',),
tenant_ref=dict(type='str',),
type=dict(type='str', required=True),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'applicationprofile',
set([]))
if __name__ == '__main__':
main()
|
ravibhure/ansible
|
lib/ansible/modules/network/avi/avi_applicationprofile.py
|
Python
|
gpl-3.0
| 6,723
|
[
"VisIt"
] |
9f29b7abd53c591e48fd45d7bb45f22e49b7757958e1d95d07146048db1d9714
|
import operator as op
from functools import partial
import logging
import lib.const as C
import lib.visit as v
from .. import add_artifacts
from .. import util
from .. import sample
from ..meta import class_lookup
from ..meta.template import Template
from ..meta.clazz import Clazz
from ..meta.method import Method
from ..meta.field import Field
from ..meta.statement import Statement, to_statements
from ..meta.expression import Expression, to_expression, gen_E_gen
class Adapter(object):
def __init__(self, smpls):
self._smpls = smpls
self._smpl_clss = map(class_lookup, sample.decls(smpls).keys())
self._clss = []
self._aux_name = C.ADP.AUX
self._aux = None
@property
def aux_name(self):
return self._aux_name
@property
def aux(self):
return self._aux
@aux.setter
def aux(self, v):
self._aux = v
@v.on("node")
def visit(self, node):
"""
This is the generic method to initialize the dynamic dispatcher
"""
# find possible classes involved in the adapter pattern
# assume all classes (except for interfaces) are candidates
def find_clss_involved(self, tmpl):
for cls in util.flatten_classes(tmpl.classes, "inners"):
# ignore interface
if cls.is_itf and not cls.subs:
logging.debug("ignore interface {}".format(cls.name))
continue
self._clss.append(cls)
# a class is a candidate if some class from the samples is a subtype of it
def is_candidate_cls(self, cls):
return util.exists(lambda c: c <= cls, self._smpl_clss)
# assume methods that participate will be neither <init> nor static
@staticmethod
def is_candidate_mtd(mtd):
return not mtd.is_init and not mtd.is_static and not mtd.params
# retrieve candidate methods (in general)
@staticmethod
def get_candidate_mtds(cls):
mtds = cls.mtds
if cls.is_itf and cls.subs:
mtds = util.flatten(map(op.attrgetter("mtds"), cls.subs))
return filter(Adapter.is_candidate_mtd, mtds)
# add a global counter
@staticmethod
def add_global_counter(aux, fname):
z = to_expression(u"0")
d = Field(clazz=aux, mods=C.PRST, typ=C.J.i, name=fname, init=z)
aux.add_flds([d])
return d
# restrict call stack for the given method via a global counter
@staticmethod
def limit_depth(aux, mtd, depth):
fname = mtd.name + "_depth"
Adapter.add_global_counter(aux, fname)
prologue = to_statements(mtd, u"""
if ({fname} > {depth}) return;
{fname} = {fname} + 1;
""".format(**locals()))
epilogue = to_statements(mtd, u"""
{fname} = {fname} - 1;
""".format(**locals()))
mtd.body = prologue + mtd.body + epilogue
# a method that calls the adaptee
@staticmethod
def call_adaptee(aux, clss):
callee = u'_'.join(["rcv", aux.name])
# NOTE: piggy-back on Accessor's global array
rcv = u"_prvt_fld[" + getattr(aux, C.ADP.FLD) + u"]"
params = [(C.J.i, u"mtd_id"), (aux.name, callee)]
reflect = Method(clazz=aux, mods=C.PBST, params=params, name=u"call_adaptee")
def switch( cls ):
mtds = Adapter.get_candidate_mtds(cls)
def invoke(mtd):
cls = mtd.clazz
# if there is no implementer for this method in interface, ignore it
if cls.is_itf and not cls.subs: return u''
if len(mtd.params) != 0 or mtd.typ != C.J.v: return u''
call = u"""
if ({0} != null && {0}.{1} != null) {{
{0}.{1}.{2}();
}}
""".format(callee, rcv, mtd.name)
adaptee_id = getattr(aux, "adaptee")
return u"if ({adaptee_id} == {mtd.id}) {{ {call} }}".format(**locals())
invocations = filter(None, map(invoke, mtds))
return "\nelse ".join(invocations)
tests = filter(None, map(switch, clss))
prefix = u"if (" + getattr(aux, C.ADP.ADPT) + u" == mtd_id) {\n"
reflect.body = to_statements(reflect, prefix + u"\nelse ".join(tests) + u"\n}")
Adapter.limit_depth(aux, reflect, 2)
aux.add_mtds([reflect])
setattr(aux, "call_adaptee", reflect)
##
## generate an aux type
##
def gen_aux_cls(self, tmpl):
tmpl.acc_auxs.append(self.aux_name)
aux = Clazz(name=self.aux_name, mods=[C.mod.PB], subs=self._clss)
self.aux = aux
# set role variables
def set_role(role):
setattr(aux, role, '_'.join([role, aux.name]))
map(set_role, C.adp_roles)
# add fields that stand for non-deterministic role choices
def aux_fld(init, ty, nm):
if hasattr(aux, nm): nm = getattr(aux, nm)
return Field(clazz=aux, mods=[C.mod.ST], typ=ty, name=nm, init=init)
hole = to_expression(C.T.HOLE)
aux_int = partial(aux_fld, hole, C.J.i)
c_to_e = lambda c: to_expression(unicode(c))
mtds = util.flatten(map(Adapter.get_candidate_mtds, self._clss))
## range check
rg_chk = Method(clazz=aux, mods=[C.mod.ST, C.mod.HN], name=u"checkRange")
checkers = []
gen_range = lambda ids: gen_E_gen(map(c_to_e, util.rm_dup(ids)))
get_id = op.attrgetter("id")
# range check for an adapter index, which shouldn't be negative
def chk_positive(role):
rv = getattr(aux, role)
checkers.append("assert {} >= 0;".format(rv))
map(chk_positive, [C.ADP.FLD])
mtd_ids = map(get_id, mtds)
mtd_init = gen_range(mtd_ids)
aux_int_adap = partial(aux_fld, mtd_init, C.J.i)
adapter_roles = [C.ADP.ADPT, C.ADP.ADPE]
adapter_flds = map(aux_int_adap, adapter_roles)
aux.add_flds(adapter_flds + [aux_int(C.ADP.FLD)])
Adapter.call_adaptee(aux, self._clss)
#checkers.append(u"assert (argNum(" + getattr(aux, C.ADP.ADPT) + ")) == 0 && (argNum(" + getattr(aux, C.ADP.ADPT) + ")) == 0;")
#checkers.append(u"assert (retType(" + getattr(aux, C.ADP.ADPT) + ")) == -1 && (retType(" + getattr(aux, C.ADP.FLD) + ")) == -1;")
add_artifacts([aux.name])
return aux
@v.when(Template)
def visit(self, node):
self.find_clss_involved(node)
aux = self.gen_aux_cls(node)
node.add_classes([aux])
@v.when(Clazz)
def visit(self, node): pass
@v.when(Field)
def visit(self, node): pass
@v.when(Method)
def visit(self, node):
# skip the method with explicit annotations, e.g., @Factory
if node.annos: return
# skip java.lang.*
if node.clazz.pkg in ["java.lang"]: return
# can't edit interface's methods as well as client side
if node.clazz.is_itf or node.clazz.client: return
cname = node.clazz.name
# adapter candidate
if len(node.params) == 0 and node.typ == C.J.v and not node.is_static:
mname = u"call_adaptee"
args = u", ".join([unicode(node.id), C.J.THIS])
call = u"{}({});".format(u".".join([self.aux_name, mname]), args)
node.body += to_statements(node, call)
logging.debug("{}.{} => {}.{}".format(cname, node.name, self.aux_name, mname))
@v.when(Statement)
def visit(self, node): return [node]
@v.when(Expression)
def visit(self, node): return node
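# Hedged module-level sketch (not used by the rewriter) of the
# depth-limiting pattern that Adapter.limit_depth() above generates,
# shown on a plain Python function instead of synthesized statements.
_call_depth = 0
def _depth_limited(depth=2):
    global _call_depth
    if _call_depth > depth:
        return
    _call_depth += 1
    try:
        pass  # body that may recurse into _depth_limited()
    finally:
        _call_depth -= 1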
|
plum-umd/pasket
|
pasket/rewrite/adapter.py
|
Python
|
mit
| 6,903
|
[
"VisIt"
] |
0ab3bf2f18273e0eb492e0a9daa795a4e10c907f60aa612f1589c6f39ed6ac6e
|
# 10/24/2013 Sebastian Raschka
# BondLab PyMOL plugin
from tkinter import *
from pymol import cmd
import sys, zlib, string
def __init__(self):
"""adds plugin to PyMOL menu"""
self.menuBar.addmenuitem('Plugin', 'command',
'BondLab',
label = 'BondLab',
command = lambda : open_menu())
def open_menu():
global master
master = Tk()
master.wm_geometry("300x440")
master.title('Customize Bonds')
Button(master, text='OK', command=close).pack(side=BOTTOM)
Label(master, text="Labels").pack()
labels = IntVar(master=master)
Radiobutton(master, text='on', variable=labels, value=1,
command=lambda: show_labels(True)).pack(anchor=W)
Radiobutton(master, text='off', variable=labels, value=2,
command=lambda: show_labels(False)).pack(anchor=W)
separator1 = Frame(master, height=2, bd=1, relief=SUNKEN)
separator1.pack(fill=X, padx=5, pady=5)
Label(master, text="Line Width").pack()
line_width = IntVar(master=master)
Radiobutton(master, text='light', variable=line_width, value=1,
command=lambda: adjust_width(1)).pack(anchor=W)
Radiobutton(master, text='medium', variable=line_width, value=2,
command=lambda: adjust_width(2)).pack(anchor=W)
Radiobutton(master, text='heavy', variable=line_width, value=3,
command=lambda: adjust_width(3)).pack(anchor=W)
separator2 = Frame(master, height=2, bd=1, relief=SUNKEN)
separator2.pack(fill=X, padx=5, pady=5)
Label(master, text="Dash Gaps").pack()
dash_gaps = IntVar(master=master)
Radiobutton(master, text='none', variable=dash_gaps, value=1,
command=lambda: adjust_gaps(1)).pack(anchor=W)
Radiobutton(master, text='narrow', variable=dash_gaps, value=2,
command=lambda: adjust_gaps(2)).pack(anchor=W)
Radiobutton(master, text='normal', variable=dash_gaps, value=3,
command=lambda: adjust_gaps(3)).pack(anchor=W)
Radiobutton(master, text='wide', variable=dash_gaps, value=4,
command=lambda: adjust_gaps(4)).pack(anchor=W)
separator2 = Frame(master, height=2, bd=1, relief=SUNKEN)
separator2.pack(fill=X, padx=5, pady=5)
Label(master, text="Dash Color").pack()
dash_color = IntVar(master=master)
Radiobutton(master, text='yellow', variable=dash_color, value=1,
command=lambda: adjust_color(1)).pack(anchor=W)
Radiobutton(master, text='blue', variable=dash_color, value=2,
command=lambda: adjust_color(2)).pack(anchor=W)
Radiobutton(master, text='green', variable=dash_color, value=3,
command=lambda: adjust_color(3)).pack(anchor=W)
Radiobutton(master, text='red', variable=dash_color, value=4,
command=lambda: adjust_color(4)).pack(anchor=W)
Radiobutton(master, text='white', variable=dash_color, value=5,
command=lambda: adjust_color(5)).pack(anchor=W)
Radiobutton(master, text='black', variable=dash_color, value=6,
command=lambda: adjust_color(6)).pack(anchor=W)
mainloop()
def show_labels(show):
if show:
cmd.show("labels")
else:
cmd.hide("labels")
def adjust_width(width):
widths = {1:"0.03", 2:"0.1", 3:"0.2"}
cmd.set("dash_radius", widths[width])
def adjust_gaps(gap):
gaps = {1:"0.0", 2:"0.25", 3:"0.5", 4:"0.75"}
cmd.set("dash_gap", gaps[gap])
def adjust_color(col_ind):
colors = {1:"yellow", 2:"blue", 3:"green", 4:"red", 5:"white", 6:"black"}
cmd.set("dash_color", colors[col_ind])
def close():
master.destroy()
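# Hedged usage sketch: the adjust_* helpers above are thin wrappers around
# PyMOL's cmd.set(); the same styling can be applied directly, e.g.:
def _demo_style():
    cmd.set("dash_radius", "0.1")  # medium line width
    cmd.set("dash_gap", "0.5")     # normal gaps
    cmd.set("dash_color", "blue")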
|
rasbt/BondPack
|
src/BondLab.py
|
Python
|
gpl-3.0
| 3,647
|
[
"PyMOL"
] |
cff6cbd8266e401290c03588c5252a828068cac6f2169ae36a227f1dd056532d
|
# This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
from sqlalchemy.sql import asc
from Core.paconf import PA
from Core.db import session
from Core.maps import Updates, Galaxy, Planet, Attack
from Core.loadable import loadable, route
class attack(loadable):
"""Create an attack page on the webby with automatic parsed scans"""
usage = " [<eta|landingtick> <coordlist> [comment]] | [list] | [show <id>]"
access = "half"
@route(r"(?:list)?")
def list(self,message,user,params):
Q = session.query(Attack)
Q = Q.filter(Attack.landtick >= Updates.current_tick() - Attack._active_ticks)
Q = Q.order_by(asc(Attack.id))
replies = []
for attack in Q:
replies.append("(%d LT: %d %s)" %(attack.id,attack.landtick,attack.comment,))
reply = "Open attacks: " + " ".join(replies)
message.reply(reply)
@route(r"(?:show\s+)?(\d+)")
def show(self,message,user,params):
id = params.group(1)
attack = Attack.load(id)
if attack is None:
message.alert("No attack exists with id %s" %(id))
return
message.reply(str(attack))
@route(r"(?:new\s+)?(\d+)\s+([. :\-\d,]+)(?:\s*(.+))?")
def new(self, message, user, params):
tick = Updates.current_tick()
comment = params.group(3) or ""
when = int(params.group(1))
if when < PA.getint("numbers", "protection"):
eta = when
when += tick
elif when <= tick:
message.alert("Can not create attacks in the past. You wanted tick %s, but current tick is %s." % (when, tick,))
return
else:
eta = when - tick
if when > 32767:
when = 32767
attack = Attack(landtick=when,comment=comment)
session.add(attack)
for coord in re.findall(loadable.coord, params.group(2)):
if not coord[4]:
galaxy = Galaxy.load(coord[0],coord[2])
if galaxy:
attack.addGalaxy(galaxy)
else:
planet = Planet.load(coord[0],coord[2],coord[4])
if planet:
attack.addPlanet(planet)
session.commit()
message.reply(str(attack))
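# Hedged sketch (not part of the plugin) of the eta/landing-tick rule in
# attack.new() above: values below the protection threshold are read as
# relative ETAs, anything else as an absolute landing tick, and the result
# is capped to the SQL smallint maximum. The threshold here is illustrative;
# the real value comes from PA.getint("numbers", "protection").
def _resolve_landtick(when, tick, protection=12):
    if when < protection:
        when += tick  # interpreted as an ETA
    return min(when, 32767)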
|
ellonweb/merlin
|
Hooks/target/attack.py
|
Python
|
gpl-2.0
| 3,294
|
[
"Galaxy"
] |
d4265a7ebac4bad39f79bc3705becb3f37b8dfca9c47a059692b3408b33ebcd3
|
__author__ = 'Mike McCann'
__copyright__ = '2011'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
View functions to support the main query web page
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
from tools.colormaps import cmaps
from django.shortcuts import render
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseBadRequest
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.db.utils import ConnectionDoesNotExist
from django.views.decorators.cache import cache_page
from utils.STOQSQManager import STOQSQManager
from utils import encoders
import json
import pprint
import csv
import random
import string
import time
import logging
from .wms import ActivityView
from os import path
from utils.MPQuery import MPQuerySet
logger = logging.getLogger(__name__)
class InvalidMeasuredParameterQueryException(Exception):
pass
class NoParameterSelectedException(Exception):
pass
# Mapping from HTTP request parameters to what STOQSQManager needs
query_parms = {
'sampledparametersgroup': 'sampledparametersgroup',
'measuredparametersgroup': 'measuredparametersgroup',
'parameterstandardname': 'parameterstandardname',
'parameterminmax': 'parameterminmax', # Array of name, min, max in hash keyed by 'dataaccess' and 'plot'
'flotlimits': ('xaxis_min', 'xaxis_max', 'yaxis_min', 'yaxis_max'), # Flot plot axis limits
'time': ('start_time','end_time'), # Single values
'depth': ('min_depth', 'max_depth'), # Single values
'simpledepthtime': [], # List of x,y values
'platforms': 'platforms', # Specified once in the query string for each platform.
'parametervalues': [], # Appended to below with any _MIN _MAX request items
'parameterparameter': ('px', 'py', 'pz', 'pc', # Parameters to plot
'xlog', 'ylog', 'zlog', 'clog'), # Flags for log-scale
# TODO: Could simplify these flags by putting them into a dictionary...
'get_actual_count': 'get_actual_count', # Flag value from checkbox
'showsigmatparametervalues': 'showsigmatparametervalues', # Flag value from checkbox
'showstandardnameparametervalues': 'showstandardnameparametervalues', # Flag value from checkbox
'showallparametervalues': 'showallparametervalues', # Flag value from checkbox
'showparameterplatformdata': 'showparameterplatformdata', # Flag value from checkbox
'parameterplot': ('parameterplotid', # Plot radio button selection
'platformplotname'), # - client knows platform name
'parametercontourplot': ('parametercontourplotid', # Plot contour radio button selection
'platformcontourplotname'),
'parametertimeplotid': 'parametertimeplotid', # Plot checkbox id values
'parametertimeplotcoord': 'parametertimeplotcoord', # Plot checkbox coordinate names
'showgeox3dmeasurement': 'showgeox3dmeasurement', # Flag value from checkbox
'slice_minutes': 'slice_minutes', # Parameter for visualization
'showgeox3dsample': 'showgeox3dsample', # Flag value from checkbox
'showplatforms': 'showplatforms', # Flag value from checkbox
'showdataas': 'showdataas', # Value from radio button, either 'contour' or 'scatter'
'cm': 'cm', # Value from colormap picker
'updatefromzoom': 'updatefromzoom', # To inform how to updateTemporal()
'only': 'only', # List of options to update - when only a partial response is needed
'except': 'except', # List of options not to update - when all but listed items are needed
'parametertab': 'parametertab', # = 1 if Parameter/Station tab is active and full resolution timeSeries data is needed
'secondsperpixel': 'secondsperpixel', # Resolution of time-depth-flot window
'x3dterrains': 'x3dterrains', # Hash of 3D Terrain info
'x3dplaybacks': 'x3dplaybacks', # X3D Playback info
'resources': 'resources', # Hash of Resources for Activities in the selection
've': 've', # Vertical Exaggeration of selected terrain in UI
'geoorigin': 'geoorigin', # GeoOrigin of selected terrain in UI
'ppfr': 'ppfr', # Parameter-Parameter free range flag
'pplr': 'pplr', # Parameter-Parameter linear regression flag
'ppsl': 'ppsl', # Parameter-Parameter sample locations flag
'ppns': 'ppns', # Parameter-Parameter no-stride flag
'mplabels': 'mplabels', # MeasuredParameter labels from Attributes selections
'activitynames': 'activitynames', # Activities (NetCDF files) selected for plotting
'full_screen': 'full_screen', # For making higher resolution graphics
'cmincmax_lock': 'cmincmax_lock', # Preserve colormap min and max values from the UI
'speedup': 'speedup', # platformanimation speed factor
'sn_colormap': 'sn_colormap', # Flag value from checkbox
}
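# Hedged sketch (illustrative only): how the mapping above folds request
# parameters, mirroring the loops in queryData() and queryMap() below.
def _demo_query_parms_fold():
    from django.http import QueryDict
    demo = QueryDict("platforms=tethys&start_time=2011-01-01&end_time=2011-02-01")
    # 'time' maps to a tuple, so each member is read as a single value
    time_bounds = [demo.get(p) for p in query_parms['time']]  # ['2011-01-01', '2011-02-01']
    # 'platforms' maps to a plain key, so it is read with getlist()
    platforms = demo.getlist('platforms')  # ['tethys']
    return time_bounds, platforms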
def _buildMapFile(request, qm, options):
if 'platforms' not in json.loads(options):
return
# 'mappath' should be in the session from the call to queryUI(); set it here in case queryUI() has not set it
if 'mappath' in request.session:
logger.debug("Reusing request.session['mappath'] = %s", request.session['mappath'])
else:
request.session['mappath'] = __name__ + '_' + ''.join(random.choices(string.ascii_uppercase + string.digits, k=10)) + '.map'
logger.debug("Setting new request.session['mappath'] = %s", request.session['mappath'])
# A rudimentary class of items for passing a list of them to the activity.map template
class Item(object):
def __repr__(self):
return '%s %s %s %s' % (self.id, self.name, self.color, self.geo_query,)
# Add an item (a mapfile layer) for each platform - unioned up
item_list = [] # Replicates queryset from an Activity query (needs name & id) with added geo_query & color attributes
if qm.kwargs['platforms']:
# Build only the layers of Platforms selected in the UI
platform_layer_types = []
for plat_selected in qm.kwargs['platforms']:
platform_layers = []
for plat_type, plats in json.loads(options)['platforms'].items():
for plat in plats:
if plat[0] == plat_selected:
platform_layers.append(plat)
platform_layer_types.append(platform_layers)
else:
# Build all the Platform layers
platform_layer_types = list(json.loads(options)['platforms'].values())
trajectory_union_layer_string = ''
for plats in platform_layer_types:
for p in plats:
# TODO: Test whether it's a point or track for trajectoryprofile data
if p[3].lower() != 'trajectory':
continue
item = Item()
item.id = p[1]
item.name = p[0]
trajectory_union_layer_string += str(item.name) + ','
item.color = '"#%s"' % p[2]
item.type = 'line'
item.extra_style = ''
item.geo_query = qm.getActivityGeoQuery(Q(platform__name='%s' % p[0]))
item_list.append(item)
station_union_layer_string = ''
for plats in platform_layer_types:
for p in plats:
# First trajectoryprofile dataset is IMOS-EAC in which the trajectory is just variation in depth, so plot as a station
# TODO: Test whether it's a point or track for trajectoryprofile data
if p[3].lower() != 'timeseries' and p[3].lower() != 'timeseriesprofile' and p[3].lower() != 'trajectoryprofile':
continue
item = Item()
item.id = p[1]
item.name = p[0]
station_union_layer_string += str(item.name) + ','
item.color = '"#%s"' % p[2]
item.type = 'point'
item.extra_style = 'SYMBOL "circle"\n SIZE 7.0\n OUTLINECOLOR 1 1 1'
item.geo_query = qm.getActivityGeoQuery(Q(platform__name='%s' % p[0]), pointFlag=True)
item_list.append(item)
# Add an item for the samples for the existing query - do not add it to the union, it's a different type
sample_geo_query = qm.getSampleGeoQuery()
if sample_geo_query:
item = Item()
item.id = 'sample_points'
item.name = 'sample_points'
item.color = '255 255 255'
item.type = 'point'
item.geo_query = sample_geo_query
item.extra_style = 'SYMBOL "circle"\n SIZE 7.0\n OUTLINECOLOR 0 0 0 '
item_list.append(item)
trajectory_union_layer_string = trajectory_union_layer_string[:-1]
station_union_layer_string = station_union_layer_string[:-1]
##logger.debug('item_list = %s', pprint.pformat(item_list))
av = ActivityView(request, item_list, trajectory_union_layer_string, station_union_layer_string)
av.generateActivityMapFile()
# Cache responses from this view for 15 minutes
@cache_page(60 * 15)
def queryData(request, fmt=None):
'''
Process data requests from the main query web page. Returns both summary Activity and actual MeasuredParameter data
as retrieved from STOQSQManager.
'''
response = HttpResponse()
params = {}
for key, value in list(query_parms.items()):
if type(value) in (list, tuple):
params[key] = [request.GET.get(p, None) for p in value]
else:
params[key] = request.GET.getlist(key)
# Look for any parameter _MIN & _MAX input from the UI. After retrieving the above query_parms the
# only thing left in the request QueryDict should be the parameter _MIN _MAX selections.
for key, value in list(request.GET.items()):
if key.endswith('_MIN'): # Just test for _MIN; UI will always provide _MIN & _MAX
name = key.split('_MIN')[0]
try:
pminmax = {name: (request.GET.getlist(name + '_MIN')[0], request.GET.getlist(name + '_MAX')[0])}
except Exception:
logger.exception('Could not get parameter values even though ' + key + ' ends with _MIN')
continue
params['parametervalues'].append(pminmax)
logger.debug('Adding to parametervalues: %s', pprint.pformat(pminmax))
# To support unit testing and the follow-on expectation that dbAlias is in request.META
if 'dbAlias' not in request.META:
request.META['dbAlias'] = dbAlias
logger.debug('Instantiating STOQSQManager with params = %s', params)
qm = STOQSQManager(request, response, request.META['dbAlias'], **params)
try:
qm.buildQuerySets()
except ValidationError as e:
logger.error(str(e))
return HttpResponseBadRequest('Bad request: ' + str(e))
except ConnectionDoesNotExist as e:
logger.error(str(e))
return HttpResponseBadRequest('Bad request: Database "' + request.META['dbAlias'] + '" Does Not Exist')
try:
start_time = time.time()
options = json.dumps(qm.generateOptions(), cls=encoders.STOQSJSONEncoder)
logger.info(f"generateOptions() took {1000*(time.time()- start_time):6.1f} ms to build query/summary response")
except ConnectionDoesNotExist as e:
logger.warn(e)
return HttpResponseNotFound('The database alias <b>%s</b> does not exist on this server.' % dbAlias)
##logger.debug('options = %s', pprint.pformat(options))
##logger.debug('len(simpledepthtime) = %d', len(json.loads(options)['simpledepthtime']))
if not fmt: # here we export in a given format, or just provide summary data if no format is given.
response['Content-Type'] = 'text/json'
response.write(options)
elif fmt == 'json':
response['Content-Type'] = 'text/json'
response.write(serializers.serialize('json', qm.qs))
elif fmt == 'dap':
logger.info('dap output')
return response
# Do not cache this "view", it creates the mapfile
def queryMap(request):
'''
Build the mapfile in a separate view
'''
response = HttpResponse()
params = {}
for key, value in list(query_parms.items()):
if type(value) in (list, tuple):
params[key] = [request.GET.get(p, None) for p in value]
else:
params[key] = request.GET.getlist(key)
# The Javascript that constructs the request items must remove any items that will make the
# server busy with requests that have nothing to do with making a map; for example, removing
# 'parameterparameterpng' and 'parameterparameterx3d' from 'only' helps speed things up.
logger.debug('Instantiating STOQSQManager with params = %s', params)
qm = STOQSQManager(request, response, request.META['dbAlias'], **params)
qm.buildQuerySets()
start_time = time.time()
options = json.dumps(qm.generateOptions(), cls=encoders.STOQSJSONEncoder)
logger.info(f"generateOptions() took {1000*(time.time()- start_time):7.1f} ms to build query/map response")
##logger.debug('options = %s', pprint.pformat(options))
_buildMapFile(request, qm, options)
response['Content-Type'] = 'text/json'
response.write(options)
return response
# Do not cache this "view", otherwise the incorrect url_mappath is used
def queryUI(request):
'''
Build and return main query web page
'''
##request.session.flush()
if 'mappath' in request.session:
logger.debug("Reusing request.session['mappath'] = %s", request.session['mappath'])
else:
request.session['mappath'] = __name__ + '_' + ''.join(random.choices(string.ascii_uppercase + string.digits, k=10)) + '.map'
logger.debug("Setting new request.session['mappath'] = %s", request.session['mappath'])
# Use list of tuples to preserve order
formats=[
('parquet', 'Apache Parquet binary column format', ),
('kml', 'Keyhole Markup Language - click on icon to view in Google Earth', ),
('sql', 'Structured Query Language for PostgreSQL', ),
('matlab', 'Matlab - Load data into Matlab structure array', ),
('json', 'JavaScript Object Notation', ),
('csv', 'Comma Separated Values', ),
('tsv', 'Tabbed Separated Values', ),
('html', 'Hyper Text Markup Language table', ),
]
config_settings = {'site_uri': request.build_absolute_uri('/')[:-1],
'formats': formats,
'colormaps': cmaps,
'mapserver_scheme': settings.MAPSERVER_SCHEME,
'mapserver_host': settings.MAPSERVER_HOST,
'mappath': path.join(settings.URL_MAPFILE_DIR, request.session['mappath']),
'home_page_link': settings.HOME_PAGE_LINK,
'home_page_logo': settings.HOME_PAGE_LOGO,
'home_page_alt': settings.HOME_PAGE_ALT,
}
try:
config_settings['google_analytics_code'] = settings.GOOGLE_ANALYTICS_CODE
except AttributeError:
pass
return render(request, 'stoqsquery.html', context=config_settings)
|
MBARIMike/stoqs
|
stoqs/stoqs/views/query.py
|
Python
|
gpl-3.0
| 16,730
|
[
"NetCDF"
] |
328860092a0a086d9ef2873275476ae60473a28d6430c43e0ce9ea9f1fdcbe4c
|
import multiprocessing
import threading
import weakref
from typing import Any, MutableMapping, Optional
try:
from dask.utils import SerializableLock
except ImportError:
# no need to worry about serializing the lock
SerializableLock = threading.Lock
try:
from dask.distributed import Lock as DistributedLock
except ImportError:
DistributedLock = None
# Locks used by multiple backends.
# Neither HDF5 nor the netCDF-C library is thread-safe.
HDF5_LOCK = SerializableLock()
NETCDFC_LOCK = SerializableLock()
_FILE_LOCKS: MutableMapping[Any, threading.Lock] = weakref.WeakValueDictionary()
def _get_threaded_lock(key):
try:
lock = _FILE_LOCKS[key]
except KeyError:
lock = _FILE_LOCKS[key] = threading.Lock()
return lock
def _get_multiprocessing_lock(key):
# TODO: make use of the key -- maybe use locket.py?
# https://github.com/mwilliamson/locket.py
del key # unused
return multiprocessing.Lock()
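# A minimal check of the caching above, assuming only the standard library:
# _get_threaded_lock() returns the same lock object for the same key while a
# reference to it is alive, because _FILE_LOCKS is a WeakValueDictionary.
# The key name is illustrative.
def _threaded_lock_cache_demo():
    first = _get_threaded_lock("example.nc")
    second = _get_threaded_lock("example.nc")
    assert first is second  # cached under the same key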
_LOCK_MAKERS = {
None: _get_threaded_lock,
"threaded": _get_threaded_lock,
"multiprocessing": _get_multiprocessing_lock,
"distributed": DistributedLock,
}
def _get_lock_maker(scheduler=None):
"""Returns an appropriate function for creating resource locks.
Parameters
----------
scheduler : str or None
Dask scheduler being used.
See Also
--------
dask.utils.get_scheduler_lock
"""
return _LOCK_MAKERS[scheduler]
def _get_scheduler(get=None, collection=None) -> Optional[str]:
"""Determine the dask scheduler that is being used.
None is returned if no dask scheduler is active.
See also
--------
dask.base.get_scheduler
"""
try:
        # Fix for a bug triggered when dask is installed without the toolz
        # library (see dask issue 4164).
import dask
from dask.base import get_scheduler # noqa: F401
actual_get = get_scheduler(get, collection)
except ImportError:
return None
try:
from dask.distributed import Client
if isinstance(actual_get.__self__, Client):
return "distributed"
except (ImportError, AttributeError):
pass
try:
        # As of dask 2.6, dask.multiprocessing requires cloudpickle to be
        # installed; the hard dependency was removed in
        # https://github.com/dask/dask/pull/5511
if actual_get is dask.multiprocessing.get:
return "multiprocessing"
except AttributeError:
pass
return "threaded"
def get_write_lock(key):
"""Get a scheduler appropriate lock for writing to the given resource.
Parameters
----------
key : str
Name of the resource for which to acquire a lock. Typically a filename.
Returns
-------
Lock object that can be used like a threading.Lock object.
"""
scheduler = _get_scheduler()
lock_maker = _get_lock_maker(scheduler)
return lock_maker(key)
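# A minimal usage sketch for get_write_lock(), assuming no dask is active so
# the default threaded scheduler applies; "output.nc" is an illustrative name.
def _write_lock_demo():
    lock = get_write_lock("output.nc")
    with lock:  # threading.Lock supports the context manager protocol
        pass    # write to the shared resource here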
def acquire(lock, blocking=True):
"""Acquire a lock, possibly in a non-blocking fashion.
Includes backwards compatibility hacks for old versions of Python, dask
and dask-distributed.
"""
if blocking:
# no arguments needed
return lock.acquire()
elif DistributedLock is not None and isinstance(lock, DistributedLock):
# distributed.Lock doesn't support the blocking argument yet:
# https://github.com/dask/distributed/pull/2412
return lock.acquire(timeout=0)
else:
# "blocking" keyword argument not supported for:
# - threading.Lock on Python 2.
# - dask.SerializableLock with dask v1.0.0 or earlier.
# - multiprocessing.Lock calls the argument "block" instead.
return lock.acquire(blocking)
class CombinedLock:
"""A combination of multiple locks.
Like a locked door, a CombinedLock is locked if any of its constituent
locks are locked.
"""
def __init__(self, locks):
self.locks = tuple(set(locks)) # remove duplicates
def acquire(self, blocking=True):
return all(acquire(lock, blocking=blocking) for lock in self.locks)
def release(self):
for lock in self.locks:
lock.release()
def __enter__(self):
for lock in self.locks:
lock.__enter__()
def __exit__(self, *args):
for lock in self.locks:
lock.__exit__(*args)
def locked(self):
        return any(lock.locked() for lock in self.locks)
def __repr__(self):
return "CombinedLock(%r)" % list(self.locks)
class DummyLock:
"""DummyLock provides the lock API without any actual locking."""
def acquire(self, blocking=True):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def locked(self):
return False
def combine_locks(locks):
"""Combine a sequence of locks into a single lock."""
all_locks = []
for lock in locks:
if isinstance(lock, CombinedLock):
all_locks.extend(lock.locks)
elif lock is not None:
all_locks.append(lock)
num_locks = len(all_locks)
if num_locks > 1:
return CombinedLock(all_locks)
elif num_locks == 1:
return all_locks[0]
else:
return DummyLock()
def ensure_lock(lock):
"""Ensure that the given object is a lock."""
if lock is None or lock is False:
return DummyLock()
return lock
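# A minimal sketch of how the helpers above compose: combine_locks() flattens
# and deduplicates a sequence of locks, and ensure_lock() turns None into a
# no-op DummyLock. The locks used here are illustrative.
def _combine_locks_demo():
    real_lock = threading.Lock()
    combined = combine_locks([real_lock, None, DummyLock()])
    with combined:  # enters every constituent lock
        pass
    assert isinstance(ensure_lock(None), DummyLock)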
|
xray/xray
|
xarray/backends/locks.py
|
Python
|
apache-2.0
| 5,443
|
[
"NetCDF"
] |
fa546ae3e31da522244f65fbd6785e6c6d50b43ec7a219bddb484782798d0a48
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from tinctest.lib import local_path
from tinctest.runner import TINCTestRunner
from mpp.models import SQLTestCase, SQLTestCaseException
import unittest2 as unittest
import shutil
from contextlib import closing
from datetime import datetime
from StringIO import StringIO
from unittest2.runner import _WritelnDecorator
# we're testing SQLTestCase as it pertains to tinc.py (and only tinc.py)
# as such, any attempts by raw unit2 to discover and load MockSQLTestCase must be averted
@unittest.skip('mock')
class MockSQLTestCase(SQLTestCase):
"""
@description test case with metadata
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags orca hashagg
"""
db_name=os.getenv('USER')
def test_explicit_test_method(self):
pass
@unittest.skip('mock')
class MockSQLTestCaseGenerateAns(SQLTestCase):
"""
@description test case with metadata
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags orca hashagg
"""
sql_dir = 'sql_no_ans/'
generate_ans = 'yes'
def test_explicit_test_method(self):
pass
@unittest.skip('mock')
class MockSQLTestCaseForceGenerateAns(SQLTestCase):
"""
@description test case with metadata
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags orca hashagg
"""
sql_dir = 'sql_no_ans/'
generate_ans = 'force'
def test_explicit_test_method(self):
pass
@unittest.skip('mock')
class MockSQLTestCaseIncorrectGenerateAns(SQLTestCase):
"""
@description test case with metadata
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags orca hashagg
"""
# Misspelled generate_ans. Invalid value.
generate_ans = 'yess'
def test_explicit_test_method(self):
pass
@unittest.skip('mock')
class MockSQLTestCaseGpdiffNoAnsFile(SQLTestCase):
"""
@description test case with metadata
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags orca hashagg
"""
sql_dir = 'sql_no_ans/'
def test_explicit_test_method(self):
pass
@unittest.skip('mock')
class MockSQLTestCaseNoGpdiffNoAnsFile(SQLTestCase):
"""
@description test case with metadata
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags orca hashagg
@gpdiff False
"""
sql_dir = 'sql_no_ans/'
def test_explicit_test_method(self):
pass
@unittest.skip('mock')
class MockSQLTestCaseWithOptimizerOn(SQLTestCase):
"""
@description test case with metadata
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags orca hashagg
@optimizer_mode on
"""
db_name=os.getenv('USER')
@unittest.skip('mock')
class MockSQLTestCaseWithOptimizerOff(SQLTestCase):
"""
@description test case with metadata
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags orca hashagg
@optimizer_mode off
"""
db_name=os.getenv('USER')
@unittest.skip('mock')
class MockSQLTestCaseWithOptimizerBoth(SQLTestCase):
"""
@description test case with metadata
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags orca hashagg
@optimizer_mode both
"""
db_name=os.getenv('USER')
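# A minimal sketch of the load -> unskip -> select pattern that the tests
# below repeat inline; the helper name is an illustrative assumption and the
# tests keep their own inline copies.
def _load_unskipped_test(test_class, test_name):
    suite = tinctest.TINCTestLoader().loadTestsFromTestCase(test_class)
    for candidate in suite._tests:
        if candidate.name == test_name:
            # Override the skip decorator so this explicit construction runs.
            candidate.__class__.__unittest_skip__ = False
            return candidate
    return None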
class SQLTestCaseTests(unittest.TestCase):
def test_run_sql_test_failure(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
        # would likely have allowed us to isolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCase.test_query02":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 1)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02.diff')))
shutil.rmtree(test_case.get_out_dir())
def test_run_sql_test_success(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
        # would likely have allowed us to isolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCase.test_query03":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
shutil.rmtree(test_case.get_out_dir())
def test_run_entire_sql_test_case(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case = None
for test_case in test_suite._tests:
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_suite.run(test_result)
# 3 sql files with ans files and 1 explicit method
self.assertEqual(test_result.testsRun, 4)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 1)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02.diff')))
shutil.rmtree(test_case.get_out_dir())
def test_verify_setup_teardown(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
for test_case in test_suite._tests:
test_case.__class__.__unittest_skip__ = False
if os.path.exists(local_path("output/")):
shutil.rmtree(local_path("output/"))
test_result = unittest.TestResult()
test_suite.run(test_result)
self.assertEqual(test_result.testsRun, 4)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 1)
# Verify if setup and teardown sqls were executed
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'setup.out')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'setup', 'setup1.out')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'teardown.out')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'teardown', 'teardown1.out')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_setup.out')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_teardown.out')))
def test_run_explicit_test_method(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
        # would likely have allowed us to isolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCase.test_explicit_test_method":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
def test_with_local_init_file(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
        # would likely have allowed us to isolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCase.test_query04":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
def test_run_no_ans_file(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# Store all test names in a list
test_case_list = []
for temp in test_suite._tests:
test_case_list.append(temp.name)
        # Verify that the other sql files with ans files and the explicit method are in the list
self.assertTrue('MockSQLTestCase.test_explicit_test_method' in test_case_list)
self.assertTrue('MockSQLTestCase.test_query02' in test_case_list)
# Verify that test_query_no_ans_file is not there, even though the sql file is there without the ans file
self.assertTrue('MockSQLTestCase.test_query_no_ans_file' not in test_case_list)
# Verify the default value of generate_ans is no
self.assertTrue(MockSQLTestCase.generate_ans == 'no')
def test_gpdiff_no_ans_file(self):
"""
        Test whether we throw an exception when there is no ans file for a sql file and gpdiff is set to True
"""
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseGpdiffNoAnsFile)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
        # would likely have allowed us to isolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCaseGpdiffNoAnsFile.test_query_no_ans_file":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 1)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
def test_no_gpdiff_no_ans_file(self):
"""
Test whether we construct a test for sqls with no ans files when gpdiff is turned off
"""
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseNoGpdiffNoAnsFile)
# Store all test names in a list
test_case_list = []
for temp in test_suite._tests:
test_case_list.append(temp.name)
        # Verify that the other sql files with ans files and the explicit method are in the list
self.assertTrue('MockSQLTestCaseNoGpdiffNoAnsFile.test_explicit_test_method' in test_case_list)
self.assertTrue('MockSQLTestCaseNoGpdiffNoAnsFile.test_query02' in test_case_list)
# Verify that test_query_no_ans_file is there, even though the sql file is there without the ans file
self.assertTrue('MockSQLTestCaseNoGpdiffNoAnsFile.test_query_no_ans_file' in test_case_list)
def test_run_generate_ans_file_class_variable(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseGenerateAns)
# Store all test names in a list
test_case_list = []
for temp in test_suite._tests:
test_case_list.append(temp.name)
        # Verify that the other sql files with ans files and the explicit method are in the list
self.assertTrue('MockSQLTestCaseGenerateAns.test_explicit_test_method' in test_case_list)
self.assertTrue('MockSQLTestCaseGenerateAns.test_query02' in test_case_list)
# Verify that test_query_no_ans_file is also there, even though its ans file is not there
self.assertTrue('MockSQLTestCaseGenerateAns.test_query_no_ans_file' in test_case_list)
def test_run_incorrect_generate_ans_file_class_variable(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseIncorrectGenerateAns)
count = 0
for test in test_suite._tests:
if 'TINCTestCaseLoadFailure' in str(test):
count += 1
        self.assertEqual(count, 1)
def test_run_sql_generate_ans(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseGenerateAns)
# Ans file that will be generated
ans_file = local_path("query_no_ans_file.ans")
# If ans file is there for some reason, remove it (not testing force here)
if os.path.exists(ans_file):
os.remove(ans_file)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
        # would likely have allowed us to isolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCaseGenerateAns.test_query_no_ans_file":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
# Verify that ans file is generated
self.assertTrue(os.path.exists(local_path("setup.ans")))
self.assertTrue(os.path.exists(ans_file))
self.assertTrue(os.path.exists(local_path("teardown.ans")))
# Cleanup
os.remove(local_path("setup.ans"))
os.remove(ans_file)
os.remove(local_path("teardown.ans"))
def test_run_sql_force_generate_ans(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseForceGenerateAns)
# Ans file that will be generated
ans_file = local_path("query_no_ans_file.ans")
# Create the empty ans file to allow force to overwrite
open(ans_file, 'w').close()
self.assertTrue(os.path.getsize(ans_file) == 0)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
        # would likely have allowed us to isolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCaseForceGenerateAns.test_query_no_ans_file":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
# Verify that ans file is there
self.assertTrue(os.path.exists(local_path("setup.ans")))
self.assertTrue(os.path.exists(ans_file))
self.assertTrue(os.path.exists(local_path("teardown.ans")))
# Verify that ans file size is greater than 0
self.assertTrue(os.path.getsize(ans_file) > 0)
# Cleanup
os.remove(local_path("setup.ans"))
os.remove(ans_file)
os.remove(local_path("teardown.ans"))
def test_run_sql_force_generate_ans_permission_denied(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseForceGenerateAns)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
        # would likely have allowed us to isolate test_query04 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCaseForceGenerateAns.test_query04":
                # query04.ans would not be checked out from perforce, so no write to it is allowed
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 1)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
def test_run_sql_file(self):
test_case = MockSQLTestCase('test_query03')
if os.path.exists(test_case.get_out_dir()):
shutil.rmtree(test_case.get_out_dir())
# Default mode
test_case.run_sql_file(local_path('query03.sql'))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03.out')))
self.assertFalse(self._check_str_in_file('SET optimizer',
os.path.join(test_case.get_out_dir(), 'query03.sql')))
# Optimizer on mode
test_case.run_sql_file(local_path('query03.sql'), optimizer=True)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.out')))
self.assertTrue(self._check_str_in_file('SET optimizer=on;',
os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
# Optimizer off mode
test_case.run_sql_file(local_path('query03.sql'), optimizer=False)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.out')))
self.assertTrue(self._check_str_in_file('SET optimizer=off;',
os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
def test_run_sql_test_optimizer_on(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseWithOptimizerOn)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
        # would likely have allowed us to isolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCaseWithOptimizerOn.test_query03":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.out')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query03_orca.out')))
shutil.rmtree(test_case.get_out_dir())
def test_run_sql_test_optimizer_off(self):
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseWithOptimizerOff)
# Find our desired test case in test_suite.
# This code is a consequence of us only having implemented
# loadTestsFromTestCase. An implementation of loadTestsFromNames
        # would likely have allowed us to isolate test_query02 directly.
test_case = None
for temp in test_suite._tests:
if temp.name == "MockSQLTestCaseWithOptimizerOff.test_query03":
test_case = temp
self.assertIsNotNone(test_case)
# As explained above, we want MockSQLTestCase to run if and only if
# it's being invoked by our unit tests. So, it's skipped if discovered
# directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
# triggered by unit2, we override MockSQLTestCase's skip decorator to allow
# this explicit construction of MockSQLTestCase to proceed.
test_case.__class__.__unittest_skip__ = False
test_result = unittest.TestResult()
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.out')))
self.assertTrue(self._check_str_in_file("SET optimizer=off;",
os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
self.assertTrue(self._check_str_in_file("SET optimizer=off;",
os.path.join(test_case.get_out_dir(), 'query03_planner.out')))
shutil.rmtree(test_case.get_out_dir())
def test_run_sql_test_optimizer_both(self):
test_loader = tinctest.TINCTestLoader()
# For data provider test cases, we have to use loadTestsFromName, since loadTestsFromTestCase won't filter and expand
test_suite = test_loader.loadTestsFromName("mpp.models.regress.sql_related.regress_sql_test_case.regress_sql_test_case.MockSQLTestCaseWithOptimizerBoth")
# Find our desired test case in test_suite.
test_case = None
new_test_suite = tinctest.TINCTestSuite()
for temp in test_suite._tests:
if "MockSQLTestCaseWithOptimizerBoth.test_query03" in temp.name:
new_test_suite.addTest(temp)
temp.__class__.__unittest_skip__ = False
test_case = temp
self.assertIsNotNone(new_test_suite)
        self.assertEqual(new_test_suite.countTestCases(), 2)
test_result = unittest.TestResult()
new_test_suite.run(test_result)
self.assertEqual(test_result.testsRun, 2)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 0)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.out')))
self.assertTrue(self._check_str_in_file("SET optimizer=off;",
                                                os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
self.assertTrue(self._check_str_in_file("SET optimizer=off;",
os.path.join(test_case.get_out_dir(), 'query03_planner.out')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.out')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query03_orca.out')))
shutil.rmtree(test_case.get_out_dir())
def _check_str_in_file(self, check_string, file_path):
with open(file_path, 'r') as f:
for line in f:
if check_string in line:
return True
return False
def test_run_sql_test_optimizer_minidump_on_failure(self):
"""
        Test whether we gather minidumps on failures when the test is executed with optimizer on.
"""
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromName('mpp.models.regress.sql_related.regress_sql_test_case.' + \
'regress_sql_test_case.' + \
'MockSQLTestCaseWithOptimizerOn.test_query02')
self.assertIsNotNone(test_suite)
        self.assertEqual(len(test_suite._tests), 1)
test_result = None
test_case = None
for test in test_suite._tests:
test.__class__.__unittest_skip__ = False
test_case = test
if os.path.exists(test_case.get_out_dir()):
shutil.rmtree(test_case.get_out_dir())
with closing(_WritelnDecorator(StringIO())) as buffer:
tinc_test_runner = TINCTestRunner(stream = buffer, descriptions = True, verbosity = 1)
test_result = tinc_test_runner.run(test_suite)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 1)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_orca.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_orca.out')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query02_orca.sql')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query02_orca.out')))
# Verify that we collect minidump on failure for optimizer execution mode
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_minidump.mdp')))
@unittest.skip("QAINF-999")
def test_run_sql_test_optimizer_minidump_on_failure2(self):
"""
        Test whether we gather minidumps on failures when the test is executed with optimizer_mode both.
"""
test_loader = tinctest.TINCTestLoader()
test_suite = test_loader.loadTestsFromName('mpp.models.regress.sql_related.regress_sql_test_case.' + \
'regress_sql_test_case.' + \
'MockSQLTestCaseWithOptimizerBoth.test_query02')
self.assertIsNotNone(test_suite)
new_test_suite = tinctest.TINCTestSuite()
        self.assertEqual(test_suite.countTestCases(), 2)
test_result = None
test_case = None
for test in test_suite._tests:
if 'test_query02_orca' in test.name:
test.__class__.__unittest_skip__ = False
test_case = test
new_test_suite.addTest(test)
self.assertIsNotNone(test_case)
if os.path.exists(test_case.get_out_dir()):
shutil.rmtree(test_case.get_out_dir())
with closing(_WritelnDecorator(StringIO())) as buffer:
tinc_test_runner = TINCTestRunner(stream = buffer, descriptions = True, verbosity = 1)
test_result = tinc_test_runner.run(new_test_suite)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.errors), 0)
self.assertEqual(len(test_result.skipped), 0)
self.assertEqual(len(test_result.failures), 1)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_orca.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_orca.out')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query02_orca.sql')))
self.assertTrue(self._check_str_in_file("SET optimizer=on;",
os.path.join(test_case.get_out_dir(), 'query02_orca.out')))
# Verify that we collect minidump on failure for optimizer execution mode
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_minidump.mdp')))
|
cjcjameson/gpdb
|
src/test/tinc/tincrepo/mpp/models/regress/sql_related/regress_sql_test_case/regress_sql_test_case.py
|
Python
|
apache-2.0
| 36,052
|
[
"ORCA"
] |
5c8b54597ee1cd572839afe10d43cad5d64ffc014e2d41a775f5baf2f130668e
|
"""Scraper for Supreme Court of Maine
CourtID: me
Court Short Name: Me.
Author: Brian W. Carver
Date created: June 20, 2014
History:
2014-06-25 (est): Added code for additional date formats.
2014-07-02: Was receiving InsanityException and tweaked date code to get some
missing dates.
2014-12-15: Fixes insanity exception by tweaking the XPaths.
"""
from datetime import datetime
from juriscraper.OpinionSite import OpinionSite
from lxml import html
class Site(OpinionSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = 'http://www.courts.maine.gov/opinions_orders/supreme/publishedopinions.shtml'
def _get_download_urls(self):
path = '//table//tr/td[2]/a[1]/@href'
return list(self.html.xpath(path))
def _get_case_names(self):
case_names = []
for e in self.html.xpath('//table//tr/td[2]/a[1]'):
s = html.tostring(e, method='text', encoding='unicode')
case_names.append(s)
return case_names
def _get_case_dates(self):
path = '//table//tr/td[3]/text()'
date_styles = ['%B %d, %Y', '%B %d,%Y']
dates = []
for s in self.html.xpath(path):
for date_style in date_styles:
try:
d = datetime.strptime(s.strip(), date_style).date()
except ValueError:
continue
                dates.append(d)
                break
return dates
def _get_precedential_statuses(self):
return ["Published"] * len(self.case_names)
def _get_neutral_citations(self):
path = '//table[position() > 1]//tr/td[1]//text()'
return list(self.html.xpath(path))
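# A minimal sketch of the fallback parsing done in _get_case_dates(), assuming
# the two date styles listed there; the sample strings are illustrative.
def _date_style_demo():
    for s in ('June 20, 2014', 'June 20,2014'):
        for style in ('%B %d, %Y', '%B %d,%Y'):
            try:
                parsed = datetime.strptime(s, style).date()
                break
            except ValueError:
                continue
        assert parsed.year == 2014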
|
Andr3iC/juriscraper
|
opinions/united_states/state/me.py
|
Python
|
bsd-2-clause
| 1,766
|
[
"Brian"
] |
05c6b34d72cb267676f1f356fd16df2ebc597ca9e258027da32e78b003b9534e
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""
Camelot extends the SQLAlchemy column types with a number of its own column
types. Those field types are automatically mapped to a specific delegate taking
care of the visualisation.
Those fields are stored in the :mod:`camelot.types` module.
"""
import logging
logger = logging.getLogger('camelot.types')
from sqlalchemy import types
from camelot.core.files.storage import StoredFile, StoredImage, Storage
class VirtualAddress(types.TypeDecorator):
"""A single field that can be used to enter phone numbers, fax numbers, email
addresses, im addresses. The editor provides soft validation of the data
entered. The address or number is stored as a string in the database.
This column type accepts and returns tuples of strings, the first string is
the :attr:`virtual_address_type`, and the second the address itself.
eg: ``('email','project-camelot@conceptive.be')`` is stored as
``email://project-camelot@conceptive.be``
.. image:: /_static/virtualaddress_editor.png
"""
impl = types.Unicode
virtual_address_types = ['phone', 'fax', 'mobile', 'email', 'im', 'pager',]
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
if not impl_processor:
impl_processor = lambda x:x
def processor(value):
if value is not None:
if value[1]:
value = u'://'.join(value)
else:
value = None
return impl_processor(value)
return processor
def result_processor(self, dialect, coltype=None):
impl_processor = self.impl.result_processor(dialect, coltype)
if not impl_processor:
impl_processor = lambda x:x
def processor(value):
if value:
split = value.split('://')
if len(split)>1:
return tuple(split)
return (u'phone',u'')
return processor
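# A minimal round-trip sketch of the mapping VirtualAddress describes above,
# shown without a SQLAlchemy dialect; the address is the docstring's example.
def _virtual_address_roundtrip_demo():
    value = (u'email', u'project-camelot@conceptive.be')
    stored = u'://'.join(value)  # what bind_processor writes
    assert stored == u'email://project-camelot@conceptive.be'
    assert tuple(stored.split(u'://')) == value  # what result_processor returns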
class Code(types.TypeDecorator):
"""SQLAlchemy column type to store codes. Where a code is a list of strings
on which a regular expression can be enforced.
This column type accepts and returns a list of strings and stores them as a
string joined with points.
eg: ``['08', 'AB']`` is stored as ``08.AB``
.. image:: /_static/editors/CodeEditor_editable.png
:param parts: a list of input masks specifying the mask for each part,
eg ``['99', 'AA']``. For valid input masks, see
`QLineEdit <http://www.riverbankcomputing.co.uk/static/Docs/PyQt4/html/qlineedit.html>`_
:param separator: a string that will be used to separate the different parts
in the GUI and in the database
:param length: the size of the underlying string field in the database, if no
length is specified, it will be calculated from the parts
"""
impl = types.Unicode
def __init__(self, parts, separator=u'.', length = None, **kwargs):
import string
translator = string.maketrans('', '')
self.parts = parts
self.separator = separator
max_length = sum(len(part.translate(translator, '<>!')) for part in parts) + len(parts)*len(self.separator)
types.TypeDecorator.__init__( self, length = length or max_length, **kwargs )
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
if not impl_processor:
impl_processor = lambda x:x
def processor(value):
if value is not None:
value = self.separator.join(value)
return impl_processor(value)
return processor
def result_processor(self, dialect, coltype=None):
impl_processor = self.impl.result_processor(dialect, coltype)
if not impl_processor:
impl_processor = lambda x:x
def processor(value):
if value:
return value.split(self.separator)
return ['' for _p in self.parts]
return processor
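# A minimal round-trip sketch of the Code mapping described above, using the
# docstring's example value; no SQLAlchemy dialect involved.
def _code_roundtrip_demo():
    value = ['08', 'AB']
    stored = u'.'.join(value)  # what bind_processor writes
    assert stored == u'08.AB'
    assert stored.split(u'.') == value  # what result_processor returns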
class IPAddress(Code):
def __init__(self, **kwargs):
        super(IPAddress, self).__init__(parts=['900','900','900','900'], **kwargs)
class Rating(types.TypeDecorator):
"""The rating field is an integer field that is visualized as a number of stars that
can be selected::
class Movie( Entity ):
title = Column( Unicode(60), nullable = False )
rating = Column( camelot.types.Rating() )
.. image:: /_static/editors/StarEditor_editable.png
"""
impl = types.Integer
class RichText(types.TypeDecorator):
"""RichText fields are unlimited text fields which contain html. The html will be
rendered in a rich text editor.
.. image:: /_static/editors/RichTextEditor_editable.png
"""
impl = types.UnicodeText
class Language(types.TypeDecorator):
"""The languages are stored as a string in the database of
the form *language*(_*country*), where :
* *language* is a lowercase, two-letter, ISO 639 language code,
* *territory* is an uppercase, two-letter, ISO 3166 country code
This used to be implemented using babel, but this was too slow and
used too much memory, so now it's implemented using QT.
"""
impl = types.Unicode
def __init__(self):
types.TypeDecorator.__init__(self, length=20)
class Color(types.TypeDecorator):
"""The Color field returns and accepts tuples of the form (r,g,b,a) where
    r,g,b,a are integers between 0 and 255. The color is stored as a hexadecimal
    string of the form AARRGGBB in the database, where AA is the transparency,
    RR is red, GG is green and BB is blue::
class MovieType( Entity ):
color = Column( camelot.types.Color() )
.. image:: /_static/editors/ColorEditor_editable.png
The colors are stored in the database as strings.
Use::
QColor(*color)
to convert a color tuple to a QColor.
"""
impl = types.Unicode
def __init__(self):
types.TypeDecorator.__init__(self, length=8)
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
if not impl_processor:
impl_processor = lambda x:x
def processor(value):
if value is not None:
assert len(value) == 4
for i in range(4):
assert value[i] >= 0
assert value[i] <= 255
                return impl_processor(u'%02X%02X%02X%02X'%(value[3], value[0], value[1], value[2]))
return impl_processor(value)
return processor
def result_processor(self, dialect, coltype=None):
impl_processor = self.impl.result_processor(dialect, coltype)
if not impl_processor:
impl_processor = lambda x:x
def processor(value):
if value:
return (int(value[2:4],16), int(value[4:6],16), int(value[6:8],16), int(value[0:2],16))
return processor
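# A minimal sketch of the AARRGGBB encoding described above; the sample color
# is illustrative.
def _color_roundtrip_demo():
    r, g, b, a = 255, 128, 0, 255
    stored = u'%02X%02X%02X%02X' % (a, r, g, b)  # what bind_processor writes
    assert stored == u'FFFF8000'
    decoded = (int(stored[2:4], 16), int(stored[4:6], 16),
               int(stored[6:8], 16), int(stored[0:2], 16))
    assert decoded == (r, g, b, a)  # what result_processor returns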
class Enumeration(types.TypeDecorator):
"""The enumeration field stores integers in the database, but represents them as
strings. This allows efficient storage and querying while preserving readable code.
Typical use of this field would be a status field.
Enumeration fields are visualized as a combo box, where the labels in the combo
box are the capitalized strings::
class Movie(Entity):
title = Column( Unicode(60), nullable = False )
state = Column( camelot.types.Enumeration([(1,'planned'), (2,'recording'), (3,'finished'), (4,'canceled')]),
                        index = True, nullable = False, default = 'planned' )
.. image:: /_static/editors/ChoicesEditor_editable.png
If None should be a possible value of the enumeration, add (None, None) to the list of
possible enumerations. None will be presented as empty in the GUI.
:param choices: is a list of tuples. each tuple contains an integer and its
associated string. such as ::
choices = [(1,'draft'), (2,'approved')]
"""
impl = types.Integer
def __init__(self, choices=[], **kwargs):
types.TypeDecorator.__init__(self, **kwargs)
self._int_to_string = dict(choices)
self._string_to_int = dict((v,k) for (k,v) in choices)
self.choices = [v for (k,v) in choices]
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
if not impl_processor:
impl_processor = lambda x:x
def processor(value):
if value is not None:
try:
value = self._string_to_int[value]
return impl_processor(value)
except KeyError, e:
logger.error('could not process enumeration value %s, possible values are %s'%(value, u', '.join(list(self._string_to_int.keys()))), exc_info=e)
raise
else:
                return impl_processor(value)
return processor
def result_processor(self, dialect, coltype=None):
impl_processor = self.impl.result_processor(dialect, coltype)
if not impl_processor:
impl_processor = lambda x:x
def processor(value):
if value is not None:
value = impl_processor(value)
try:
return self._int_to_string[value]
except KeyError, e:
logger.error('could not process %s'%value, exc_info=e)
raise
return processor
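# A minimal sketch of the integer <-> string mapping Enumeration keeps, using
# the docstring's example choices.
def _enumeration_mapping_demo():
    choices = [(1, 'draft'), (2, 'approved')]
    int_to_string = dict(choices)
    string_to_int = dict((v, k) for (k, v) in choices)
    assert string_to_int['approved'] == 2  # the integer stored in the database
    assert int_to_string[2] == 'approved'  # the string shown in the GUI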
class File(types.TypeDecorator):
"""Sqlalchemy column type to store files. Only the location of the file is stored
This column type accepts and returns a StoredFile. The name of the file is
stored as a string in the database. A subdirectory upload_to can be specified::
class Movie( Entity ):
script = Column( camelot.types.File( upload_to = 'script' ) )
.. image:: /_static/editors/FileEditor_editable.png
Retrieving the actual storage from a File field can be a little cumbersome.
The easy way is taking it from the field attributes, in which it will be
put by default. If no field attributes are available at the location where
the storage is needed, eg in some function doing document processing, one
needs to go through SQLAlchemy to retrieve it.
    For a 'task' object with a File field named 'document', the
storage can be retrieved::
from sqlalchemy import orm
task_mapper = orm.object_mapper( task )
document_property = task_mapper.get_property('document')
storage = document_property.columns[0].type.storage
:param max_length: the maximum length of the name of the file that will
be saved in the database.
    :param upload_to: a subdirectory in the Storage, in which the file
should be stored.
:param storage: an alternative storage to use for this field.
"""
impl = types.Unicode
stored_file_implementation = StoredFile
def __init__(self, max_length=100, upload_to=u'', storage=Storage, **kwargs):
self.max_length = max_length
self.storage = storage(upload_to, self.stored_file_implementation)
types.TypeDecorator.__init__(self, length=max_length, **kwargs)
def bind_processor(self, dialect):
impl_processor = self.impl.bind_processor(dialect)
if not impl_processor:
impl_processor = lambda x:x
def processor(value):
if value is not None:
assert isinstance(value, (self.stored_file_implementation))
return impl_processor(value.name)
return impl_processor(value)
return processor
def result_processor(self, dialect, coltype=None):
impl_processor = self.impl.result_processor(dialect, coltype)
if not impl_processor:
impl_processor = lambda x:x
def processor(value):
if value:
value = impl_processor(value)
return self.stored_file_implementation(self.storage, value)
return processor
class Image(File):
"""Sqlalchemy column type to store images
This column type accepts and returns a StoredImage, and stores them in the directory
specified by settings.CAMELOT_MEDIA_ROOT. The name of the file is stored as a string in
the database.
    The Image field type provides the same functionality as the File field type, but
the files stored should be images.
.. image:: /_static/editors/ImageEditor_editable.png
"""
stored_file_implementation = StoredImage
|
jeroendierckx/Camelot
|
camelot/types/__init__.py
|
Python
|
gpl-2.0
| 14,092
|
[
"VisIt"
] |
542d7dc378ef222865e8fed97d8ac064c28e331288f412e32eb8ab5bd7c0f612
|
import numpy as np
import theano
import theano.tensor as T
import unittest
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nose.tools import assert_true
from smartlearner import views, stopping_criteria, Trainer, tasks
from smartlearner.direction_modifiers import DirectionClipping
from smartlearner.optimizers import SGD
from smartlearner.testing import DummyLoss, DummyBatchScheduler, DummyModel
from smartlearner.utils import sharedX
floatX = theano.config.floatX
class DummyLossWithGradient(DummyLoss):
def __init__(self, cost, param):
super().__init__()
self.cost = cost
self.model = DummyModel()
self.model._parameters = [param]
@property
def loss(self):
return self.cost
class TestDirectionClipping(unittest.TestCase):
def _build_experiment(self, threshold=1):
        # Create an N-dimensional gaussian function to optimize. This function is
        # ill-conditioned, so no single gradient step converges in one iteration.
N = 4
center = 5*np.ones((1, N)).astype(floatX)
param = sharedX(np.zeros((1, N)))
cost = T.sum(0.5*T.dot(T.dot((param-center), np.diag(1./np.arange(1, N+1))), (param-center).T))
loss = DummyLossWithGradient(cost, param)
gradient_clipping = DirectionClipping(threshold=threshold)
loss.append_gradient_modifier(gradient_clipping)
optimizer = SGD(loss)
trainer = Trainer(optimizer, DummyBatchScheduler())
# Monitor the learning rate.
logger = tasks.Logger(views.MonitorVariable(list(optimizer.directions.values())[0]),
views.MonitorVariable(list(loss.gradients.values())[0]),
views.MonitorVariable(list(loss.orig_gradients.values())[0]),
views.MonitorVariable(gradient_clipping.grad_norm))
trainer.append_task(logger)
return trainer, logger, gradient_clipping
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.max_epoch = 10
self.trainer, self.logger, self.gradient_clipping = self._build_experiment()
self.trainer.append_task(stopping_criteria.MaxEpochStopping(self.max_epoch))
self.trainer.train()
def test_behaviour(self):
max_epoch = 10
for threshold in [0.5, 1, 1.5]:
trainer, logger, gradient_clipping = self._build_experiment(threshold)
trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))
trainer.train()
directions = np.array(logger.get_variable_history(0)).squeeze()
assert_true(np.all(np.sqrt(np.sum(directions**2, axis=1)) <= threshold + 1e-6))
gradients_clipped = np.array(logger.get_variable_history(1)).squeeze()
assert_array_equal(-directions, gradients_clipped)
gradients = np.array(logger.get_variable_history(2)).squeeze()
norms = np.array(logger.get_variable_history(3)).squeeze()[:, None]
assert_array_almost_equal(gradients_clipped,
gradients*threshold/np.maximum(threshold, norms))
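# A minimal NumPy restatement of the clipping rule the assertions above check:
# a direction is rescaled so its L2 norm never exceeds the threshold. The
# sample gradient is illustrative.
def _clip_direction_demo():
    g = np.array([3.0, 4.0])  # norm is 5
    threshold = 1.0
    clipped = g * threshold / np.maximum(threshold, np.sqrt(np.sum(g ** 2)))
    assert np.allclose(np.sqrt(np.sum(clipped ** 2)), threshold)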
|
SMART-Lab/smartlearner
|
tests/direction_modifiers/test_direction_clipping.py
|
Python
|
bsd-3-clause
| 3,219
|
[
"Gaussian"
] |
1207bcabf3c3c1477e6c2ff12e99ef5b9d80ece4161e4d112f3d9f317098d3c3
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Parser for ADF output files"""
import itertools
import re
import numpy
from cclib.parser import logfileparser
from cclib.parser import utils
class ADF(logfileparser.Logfile):
"""An ADF log file"""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(ADF, self).__init__(logname="ADF", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "ADF log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'ADF("%s")' % (self.filename)
def normalisesym(self, label):
"""Use standard symmetry labels instead of ADF labels.
To normalise:
(1) any periods are removed (except in the case of greek letters)
(2) XXX is replaced by X, and a " added.
(3) XX is replaced by X, and a ' added.
(4) The greek letters Sigma, Pi, Delta and Phi are replaced by
their lowercase equivalent.
"""
greeks = ['Sigma', 'Pi', 'Delta', 'Phi']
for greek in greeks:
if label.startswith(greek):
return label.lower()
ans = label.replace(".", "")
        if ans[1:3] == "''":
            ans = ans[0] + '"'
l = len(ans)
if l > 1 and ans[0] == ans[1]: # Python only tests the second condition if the first is true
if l > 2 and ans[1] == ans[2]:
ans = ans.replace(ans[0]*3, ans[0]) + '"'
else:
ans = ans.replace(ans[0]*2, ans[0]) + "'"
return ans
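    # Illustrative input/output pairs for the rules above (assumed examples,
    # not an exhaustive list of ADF labels):
    #   'A1.g'  -> 'A1g'    (periods removed)
    #   'AA'    -> "A'"     (doubled letter, prime added)
    #   'AAA'   -> 'A"'     (tripled letter, double prime added)
    #   'Sigma' -> 'sigma'  (greek letters lowercased)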
def normalisedegenerates(self, label, num, ndict=None):
"""Generate a string used for matching degenerate orbital labels
To normalise:
(1) if label is E or T, return label:num
(2) if label is P or D, look up in dict, and return answer
"""
if not ndict:
ndict = {
'P': {0: "P:x", 1: "P:y", 2: "P:z"},
'D': {0: "D:z2", 1: "D:x2-y2", 2: "D:xy", 3: "D:xz", 4: "D:yz"}
}
if label in ndict:
if num in ndict[label]:
return ndict[label][num]
else:
return "%s:%i" % (label, num+1)
else:
return "%s:%i" % (label, num+1)
def before_parsing(self):
# Used to avoid extracting the final geometry twice in a GeoOpt
self.NOTFOUND, self.GETLAST, self.NOMORE = list(range(3))
self.finalgeometry = self.NOTFOUND
# Used for calculating the scftarget (variables names taken from the ADF manual)
self.accint = self.SCFconv = self.sconv2 = None
        # Keep track of the nosym and unrestricted cases when parsing energies, since they don't have an all-irreps section.
self.nosymflag = False
self.unrestrictedflag = False
SCFCNV, SCFCNV2 = list(range(2)) # used to index self.scftargets[]
maxelem, norm = list(range(2)) # used to index scf.values
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
# If a file contains multiple calculations, currently we want to print a warning
# and skip to the end of the file, since cclib parses only the main system, which
# is usually the largest. Here we test this by checking if scftargets has already
# been parsed when another INPUT FILE segment is found, although this might
# not always be the best indicator.
if line.strip() == "(INPUT FILE)" and hasattr(self, "scftargets"):
self.logger.warning("Skipping remaining calculations")
inputfile.seek(0, 2)
return
# We also want to check to make sure we aren't parsing "Create" jobs,
# which normally come before the calculation we actually want to parse.
if line.strip() == "(INPUT FILE)":
while True:
self.updateprogress(inputfile, "Unsupported Information", self.fupdate)
line = next(inputfile) if line.strip() == "(INPUT FILE)" else None
                if line and line[:6] not in ("Create", "create"):
break
line = next(inputfile)
version_searchstr = "Amsterdam Density Functional (ADF)"
if version_searchstr in line:
startidx = line.index(version_searchstr) + len(version_searchstr)
trimmed_line = line[startidx:].strip()[:-1]
# The package version is normally a year with revision number
# (such as 2013.01), but it may also be a random string (such as a
# version control branch name).
match = re.search(r"([\d\.]{4,7})", trimmed_line)
if match:
package_version = match.groups()[0]
# Use YYYY.MM as a short version.
self.metadata["legacy_package_version"] = package_version
else:
# This isn't as well-defined, but the field shouldn't be left
# empty. Grab whatever is there and parse it out in the
# following lines.
package_version = trimmed_line.strip()
# More detailed information can be found before "A D F", even if
# the above package version isn't numeric.
self.skip_line(inputfile, 's')
line = next(inputfile)
# Get the contents between the star border.
tokens = line.split()[1:-1]
assert len(tokens) >= 1
if tokens[0] == "Build":
package_version += "+{}".format(tokens[1])
else:
assert tokens[0][0] == "r"
# If a year-type version has already been parsed (YYYY(.nn)),
# it should take precedence, otherwise use the more detailed
# version first.
if match:
package_version = '{}dev{}'.format(package_version, tokens[0][1:])
else:
year = tokens[1].split("-")[0]
self.metadata["package_version_description"] = package_version
package_version = '{}dev{}'.format(year, tokens[0][1:])
self.metadata["legacy_package_version"] = year
self.metadata["package_version_date"] = tokens[1]
self.metadata["package_version"] = package_version
        # In ADF 2014.01, there are (INPUT FILE) messages, so we need to use just
        # the lines that start with 'Create' and run until the title or something
        # else we are sure is the calculation proper. It would be good to combine
        # this with the previous block, if possible.
if line[:6] == "Create":
while line[:5] != "title" and "NO TITLE" not in line:
                line = next(inputfile)
if line[1:10] == "Symmetry:":
info = line.split()
if info[1] == "NOSYM":
self.nosymflag = True
# Use this to read the subspecies of irreducible representations.
# It will be a list, with each element representing one irrep.
if line.strip() == "Irreducible Representations, including subspecies":
self.skip_line(inputfile, 'dashes')
self.irreps = []
line = next(inputfile)
while line.strip() != "":
self.irreps.append(line.split())
line = next(inputfile)
if line[4:13] == 'Molecule:':
info = line.split()
if info[1] == 'UNrestricted':
self.unrestrictedflag = True
if line[1:6] == "ATOMS":
# Find the number of atoms and their atomic numbers
# Also extract the starting coordinates (for a GeoOpt anyway)
# and the atommasses (previously called vibmasses)
self.updateprogress(inputfile, "Attributes", self.cupdate)
self.atomcoords = []
self.skip_lines(inputfile, ['header1', 'header2', 'header3'])
atomnos = []
atommasses = []
atomcoords = []
coreelectrons = []
line = next(inputfile)
while len(line) > 2: # ensure that we are reading no blank lines
info = line.split()
element = info[1].split('.')[0]
atomnos.append(self.table.number[element])
atomcoords.append(list(map(float, info[2:5])))
coreelectrons.append(int(float(info[5]) - float(info[6])))
atommasses.append(float(info[7]))
line = next(inputfile)
self.atomcoords.append(atomcoords)
self.set_attribute('natom', len(atomnos))
self.set_attribute('atomnos', atomnos)
self.set_attribute('atommasses', atommasses)
self.set_attribute('coreelectrons', coreelectrons)
if line[1:10] == "FRAGMENTS":
header = next(inputfile)
self.frags = []
self.fragnames = []
line = next(inputfile)
while len(line) > 2: # ensure that we are reading no blank lines
info = line.split()
if len(info) == 7: # fragment name is listed here
self.fragnames.append("%s_%s" % (info[1], info[0]))
self.frags.append([])
self.frags[-1].append(int(info[2]) - 1)
elif len(info) == 5: # add atoms into last fragment
self.frags[-1].append(int(info[0]) - 1)
line = next(inputfile)
# Extract charge
if line[1:11] == "Net Charge":
charge = int(line.split()[2])
self.set_attribute('charge', charge)
line = next(inputfile)
if len(line.strip()):
# Spin polar: 1 (Spin_A minus Spin_B electrons)
# (Not sure about this for higher multiplicities)
mult = int(line.split()[2]) + 1
else:
mult = 1
self.set_attribute('mult', mult)
if line[1:22] == "S C F   U P D A T E S":
# find targets for SCF convergence
if not hasattr(self, "scftargets"):
self.scftargets = []
self.skip_lines(inputfile, ['e', 'b', 'numbers'])
line = next(inputfile)
self.SCFconv = float(line.split()[-1])
line = next(inputfile)
self.sconv2 = float(line.split()[-1])
# In ADF 2013, the default numerical integration method is fuzzy cells,
# although it used to be Voronoi polyhedra. Both methods apparently set
# the accint parameter, although the latter does so indirectly, based on
# a 'grid quality' setting. This is translated into accint using a
# dictionary with values taken from the documentation.
if "Numerical Integration : Voronoi Polyhedra (Te Velde)" in line:
self.integration_method = "voronoi_polyhedra"
if line[1:27] == 'General Accuracy Parameter':
# Need to know the accuracy of the integration grid to
# calculate the scftarget...note that it changes with time
self.accint = float(line.split()[-1])
if "Numerical Integration : Fuzzy Cells (Becke)" in line:
self.integration_method = 'fuzzy_cells'
if line[1:19] == "Becke grid quality":
self.grid_quality = line.split()[-1]
quality2accint = {
'BASIC': 2.0,
'NORMAL': 4.0,
'GOOD': 6.0,
'VERYGOOD': 8.0,
'EXCELLENT': 10.0,
}
self.accint = quality2accint[self.grid_quality]
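# For example, a 'GOOD' Becke grid corresponds to accint = 6.0, which the
# SCF-target logic further below treats as an integration accuracy of
# roughly 10**-6.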
# Half of the atomic orbital overlap matrix is printed since it is symmetric,
# but this requires "PRINT Smat" to be in the input. There are extra blank lines
# at the end of the block, which are used to terminate the parsing.
#
# ====== smat
#
# column 1 2 3 4
# row
# 1 1.00000000000000E+00
# 2 2.43370854175315E-01 1.00000000000000E+00
# 3 0.00000000000000E+00 0.00000000000000E+00 1.00000000000000E+00
# ...
#
if "====== smat" in line:
# Initialize the matrix with Nones so we can easily check all has been parsed.
overlaps = [[None] * self.nbasis for i in range(self.nbasis)]
self.skip_line(inputfile, 'blank')
line = next(inputfile)
while line.strip():
colline = line
assert colline.split()[0] == "column"
columns = [int(i) for i in colline.split()[1:]]
rowline = next(inputfile)
assert rowline.strip() == "row"
line = next(inputfile)
while line.strip():
i = int(line.split()[0])
vals = [float(col) for col in line.split()[1:]]
for j, o in enumerate(vals):
k = columns[j]
overlaps[k-1][i-1] = o
overlaps[i-1][k-1] = o
line = next(inputfile)
line = next(inputfile)
# Now all values should be parsed, and so no Nones remaining.
assert all([all([x is not None for x in ao]) for ao in overlaps])
self.set_attribute('aooverlaps', overlaps)
if line[1:11] == "CYCLE    1":
self.updateprogress(inputfile, "QM convergence", self.fupdate)
newlist = []
line = next(inputfile)
if not hasattr(self, "geovalues"):
# This is the first SCF cycle
self.scftargets.append([self.sconv2*10, self.sconv2])
elif self.finalgeometry in [self.GETLAST, self.NOMORE]:
# This is the final SCF cycle
self.scftargets.append([self.SCFconv*10, self.SCFconv])
else:
# This is an intermediate SCF cycle in a geometry optimization,
# in which case the SCF convergence target needs to be derived
# from the accint parameter. For Voronoi polyhedra integration,
# accint is printed and parsed. For fuzzy cells, it can be inferred
# from the grid quality setting, as is done somewhere above.
if self.accint:
oldscftst = self.scftargets[-1][1]
grdmax = self.geovalues[-1][1]
scftst = max(self.SCFconv, min(oldscftst, grdmax/30, 10**(-self.accint)))
self.scftargets.append([scftst*10, scftst])
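# Illustrative numbers (not from any particular output): with
# SCFconv = 1e-6, a previous target of 1e-4, a maximum gradient of 0.003
# and accint = 4.0, this gives max(1e-6, min(1e-4, 1e-4, 1e-4)) = 1e-4,
# so the target follows the gradient but is never tighter than SCFconv.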
while line.find("SCF CONVERGED") == -1 and line.find("SCF not fully converged, result acceptable") == -1 and line.find("SCF NOT CONVERGED") == -1:
if line[4:12] == "SCF test":
if not hasattr(self, "scfvalues"):
self.scfvalues = []
info = line.split()
newlist.append([float(info[4]), abs(float(info[6]))])
try:
line = next(inputfile)
except StopIteration: # EOF reached?
self.logger.warning("SCF did not converge, so attributes may be missing")
break
if line.find("SCF not fully converged, result acceptable") > 0:
self.logger.warning("SCF not fully converged, results acceptable")
if line.find("SCF NOT CONVERGED") > 0:
self.logger.warning("SCF did not converge! moenergies and mocoeffs are unreliable")
if hasattr(self, "scfvalues"):
self.scfvalues.append(newlist)
# Parse SCF energy for SP calcs from bonding energy decomposition section.
# It seems ADF does not print it earlier for SP calculations.
# Geometry optimization runs also print this, and we want to parse it
# for them, too, even if it repeats the last "Geometry Convergence Tests"
# section (but it's usually a bit different).
if line[:21] == "Total Bonding Energy:":
if not hasattr(self, "scfenergies"):
self.scfenergies = []
energy = utils.convertor(float(line.split()[3]), "hartree", "eV")
self.scfenergies.append(energy)
if line[51:65] == "Final Geometry":
self.finalgeometry = self.GETLAST
# Get the coordinates from each step of the GeoOpt.
if line[1:24] == "Coordinates (Cartesian)" and self.finalgeometry in [self.NOTFOUND, self.GETLAST]:
self.skip_lines(inputfile, ['e', 'b', 'title', 'title', 'd'])
atomcoords = []
line = next(inputfile)
while list(set(line.strip())) != ['-']:
atomcoords.append(list(map(float, line.split()[5:8])))
line = next(inputfile)
if not hasattr(self, "atomcoords"):
self.atomcoords = []
self.atomcoords.append(atomcoords)
# Don't get any more coordinates in this case.
# KML: I think we could combine this with optdone (see below).
if self.finalgeometry == self.GETLAST:
self.finalgeometry = self.NOMORE
# There have been some changes in the format of the geometry convergence information,
# and this is how it is printed in older versions (2007.01 unit tests).
#
# ==========================
# Geometry Convergence Tests
# ==========================
#
# Energy old : -5.14170647
# new : -5.15951374
#
# Convergence tests:
# (Energies in hartree, Gradients in hartree/angstr or radian, Lengths in angstrom, Angles in degrees)
#
# Item Value Criterion Conv. Ratio
# -------------------------------------------------------------------------
# change in energy -0.01780727 0.00100000 NO 0.00346330
# gradient max 0.03219530 0.01000000 NO 0.30402650
# gradient rms 0.00858685 0.00666667 NO 0.27221261
# cart. step max 0.07674971 0.01000000 NO 0.75559435
# cart. step rms 0.02132310 0.00666667 NO 0.55335378
#
if line[1:27] == 'Geometry Convergence Tests':
if not hasattr(self, "geotargets"):
self.geovalues = []
self.geotargets = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0], "d")
if not hasattr(self, "scfenergies"):
self.scfenergies = []
self.skip_lines(inputfile, ['e', 'b'])
energies_old = next(inputfile)
energies_new = next(inputfile)
self.scfenergies.append(utils.convertor(float(energies_new.split()[-1]), "hartree", "eV"))
self.skip_lines(inputfile, ['b', 'convergence', 'units', 'b', 'header', 'd'])
values = []
for i in range(5):
temp = next(inputfile).split()
self.geotargets[i] = float(temp[-3])
values.append(float(temp[-4]))
self.geovalues.append(values)
# This is to make geometry optimization always have the optdone attribute,
# even if it is to be empty for unconverged runs.
if not hasattr(self, 'optdone'):
self.optdone = []
# After the test, there is a message if the search is converged:
#
# ***************************************************************************************************
# Geometry CONVERGED
# ***************************************************************************************************
#
if line.strip() == "Geometry CONVERGED":
self.skip_line(inputfile, 'stars')
self.optdone.append(len(self.geovalues) - 1)
# Here is the corresponding geometry convergence info from the 2013.01 unit test.
# Note that the step number is given, which it will be prudent to use in an assertion.
#
#----------------------------------------------------------------------
#Geometry Convergence after Step 3 (Hartree/Angstrom,Angstrom)
#----------------------------------------------------------------------
#current energy -5.16274478 Hartree
#energy change -0.00237544 0.00100000 F
#constrained gradient max 0.00884999 0.00100000 F
#constrained gradient rms 0.00249569 0.00066667 F
#gradient max 0.00884999
#gradient rms 0.00249569
#cart. step max 0.03331296 0.01000000 F
#cart. step rms 0.00844037 0.00666667 F
if line[:31] == "Geometry Convergence after Step":
stepno = int(line.split()[4])
# This is to make geometry optimization always have the optdone attribute,
# even if it is to be empty for unconverged runs.
if not hasattr(self, 'optdone'):
self.optdone = []
# The convergence message is inline in this block, not later as it was before.
if "** CONVERGED **" in line:
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.geovalues) - 1)
self.skip_line(inputfile, 'dashes')
current_energy = next(inputfile)
energy_change = next(inputfile)
constrained_gradient_max = next(inputfile)
constrained_gradient_rms = next(inputfile)
gradient_max = next(inputfile)
gradient_rms = next(inputfile)
cart_step_max = next(inputfile)
cart_step_rms = next(inputfile)
if not hasattr(self, "scfenergies"):
self.scfenergies = []
energy = utils.convertor(float(current_energy.split()[-2]), "hartree", "eV")
self.scfenergies.append(energy)
if not hasattr(self, "geotargets"):
self.geotargets = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0], "d")
self.geotargets[0] = float(energy_change.split()[-2])
self.geotargets[1] = float(constrained_gradient_max.split()[-2])
self.geotargets[2] = float(constrained_gradient_rms.split()[-2])
self.geotargets[3] = float(cart_step_max.split()[-2])
self.geotargets[4] = float(cart_step_rms.split()[-2])
if not hasattr(self, "geovalues"):
self.geovalues = []
self.geovalues.append([])
self.geovalues[-1].append(float(energy_change.split()[-3]))
self.geovalues[-1].append(float(constrained_gradient_max.split()[-3]))
self.geovalues[-1].append(float(constrained_gradient_rms.split()[-3]))
self.geovalues[-1].append(float(cart_step_max.split()[-3]))
self.geovalues[-1].append(float(cart_step_rms.split()[-3]))
if line.find('Orbital Energies, per Irrep and Spin') > 0 and not hasattr(self, "mosyms") and self.nosymflag and not self.unrestrictedflag:
#Extracting orbital symmetries and energies, homos for nosym case
#Should only be for restricted case because there is a better text block for unrestricted and nosym
self.mosyms = [[]]
self.moenergies = [[]]
self.skip_lines(inputfile, ['e', 'header', 'd', 'label'])
line = next(inputfile)
info = line.split()
if not info[0] == '1':
self.logger.warning("MO info up to #%s is missing" % info[0])
#handle case where MO information up to a certain orbital are missing
while int(info[0]) - 1 != len(self.moenergies[0]):
self.moenergies[0].append(99999)
self.mosyms[0].append('A')
homoA = None
while len(line) > 10:
info = line.split()
self.mosyms[0].append('A')
self.moenergies[0].append(utils.convertor(float(info[2]), 'hartree', 'eV'))
if info[1] == '0.000' and not hasattr(self, 'homos'):
self.set_attribute('homos', [len(self.moenergies[0]) - 2])
line = next(inputfile)
self.moenergies = [numpy.array(self.moenergies[0], "d")]
if line[1:29] == 'Orbital Energies, both Spins' and not hasattr(self, "mosyms") and self.nosymflag and self.unrestrictedflag:
#Extracting orbital symmetries and energies, homos for nosym case
#should only be here if unrestricted and nosym
self.mosyms = [[], []]
moenergies = [[], []]
self.skip_lines(inputfile, ['d', 'b', 'header', 'd'])
homoa = 0
homob = None
line = next(inputfile)
while len(line) > 5:
info = line.split()
if info[2] == 'A':
self.mosyms[0].append('A')
moenergies[0].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
if info[3] != '0.00':
homoa = len(moenergies[0]) - 1
elif info[2] == 'B':
self.mosyms[1].append('A')
moenergies[1].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
if info[3] != '0.00':
homob = len(moenergies[1]) - 1
else:
print(("Error reading line: %s" % line))
line = next(inputfile)
self.moenergies = [numpy.array(x, "d") for x in moenergies]
self.set_attribute('homos', [homoa, homob])
# Extracting orbital symmetries and energies, homos.
if line[1:29] == 'Orbital Energies, all Irreps' and not hasattr(self, "mosyms"):
self.symlist = {}
self.mosyms = [[]]
self.moenergies = [[]]
self.skip_lines(inputfile, ['e', 'b', 'header', 'd'])
homoa = None
homob = None
#multiple = {'E':2, 'T':3, 'P':3, 'D':5}
# The above is set if there are no special irreps
names = [irrep[0].split(':')[0] for irrep in self.irreps]
counts = [len(irrep) for irrep in self.irreps]
multiple = dict(list(zip(names, counts)))
irrepspecies = {}
for n in range(len(names)):
indices = list(range(counts[n]))
subspecies = self.irreps[n]
irrepspecies[names[n]] = dict(list(zip(indices, subspecies)))
line = next(inputfile)
while line.strip():
info = line.split()
if len(info) == 5: # this is restricted
#count = multiple.get(info[0][0],1)
count = multiple.get(info[0], 1)
for repeat in range(count): # i.e. add E's twice, T's thrice
self.mosyms[0].append(self.normalisesym(info[0]))
self.moenergies[0].append(utils.convertor(float(info[3]), 'hartree', 'eV'))
sym = info[0]
if count > 1: # add additional sym label
sym = self.normalisedegenerates(info[0], repeat, ndict=irrepspecies)
try:
self.symlist[sym][0].append(len(self.moenergies[0])-1)
except KeyError:
self.symlist[sym] = [[]]
self.symlist[sym][0].append(len(self.moenergies[0])-1)
if info[2] == '0.00' and not hasattr(self, 'homos'):
self.homos = [len(self.moenergies[0]) - (count + 1)] # count, because need to handle degenerate cases
line = next(inputfile)
elif len(info) == 6: # this is unrestricted
if len(self.moenergies) < 2: # if we don't have space, create it
self.moenergies.append([])
self.mosyms.append([])
# count = multiple.get(info[0][0], 1)
count = multiple.get(info[0], 1)
if info[2] == 'A':
for repeat in range(count): # i.e. add E's twice, T's thrice
self.mosyms[0].append(self.normalisesym(info[0]))
self.moenergies[0].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
sym = info[0]
if count > 1: # add additional sym label
sym = self.normalisedegenerates(info[0], repeat)
try:
self.symlist[sym][0].append(len(self.moenergies[0])-1)
except KeyError:
self.symlist[sym] = [[], []]
self.symlist[sym][0].append(len(self.moenergies[0])-1)
if info[3] == '0.00' and homoa is None:
homoa = len(self.moenergies[0]) - (count + 1) # count because degenerate cases need to be handled
if info[2] == 'B':
for repeat in range(count): # i.e. add E's twice, T's thrice
self.mosyms[1].append(self.normalisesym(info[0]))
self.moenergies[1].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
sym = info[0]
if count > 1: # add additional sym label
sym = self.normalisedegenerates(info[0], repeat)
try:
self.symlist[sym][1].append(len(self.moenergies[1])-1)
except KeyError:
self.symlist[sym] = [[], []]
self.symlist[sym][1].append(len(self.moenergies[1])-1)
if info[3] == '0.00' and homob is None:
homob = len(self.moenergies[1]) - (count + 1)
line = next(inputfile)
else: # different number of lines
print(("Error", info))
if len(info) == 6: # still unrestricted, despite being out of loop
self.set_attribute('homos', [homoa, homob])
self.moenergies = [numpy.array(x, "d") for x in self.moenergies]
# Section on extracting vibdisps
# Also contains vibfreqs, but these are extracted in the
# following section (see below)
if line[1:28] == "Vibrations and Normal Modes":
self.vibdisps = []
self.skip_lines(inputfile, ['e', 'b', 'header', 'header', 'b', 'b'])
freqs = next(inputfile)
while freqs.strip() != "":
minus = next(inputfile)
p = [[], [], []]
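# Up to three normal modes are printed side by side; p[k] gathers the
# (x, y, z) displacement of every atom for the k-th mode in this block.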
for i in range(len(self.atomnos)):
broken = list(map(float, next(inputfile).split()[1:]))
for j in range(0, len(broken), 3):
p[j//3].append(broken[j:j+3])
self.vibdisps.extend(p[:(len(broken)//3)])
self.skip_lines(inputfile, ['b', 'b'])
freqs = next(inputfile)
self.vibdisps = numpy.array(self.vibdisps, "d")
if line[1:24] == "List of All Frequencies":
# Start of the IR/Raman frequency section
self.updateprogress(inputfile, "Frequency information", self.fupdate)
# self.vibsyms = [] # Need to look into this a bit more
self.vibirs = []
self.vibfreqs = []
for i in range(8):
line = next(inputfile)
line = next(inputfile).strip()
while line:
temp = line.split()
self.vibfreqs.append(float(temp[0]))
self.vibirs.append(float(temp[2])) # or is it temp[1]?
line = next(inputfile).strip()
self.vibfreqs = numpy.array(self.vibfreqs, "d")
self.vibirs = numpy.array(self.vibirs, "d")
if hasattr(self, "vibramans"):
self.vibramans = numpy.array(self.vibramans, "d")
self.skip_lines(
inputfile,
["b", "b", "e", "Statistical Thermal Analysis", "e", "b"]
)
line = next(inputfile)
assert "Pressure" in line
self.set_attribute("pressure", float(line.split()[1]))
line = next(inputfile)
assert "Temperature" in line
self.set_attribute("temperature", float(line.split()[1]))
self.skip_lines(
inputfile,
[
"b",
"b",
"b",
"Moments of Inertia",
"e",
"b",
"principal moments",
"d",
"MOI tensor Xx",
"MOI tensor Yx",
"MOI tensor Zx",
"b",
"b",
]
)
while line.split() != ["Temp", "Transl", "Rotat", "Vibrat", "Total"]:
line = next(inputfile)
self.skip_lines(inputfile, ["d", "s"])
line = next(inputfile)
assert "Entropy" in line
self.set_attribute(
"entropy",
utils.convertor(float(line.split()[6]) * self.temperature / 1000,
"kcal/mol", "hartree")
)
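# The seventh column is assumed to hold the total entropy in cal/(mol K),
# so multiplying by T and dividing by 1000 yields T*S in kcal/mol before
# the conversion to hartree.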
line = next(inputfile)
assert "Internal Energy" in line
self.set_attribute("zpve", utils.convertor(float(line.split()[5]), "kcal/mol", "hartree"))
#******************************************************************************************************************
# delete this after a new implementation using smat, eigvec print, eprint?
# Extract the number of basis sets
if line[1:49] == "Total nr. of (C)SFOs (summation over all irreps)":
nbasis = int(line.split(":")[1].split()[0])
self.set_attribute('nbasis', nbasis)
# now that we're here, let's extract the SFO names (fonames)
self.fonames = []
self.start_indeces = {}
self.atombasis = [[] for frag in self.frags] # parse atombasis in the case of trivial SFOs
self.skip_line(inputfile, 'blank')
note = next(inputfile)
symoffset = 0
self.skip_line(inputfile, 'blank')
line = next(inputfile)
if len(line) > 2: # fix for ADF2006.01 as it has another note
self.skip_line(inputfile, 'blank')
line = next(inputfile)
self.skip_line(inputfile, 'blank')
self.nosymreps = []
while len(self.fonames) < self.nbasis:
symline = next(inputfile)
sym = symline.split()[1]
line = next(inputfile)
num = int(line.split(':')[1].split()[0])
self.nosymreps.append(num)
#read until line "--------..." is found
while line.find('-----') < 0:
line = next(inputfile)
line = next(inputfile) # the start of the first SFO
while len(self.fonames) < symoffset + num:
info = line.split()
#index0 index1 occ2 energy3/4 fragname5 coeff6 orbnum7 orbname8 fragname9
if not sym in list(self.start_indeces.keys()):
#have we already set the start index for this symmetry?
self.start_indeces[sym] = int(info[1])
orbname = info[8]
orbital = info[7] + orbname.replace(":", "")
fragname = info[5]
frag = fragname + info[9]
coeff = float(info[6])
# parse atombasis only in the case that all coefficients are 1
# and delete it otherwise
if hasattr(self, 'atombasis'):
if coeff == 1.:
ibas = int(info[0]) - 1
ifrag = int(info[9]) - 1
iat = self.frags[ifrag][0]
self.atombasis[iat].append(ibas)
else:
del self.atombasis
line = next(inputfile)
while line.strip() and not line[:7].strip(): # while it's the same SFO
# i.e. while not completely blank, but blank at the start
info = line[43:].split()
if len(info) > 0: # len(info)==0 for the second line of dvb_ir.adfout
frag += "+" + fragname + info[-1]
coeff = float(info[-4])
if coeff < 0:
orbital += '-' + info[-3] + info[-2].replace(":", "")
else:
orbital += '+' + info[-3] + info[-2].replace(":", "")
line = next(inputfile)
# At this point, we are either at the start of the next SFO or at
# a blank line...the end
self.fonames.append("%s_%s" % (frag, orbital))
symoffset += num
# blankline blankline
next(inputfile)
next(inputfile)
if line[1:32] == "S F O   P O P U L A T I O N S ,":
#Extract overlap matrix
# self.fooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")
symoffset = 0
for nosymrep in self.nosymreps:
line = next(inputfile)
while line.find('===') < 10: # look for the symmetry labels
line = next(inputfile)
self.skip_lines(inputfile, ['b', 'b'])
text = next(inputfile)
if text[13:20] != "Overlap": # verify this has overlap info
break
self.skip_lines(inputfile, ['b', 'col', 'row'])
if not hasattr(self, "fooverlaps"): # make sure there is a matrix to store this
self.fooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")
base = 0
while base < nosymrep: # have we read all the columns?
for i in range(nosymrep - base):
self.updateprogress(inputfile, "Overlap", self.fupdate)
line = next(inputfile)
parts = line.split()[1:]
for j in range(len(parts)):
k = float(parts[j])
self.fooverlaps[base + symoffset + j, base + symoffset + i] = k
self.fooverlaps[base + symoffset + i, base + symoffset + j] = k
#blank, blank, column
for i in range(3):
next(inputfile)
base += 4
symoffset += nosymrep
base = 0
# The commented code below makes the atombasis attribute based on the BAS function in ADF,
# but this is probably not so useful, since SFOs are used to build MOs in ADF.
# if line[1:54] == "BAS: List of all Elementary Cartesian Basis Functions":
#
# self.atombasis = []
#
# # There will be some text, followed by a line:
# # (power of) X Y Z R Alpha on Atom
# while not line[1:11] == "(power of)":
# line = inputfile.next()
# dashes = inputfile.next()
# blank = inputfile.next()
# line = inputfile.next()
# # There will be two blank lines when there are no more atom types.
# while line.strip() != "":
# atoms = [int(i)-1 for i in line.split()[1:]]
# for n in range(len(atoms)):
# self.atombasis.append([])
# dashes = inputfile.next()
# line = inputfile.next()
# while line.strip() != "":
# indices = [int(i)-1 for i in line.split()[5:]]
# for i in range(len(indices)):
# self.atombasis[atoms[i]].append(indices[i])
# line = inputfile.next()
# line = inputfile.next()
if line[48:67] == "SFO MO coefficients":
self.mocoeffs = [numpy.zeros((self.nbasis, self.nbasis), "d")]
spin = 0
symoffset = 0
lastrow = 0
# Section ends with "1" at beginning of a line.
while line[0] != "1":
line = next(inputfile)
# If spin is specified, then there will be two coefficient matrices.
if line.strip() == "***** SPIN 1 *****":
self.mocoeffs = [numpy.zeros((self.nbasis, self.nbasis), "d"),
numpy.zeros((self.nbasis, self.nbasis), "d")]
# Bump up the spin.
if line.strip() == "***** SPIN 2 *****":
spin = 1
symoffset = 0
lastrow = 0
# Next symmetry.
if line.strip()[:4] == "=== ":
sym = line.split()[1]
if self.nosymflag:
aolist = list(range(self.nbasis))
else:
aolist = self.symlist[sym][spin]
# Add to the symmetry offset of AO ordering.
symoffset += lastrow
# Blocks with coefficients always start with "MOs :".
if line[1:6] == "MOs :":
# Next line has the MO index contributed to.
monumbers = [int(n) for n in line[6:].split()]
self.skip_lines(inputfile, ['occup', 'label'])
# The table can end with a blank line or "1".
row = 0
line = next(inputfile)
while not line.strip() in ["", "1"]:
info = line.split()
if int(info[0]) < self.start_indeces[sym]:
#check to make sure we aren't parsing CFs
line = next(inputfile)
continue
self.updateprogress(inputfile, "Coefficients", self.fupdate)
row += 1
coeffs = [float(x) for x in info[1:]]
moindices = [aolist[n-1] for n in monumbers]
# The AO index is 1 less than the row.
aoindex = symoffset + row - 1
for i in range(len(monumbers)):
self.mocoeffs[spin][moindices[i], aoindex] = coeffs[i]
line = next(inputfile)
lastrow = row
# **************************************************************************
# * *
# * Final excitation energies from Davidson algorithm *
# * *
# **************************************************************************
#
# Number of loops in Davidson routine = 20
# Number of matrix-vector multiplications = 24
# Type of excitations = SINGLET-SINGLET
#
# Symmetry B.u
#
# ... several blocks ...
#
# Normal termination of EXCITATION program part
if line[4:53] == "Final excitation energies from Davidson algorithm":
while line[1:9] != "Symmetry" and "Normal termination" not in line:
line = next(inputfile)
symm = self.normalisesym(line.split()[1])
# Excitation energies E in a.u. and eV, dE wrt prev. cycle,
# oscillator strengths f in a.u.
#
# no. E/a.u. E/eV f dE/a.u.
# -----------------------------------------------------
# 1 0.17084 4.6488 0.16526E-01 0.28E-08
# ...
while line.split() != ['no.', 'E/a.u.', 'E/eV', 'f', 'dE/a.u.'] and "Normal termination" not in line:
line = next(inputfile)
self.skip_line(inputfile, 'dashes')
etenergies = []
etoscs = []
etsyms = []
line = next(inputfile)
while len(line) > 2:
info = line.split()
etenergies.append(utils.convertor(float(info[2]), "eV", "wavenumber"))
etoscs.append(float(info[3]))
etsyms.append(symm)
line = next(inputfile)
# There is another section before this, with transition dipole moments,
# but this should just skip past it.
while line[1:53] != "Major MO -> MO transitions for the above excitations":
line = next(inputfile)
# Note that here, and later, the number of blank lines can vary between
# versions of ADF (extra lines are seen in 2013.01 unit tests, for example).
self.skip_line(inputfile, 'blank')
excitation_occupied = next(inputfile)
header = next(inputfile)
while not header.strip():
header = next(inputfile)
header2 = next(inputfile)
x_y_z = next(inputfile)
line = next(inputfile)
while not line.strip():
line = next(inputfile)
# Before we start handling transitions, we need to create mosyms
# with indices; only restricted calcs are possible in ADF.
counts = {}
syms = []
for mosym in self.mosyms[0]:
if list(counts.keys()).count(mosym) == 0:
counts[mosym] = 1
else:
counts[mosym] += 1
syms.append(str(counts[mosym]) + mosym)
etsecs = []
printed_warning = False
for i in range(len(etenergies)):
etsec = []
info = line.split()
while len(info) > 0:
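# The MO labels in info[1] and info[3] fuse an orbital index with its
# irrep label (e.g. '27Aa'), so each is split at the first non-digit
# character below.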
match = re.search('[^0-9]', info[1])
index1 = int(info[1][:match.start(0)])
text = info[1][match.start(0):]
symtext = text[0].upper() + text[1:]
sym1 = str(index1) + self.normalisesym(symtext)
match = re.search('[^0-9]', info[3])
index2 = int(info[3][:match.start(0)])
text = info[3][match.start(0):]
symtext = text[0].upper() + text[1:]
sym2 = str(index2) + self.normalisesym(symtext)
try:
index1 = syms.index(sym1)
except ValueError:
if not printed_warning:
self.logger.warning("Etsecs are not accurate!")
printed_warning = True
try:
index2 = syms.index(sym2)
except ValueError:
if not printed_warning:
self.logger.warning("Etsecs are not accurate!")
printed_warning = True
etsec.append([(index1, 0), (index2, 0), float(info[4])])
line = next(inputfile)
info = line.split()
etsecs.append(etsec)
# Again, the number of blank lines between transitions can vary.
line = next(inputfile)
while not line.strip():
line = next(inputfile)
if not hasattr(self, "etenergies"):
self.etenergies = etenergies
else:
self.etenergies += etenergies
if not hasattr(self, "etoscs"):
self.etoscs = etoscs
else:
self.etoscs += etoscs
if not hasattr(self, "etsyms"):
self.etsyms = etsyms
else:
self.etsyms += etsyms
if not hasattr(self, "etsecs"):
self.etsecs = etsecs
else:
self.etsecs += etsecs
if "M U L L I K E N P O P U L A T I O N S" in line:
if not hasattr(self, "atomcharges"):
self.atomcharges = {}
while line[1:5] != "Atom":
line = next(inputfile)
self.skip_line(inputfile, 'dashes')
mulliken = []
line = next(inputfile)
while line.strip():
mulliken.append(float(line.split()[2]))
line = next(inputfile)
self.atomcharges["mulliken"] = mulliken
# Dipole moment is always printed after a point calculation,
# and the reference point for this is always the origin (0,0,0)
# and not necessarily the center of mass, as explained on the
# ADF user mailing list (see cclib/cclib#113 for details).
#
# =============
# Dipole Moment *** (Debye) ***
# =============
#
# Vector : 0.00000000 0.00000000 0.00000000
# Magnitude: 0.00000000
#
if line.strip()[:13] == "Dipole Moment":
self.skip_line(inputfile, 'equals')
# There is not always a blank line here, for example when the dipole and quadrupole
# moments are printed after the multipole derived atomic charges. Still, to the best
# of my knowledge (KML) the values are still in Debye.
line = next(inputfile)
if not line.strip():
line = next(inputfile)
assert line.split()[0] == "Vector"
dipole = [float(d) for d in line.split()[-3:]]
reference = [0.0, 0.0, 0.0]
if not hasattr(self, 'moments'):
self.moments = [reference, dipole]
else:
try:
assert self.moments[1] == dipole
except AssertionError:
self.logger.warning('Overwriting previous multipole moments with new values')
self.moments = [reference, dipole]
# Molecular response properties.
if line.strip()[1:-1].strip() == "RESPONSE program part":
while line.strip() != "Normal termination of RESPONSE program part":
if "THE DIPOLE-DIPOLE POLARIZABILITY TENSOR:" in line:
if not hasattr(self, 'polarizabilities'):
self.polarizabilities = []
polarizability = numpy.empty(shape=(3, 3))
self.skip_lines(inputfile, ['b', 'FREQUENCY', 'coordinates'])
# Ordering of rows/columns is Y, Z, X.
ordering = [1, 2, 0]
indices = list(itertools.product(ordering, ordering))
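# e.g. the first printed row fills polarizability[1, :] in (Y, Z, X)
# column order, so the stored tensor ends up in X, Y, Z convention.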
for i in range(3):
tokens = next(inputfile).split()
for j in range(3):
polarizability[indices[(i*3)+j]] = tokens[j]
self.polarizabilities.append(polarizability)
line = next(inputfile)
if line[:24] == ' Buffered I/O statistics':
self.metadata['success'] = True
|
berquist/cclib
|
cclib/parser/adfparser.py
|
Python
|
bsd-3-clause
| 54,001
|
[
"ADF",
"cclib"
] |
1da69ce6c8cd724783daac4c5bd19f7397b979bfda8534d01daa49d9b51ec3d5
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import mcscf
class KnownValues(unittest.TestCase):
def test_rhf(self):
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = 'cc-pvtz'
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
mc = mcscf.CASSCF(m, 6, 4)
mc.verbose = 5
mo = m.mo_coeff
eris0 = mcscf.mc_ao2mo._ERIS(mc, mo, 'incore')
eris1 = mcscf.mc_ao2mo._ERIS(mc, mo, 'outcore')
eris2 = mcscf.mc_ao2mo._ERIS(mc, mo, 'outcore', level=1)
eris3 = mcscf.mc_ao2mo._ERIS(mc, mo, 'outcore', level=2)
self.assertTrue(numpy.allclose(eris0.vhf_c, eris1.vhf_c))
self.assertTrue(numpy.allclose(eris0.j_pc , eris1.j_pc ))
self.assertTrue(numpy.allclose(eris0.k_pc , eris1.k_pc ))
self.assertTrue(numpy.allclose(eris0.ppaa , eris1.ppaa ))
self.assertTrue(numpy.allclose(eris0.papa , eris1.papa ))
self.assertTrue(numpy.allclose(eris0.vhf_c, eris2.vhf_c))
self.assertTrue(numpy.allclose(eris0.j_pc , eris2.j_pc ))
self.assertTrue(numpy.allclose(eris0.k_pc , eris2.k_pc ))
self.assertTrue(numpy.allclose(eris0.ppaa , eris2.ppaa ))
self.assertTrue(numpy.allclose(eris0.papa , eris2.papa ))
self.assertTrue(numpy.allclose(eris0.vhf_c, eris3.vhf_c))
self.assertTrue(numpy.allclose(eris0.ppaa , eris3.ppaa ))
self.assertTrue(numpy.allclose(eris0.papa , eris3.papa ))
ncore = mc.ncore
ncas = mc.ncas
nocc = ncore + ncas
nmo = mo.shape[1]
eri = ao2mo.incore.full(m._eri, mo, compact=False).reshape((nmo,)*4)
aaap = numpy.array(eri[ncore:nocc,ncore:nocc,ncore:nocc,:])
jc_pp = numpy.einsum('iipq->ipq', eri[:ncore,:ncore,:,:])
kc_pp = numpy.einsum('ipqi->ipq', eri[:ncore,:,:,:ncore])
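# Reference core Fock contribution: vhf_c = 2*J_core - K_core, assembled
# from the core Coulomb (jc_pp) and exchange (kc_pp) blocks above.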
vhf_c = numpy.einsum('cij->ij', jc_pp)*2 - numpy.einsum('cij->ij', kc_pp)
j_pc = numpy.einsum('ijj->ji', jc_pp)
k_pc = numpy.einsum('ijj->ji', kc_pp)
ppaa = numpy.array(eri[:,:,ncore:nocc,ncore:nocc])
papa = numpy.array(eri[:,ncore:nocc,:,ncore:nocc])
self.assertTrue(numpy.allclose(vhf_c, eris0.vhf_c))
self.assertTrue(numpy.allclose(j_pc , eris0.j_pc ))
self.assertTrue(numpy.allclose(k_pc , eris0.k_pc ))
self.assertTrue(numpy.allclose(ppaa , eris0.ppaa ))
self.assertTrue(numpy.allclose(papa , eris0.papa ))
mol.stdout.close()
def test_uhf(self):
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = 'cc-pvtz'
mol.charge = 1
mol.spin = 1
mol.build()
m = scf.UHF(mol)
ehf = m.scf()
mc = mcscf.umc1step.CASSCF(m, 4, 4)
mc.verbose = 5
mo = m.mo_coeff
eris0 = mcscf.umc_ao2mo._ERIS(mc, mo, 'incore')
eris1 = mcscf.umc_ao2mo._ERIS(mc, mo, 'outcore')
self.assertTrue(numpy.allclose(eris1.jkcpp, eris0.jkcpp))
self.assertTrue(numpy.allclose(eris1.jkcPP, eris0.jkcPP))
self.assertTrue(numpy.allclose(eris1.jC_pp, eris0.jC_pp))
self.assertTrue(numpy.allclose(eris1.jc_PP, eris0.jc_PP))
self.assertTrue(numpy.allclose(eris1.aapp , eris0.aapp ))
self.assertTrue(numpy.allclose(eris1.aaPP , eris0.aaPP ))
self.assertTrue(numpy.allclose(eris1.AApp , eris0.AApp ))
self.assertTrue(numpy.allclose(eris1.AAPP , eris0.AAPP ))
self.assertTrue(numpy.allclose(eris1.appa , eris0.appa ))
self.assertTrue(numpy.allclose(eris1.apPA , eris0.apPA ))
self.assertTrue(numpy.allclose(eris1.APPA , eris0.APPA ))
self.assertTrue(numpy.allclose(eris1.cvCV , eris0.cvCV ))
self.assertTrue(numpy.allclose(eris1.Icvcv, eris0.Icvcv))
self.assertTrue(numpy.allclose(eris1.ICVCV, eris0.ICVCV))
self.assertTrue(numpy.allclose(eris1.Iapcv, eris0.Iapcv))
self.assertTrue(numpy.allclose(eris1.IAPCV, eris0.IAPCV))
self.assertTrue(numpy.allclose(eris1.apCV , eris0.apCV ))
self.assertTrue(numpy.allclose(eris1.APcv , eris0.APcv ))
nmo = mo[0].shape[1]
ncore = mc.ncore
ncas = mc.ncas
nocc = (ncas + ncore[0], ncas + ncore[1])
eriaa = ao2mo.incore.full(mc._scf._eri, mo[0])
eriab = ao2mo.incore.general(mc._scf._eri, (mo[0],mo[0],mo[1],mo[1]))
eribb = ao2mo.incore.full(mc._scf._eri, mo[1])
eriaa = ao2mo.restore(1, eriaa, nmo)
eriab = ao2mo.restore(1, eriab, nmo)
eribb = ao2mo.restore(1, eribb, nmo)
jkcpp = numpy.einsum('iipq->ipq', eriaa[:ncore[0],:ncore[0],:,:]) \
- numpy.einsum('ipqi->ipq', eriaa[:ncore[0],:,:,:ncore[0]])
jkcPP = numpy.einsum('iipq->ipq', eribb[:ncore[1],:ncore[1],:,:]) \
- numpy.einsum('ipqi->ipq', eribb[:ncore[1],:,:,:ncore[1]])
jC_pp = numpy.einsum('pqii->pq', eriab[:,:,:ncore[1],:ncore[1]])
jc_PP = numpy.einsum('iipq->pq', eriab[:ncore[0],:ncore[0],:,:])
aapp = numpy.copy(eriaa[ncore[0]:nocc[0],ncore[0]:nocc[0],:,:])
aaPP = numpy.copy(eriab[ncore[0]:nocc[0],ncore[0]:nocc[0],:,:])
AApp = numpy.copy(eriab[:,:,ncore[1]:nocc[1],ncore[1]:nocc[1]].transpose(2,3,0,1))
AAPP = numpy.copy(eribb[ncore[1]:nocc[1],ncore[1]:nocc[1],:,:])
appa = numpy.copy(eriaa[ncore[0]:nocc[0],:,:,ncore[0]:nocc[0]])
apPA = numpy.copy(eriab[ncore[0]:nocc[0],:,:,ncore[1]:nocc[1]])
APPA = numpy.copy(eribb[ncore[1]:nocc[1],:,:,ncore[1]:nocc[1]])
cvCV = numpy.copy(eriab[:ncore[0],ncore[0]:,:ncore[1],ncore[1]:])
Icvcv = eriaa[:ncore[0],ncore[0]:,:ncore[0],ncore[0]:] * 2\
- eriaa[:ncore[0],:ncore[0],ncore[0]:,ncore[0]:].transpose(0,3,1,2) \
- eriaa[:ncore[0],ncore[0]:,:ncore[0],ncore[0]:].transpose(0,3,2,1)
ICVCV = eribb[:ncore[1],ncore[1]:,:ncore[1],ncore[1]:] * 2\
- eribb[:ncore[1],:ncore[1],ncore[1]:,ncore[1]:].transpose(0,3,1,2) \
- eribb[:ncore[1],ncore[1]:,:ncore[1],ncore[1]:].transpose(0,3,2,1)
Iapcv = eriaa[ncore[0]:nocc[0],:,:ncore[0],ncore[0]:] * 2 \
- eriaa[:,ncore[0]:,:ncore[0],ncore[0]:nocc[0]].transpose(3,0,2,1) \
- eriaa[:,:ncore[0],ncore[0]:,ncore[0]:nocc[0]].transpose(3,0,1,2)
IAPCV = eribb[ncore[1]:nocc[1],:,:ncore[1],ncore[1]:] * 2 \
- eribb[:,ncore[1]:,:ncore[1],ncore[1]:nocc[1]].transpose(3,0,2,1) \
- eribb[:,:ncore[1],ncore[1]:,ncore[1]:nocc[1]].transpose(3,0,1,2)
apCV = numpy.copy(eriab[ncore[0]:nocc[0],:,:ncore[1],ncore[1]:])
APcv = numpy.copy(eriab[:ncore[0],ncore[0]:,ncore[1]:nocc[1],:].transpose(2,3,0,1))
self.assertTrue(numpy.allclose(jkcpp, eris0.jkcpp))
self.assertTrue(numpy.allclose(jkcPP, eris0.jkcPP))
self.assertTrue(numpy.allclose(jC_pp, eris0.jC_pp))
self.assertTrue(numpy.allclose(jc_PP, eris0.jc_PP))
self.assertTrue(numpy.allclose(aapp , eris0.aapp ))
self.assertTrue(numpy.allclose(aaPP , eris0.aaPP ))
self.assertTrue(numpy.allclose(AApp , eris0.AApp ))
self.assertTrue(numpy.allclose(AAPP , eris0.AAPP ))
self.assertTrue(numpy.allclose(appa , eris0.appa ))
self.assertTrue(numpy.allclose(apPA , eris0.apPA ))
self.assertTrue(numpy.allclose(APPA , eris0.APPA ))
self.assertTrue(numpy.allclose(cvCV , eris0.cvCV ))
self.assertTrue(numpy.allclose(Icvcv, eris0.Icvcv))
self.assertTrue(numpy.allclose(ICVCV, eris0.ICVCV))
self.assertTrue(numpy.allclose(Iapcv, eris0.Iapcv))
self.assertTrue(numpy.allclose(IAPCV, eris0.IAPCV))
self.assertTrue(numpy.allclose(apCV , eris0.apCV ))
self.assertTrue(numpy.allclose(APcv , eris0.APcv ))
mol.stdout.close()
if __name__ == "__main__":
print("Full Tests for mc_ao2mo")
unittest.main()
|
gkc1000/pyscf
|
pyscf/mcscf/test/test_mcao2mo.py
|
Python
|
apache-2.0
| 8,904
|
[
"PySCF"
] |
9d30011583f127aa14c95ec25ed6ac2ad3f195c5f8a1fa757386addbde65fc82
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for OOD evaluation.
References:
[1]: Lee, Kimin, et al. "A simple unified framework for detecting
out-of-distribution samples and adversarial attacks." Advances in neural
information processing systems 31 (2018).
https://arxiv.org/abs/1807.03888
"""
from absl import logging
import jax
import numpy as np
import scipy
import sklearn.metrics
import input_utils # local file import from baselines.jft
SUPPORTED_OOD_METHODS = ('msp', 'entropy', 'maha', 'rmaha', 'mlogit')
# TODO(dusenberrymw): Move it to robustness metrics.
def compute_ood_metrics(targets,
predictions,
tpr_thres=0.95,
targets_threshold=None):
"""Computes Area Under the ROC and PR curves and FPRN.
ROC - Receiver Operating Characteristic
PR - Precision and Recall
FPRN - False positive rate at which true positive rate is N.
Args:
targets: np.ndarray of targets, either 0 or 1, or continuous values.
predictions: np.ndarray of predictions, any value.
tpr_thres: float, threshold for true positive rate.
targets_threshold: float, if target values are continuous values, this
threshold binarizes them.
Returns:
A dictionary with AUC-ROC, AUC-PR, and FPRN scores.
"""
if targets_threshold is not None:
targets = np.array(targets)
targets = np.where(targets < targets_threshold,
np.zeros_like(targets, dtype=np.int32),
np.ones_like(targets, dtype=np.int32))
fpr, tpr, _ = sklearn.metrics.roc_curve(targets, predictions)
fprn = fpr[np.argmax(tpr >= tpr_thres)]
return {
'auroc': sklearn.metrics.roc_auc_score(targets, predictions),
'auprc': sklearn.metrics.average_precision_score(targets, predictions),
'fprn': fprn,
}
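# A minimal usage sketch (toy values, not part of the original module):
#
#   targets = np.array([0, 0, 1, 1])          # 1 marks OOD examples
#   predictions = np.array([0.1, 0.2, 0.8, 0.7])  # higher = more OOD
#   compute_ood_metrics(targets, predictions)
#   # -> {'auroc': 1.0, 'auprc': 1.0, 'fprn': 0.0} for this perfect split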
class OODMetric:
"""OOD metric class that stores scores and OOD labels."""
def __init__(self, dataset_name, method_name):
if method_name not in SUPPORTED_OOD_METHODS:
raise NotImplementedError(
'Only %s are supported for OOD evaluation! Got metric_name=%s!' %
(','.join(SUPPORTED_OOD_METHODS), method_name))
self.dataset_name = dataset_name
self.method_name = method_name
self.metric_name = f'{dataset_name}_{method_name}'
self.scores = []
self.labels = []
def update(self, scores, labels):
self.scores += list(scores)
self.labels += list(labels)
def reset_states(self):
self.scores = []
self.labels = []
def get_scores_and_labels(self):
return self.scores, self.labels
def get_metric_name(self):
return self.metric_name
def compute_ood_scores(self, scores):
"""Compute OOD scores.
Compute OOD scores that indicate uncertainty.
Args:
scores: A dict that contains scores for computing OOD scores. A full dict
can contain probs, Mahalanobis distance, and Relative Mahalanobis
distance. The scores should be of the size [batch_size, num_classes]
Returns:
OOD scores: OOD scores that indicate uncertainty. Should be of the size
[batch_size, ]
Raises:
KeyError: An error occurred when the corresponding scores needed for
computing OOD scores are not found in the scores dict.
"""
ood_scores = None
if self.method_name == 'msp':
if 'probs' in scores:
ood_scores = 1 - np.max(scores['probs'], axis=-1)
else:
raise KeyError(
('The variable probs is needed for computing MSP OOD score. ',
'But it is not found in the dict.'))
elif self.method_name == 'mlogit':
if 'logits' in scores:
ood_scores = 1 - np.max(scores['logits'], axis=-1)
else:
raise KeyError(('The variable logits is needed for computing MaxLogits',
' OOD score. But it is not found in the dict.'))
elif self.method_name == 'entropy':
if 'entropy' in scores:
ood_scores = scores['entropy']
else:
raise KeyError(
'The variable entropy is needed for computing Entropy OOD score.',
'But it is not found in the dict.')
elif self.method_name == 'maha':
if 'dists' in scores:
ood_scores = np.min(scores['dists'], axis=-1)
else:
raise KeyError(
('The variable dists is needed for computing Mahalanobis distance ',
'OOD score. But it is not found in the dict.'))
elif self.method_name == 'rmaha':
if 'dists' in scores and 'dists_background' in scores:
ood_scores = np.min(
scores['dists'], axis=-1) - scores['dists_background'].reshape(-1)
else:
raise KeyError((
'The variable dists and dists_background are needed for computing ',
'Mahalanobis distance OOD score. But it is not found in the dict.'))
return ood_scores
def compute_metrics(self, tpr_thres=0.95, targets_threshold=None):
return compute_ood_metrics(
self.labels,
self.scores,
tpr_thres=tpr_thres,
targets_threshold=targets_threshold)
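# A minimal sketch of driving the metric class by hand (hypothetical
# probabilities; in practice the evaluation loop below supplies them):
#
#   metric = OODMetric('cifar100', 'msp')
#   ood_scores = metric.compute_ood_scores(
#       {'probs': np.array([[0.9, 0.1], [0.4, 0.6]])})  # -> [0.1, 0.4]
#   metric.update(ood_scores, np.ones_like(ood_scores))  # label 1 = OOD
#   # After also adding in-distribution batches with label 0,
#   # metric.compute_metrics() returns the AUROC/AUPRC/FPRN dict.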
def compute_mean_and_cov(embeds, labels):
"""Computes class-specific means and shared covariance matrix of given embedding.
The computation follows Eq (1) in [1].
Args:
embeds: An np.array of size [n_train_sample, n_dim], where n_train_sample is
the sample size of training set, n_dim is the dimension of the embedding.
labels: An np.array of size [n_train_sample, ]
Returns:
mean_list: A list of len n_class, and the i-th element is an np.array of
size [n_dim, ] corresponding to the mean of the fitted Gaussian distribution
for the i-th class.
cov: The shared covariance matrix of the size [n_dim, n_dim].
"""
n_dim = embeds.shape[1]
class_ids = np.unique(labels)
mean_list = []
cov = np.zeros((n_dim, n_dim))
for class_id in class_ids:
data = embeds[labels == class_id]
data_mean = np.mean(data, axis=0)
cov += np.dot((data - data_mean).T, (data - data_mean))
mean_list.append(data_mean)
cov = cov / len(labels)
return mean_list, cov
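# Toy illustration of the shapes involved (random data; assumes all three
# classes appear among the labels):
#
#   rng = np.random.RandomState(0)
#   embeds, labels = rng.randn(100, 8), rng.randint(0, 3, size=100)
#   mean_list, cov = compute_mean_and_cov(embeds, labels)
#   # len(mean_list) == 3, each of shape (8,); cov has shape (8, 8).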
def compute_mahalanobis_distance(embeds, mean_list, cov, epsilon=1e-20):
"""Computes Mahalanobis distance between the input to the fitted Guassians.
The computation follows Eq.(2) in [1].
Args:
embeds: An np.array of size [n_test_sample, n_dim], where n_test_sample is
the sample size of the test set, n_dim is the size of the embeddings.
mean_list: A list of len n_class, and the i-th element is an np.array of
size [n_dim, ] corresponding to the mean of the fitted Gaussian
distribution for the i-th class.
cov: The shared covariance matrix of the size [n_dim, n_dim].
epsilon: The small value added to the diagonal of the covariance matrix to
avoid singularity.
Returns:
out: An np.array of size [n_test_sample, n_class] where the [i, j] element
corresponds to the Mahalanobis distance between the i-th sample and the j-th
class Gaussian.
"""
n_sample = embeds.shape[0]
n_class = len(mean_list)
v = cov + np.eye(cov.shape[0], dtype=int) * epsilon # avoid singularity
vi = np.linalg.inv(v)
means = np.array(mean_list)
out = np.zeros((n_sample, n_class))
for i in range(n_sample):
x = embeds[i]
out[i, :] = np.diag(np.dot(np.dot((x - means), vi), (x - means).T))
return out
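# Continuing the toy example above:
#
#   dists = compute_mahalanobis_distance(embeds, mean_list, cov)
#   # dists has shape (100, 3); np.min(dists, axis=-1) is exactly the
#   # 'maha' OOD score consumed by OODMetric.compute_ood_scores.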
def load_ood_datasets(
dataset,
ood_datasets,
ood_split,
pp_eval,
pp_eval_ood,
ood_methods,
train_split,
data_dir,
get_data_fn,
):
"""Load datasets for OOD evaluation.
The datasets should include in-distribution test dataset, OOD test dataset,
and in-distribution training dataset if Mahalanobis distance based method is
applied.
Args:
dataset: The name of in-distribution dataset.
ood_datasets: A list of OOD dataset names.
ood_split: The split of the OOD dataset.
pp_eval: The pre-processing method applied to the ind input datasets.
pp_eval_ood: The pre-processing methods applied to the ood input datasets.
ood_methods: The OOD methods used for evaluation. Can be chosen from 'msp',
'maha', 'rmaha'.
train_split: The split of the training in-distribution dataset.
data_dir: The data directory.
get_data_fn: A function that generates a tf.data.Dataset given a dataset
name or builder, split, preprocessing function, and optional data_dir.
Returns:
ood_ds: A dictionary with dataset label as the key and dataset as the value.
ood_ds_names: A list of dataset labels.
"""
ood_ds = {}
ood_ds_names = []
if isinstance(ood_split, str):
ood_ds.update({'ind': get_data_fn(dataset, ood_split, pp_eval, data_dir)})
ood_ds_names.append('ind')
for ood_dataset, pp_ood in zip(ood_datasets, pp_eval_ood):
ood_ds_name = 'ood_' + ood_dataset
logging.info(
'Load OOD ds, ood_dataset = %s, ood_split = %s, pp_ood = %s, data_dir = %s',
ood_dataset, ood_split, pp_ood, data_dir)
ood_ds.update({
ood_ds_name: get_data_fn(ood_dataset, ood_split, pp_ood, data_dir),
})
ood_ds_names.append(ood_ds_name)
else:
raise NotImplementedError(
'Only string type of ood_split is supported for OOD evaluation! Got ood_split=%s!'
% str(ood_split))
if 'maha' in ood_methods or 'rmaha' in ood_methods:
# Add the training set for fitting the class-conditional Gaussians used by
# the Mahalanobis-distance-based methods.
if isinstance(train_split, str):
ood_ds.update(
{'train_maha': get_data_fn(dataset, train_split, pp_eval, data_dir)})
ood_ds_names.insert(0, 'train_maha')
else:
raise NotImplementedError(
'Only string type of train_split is supported for OOD evaluation! Got train_split=%s!'
% str(train_split))
return ood_ds, ood_ds_names
# TODO(dusenberrymw,jjren): Add a test case.
def eval_ood_metrics(ood_ds,
ood_ds_names,
ood_methods,
evaluation_fn,
opt_target_repl,
n_prefetch=1):
"""Evaluate the model for OOD detection and record metrics.
Args:
ood_ds: A dictionary with dataset label as the key and dataset as the value.
ood_ds_names: List of strings of the in- and out-of-distribution datasets.
Generally corresponds to the keys of `ood_ds` but keeps a specific order
to satisfy dependency constraints across the metrics.
ood_methods: List of strings of the methods used for OOD detection.
The strings are in ['msp', 'entropy', 'maha', 'rmaha', 'mlogit'].
evaluation_fn: Function to evaluate the model with the parameters provided
in `opt_target_repl`.
opt_target_repl: The target of the replicated optimizer (`opt_repl.target`).
n_prefetch: Number of batches to prefetch in the dataset iterators.
Returns:
Dictionary of measurements of the OOD detection tasks.
"""
# MSP stands for maximum softmax probability, max(softmax(logits)).
# MSP can be used as confidence score.
# Maha stands for Mahalanobis distance between the test input and
# fitted class conditional Gaussian distributions based on the
# embeddings. Mahalanobis distance can be used as uncertainty score
# or in other words, negative Mahalanobis distance can be used as
# confidence score.
# RMaha stands for Relative Mahalanobis distance (Ren et al. 2021)
# https://arxiv.org/abs/2106.09022
ood_metrics = {}
for ood_ds_name in ood_ds_names:
if 'ood' in ood_ds_name:
ood_metrics[ood_ds_name] = [
OODMetric(ood_ds_name, ood_method) for ood_method in ood_methods
]
output = {}
# Mean and cov of the class-conditional Gaussians for the Mahalanobis
# distance; mean_list_background and cov_background describe the single
# background Gaussian, fitted regardless of class labels, used for the
# Relative Mahalanobis distance.
mean_list, cov = None, None
mean_list_background, cov_background = None, None
for ood_ds_name in ood_ds_names:
# The dataset train_maha must come before ind and ood
# because train_maha is used to estimate the class-conditional
# means and the shared covariance.
val_ds = ood_ds[ood_ds_name]
val_iter = input_utils.start_input_pipeline(val_ds, n_prefetch)
ncorrect, loss, nseen = 0, 0, 0
pre_logits_list, labels_list = [], []
for batch in val_iter:
batch_scores = {}
batch_ncorrect, batch_losses, batch_n, batch_metric_args = evaluation_fn(
opt_target_repl, batch['image'], batch['labels'], batch['mask'])
ncorrect += np.sum(np.array(batch_ncorrect[0]))
loss += np.sum(np.array(batch_losses[0]))
nseen += np.sum(np.array(batch_n[0]))
# Here we parse batch_metric_args to compute OOD metrics.
logits, labels, pre_logits, masks = batch_metric_args
masks_bool = np.array(masks[0], dtype=bool)
if not np.any(masks_bool):
continue # No valid examples in this batch.
if ood_ds_name == 'train_maha':
# For Mahalanobis distance, we need to first fit class conditional
# Gaussian using training data.
pre_logits_list.append(np.array(pre_logits[0])[masks_bool])
labels_list.append(np.array(labels[0])[masks_bool])
else:
# Computes Mahalanobis distance.
if mean_list is not None and cov is not None:
dists = compute_mahalanobis_distance(
np.array(pre_logits[0])[masks_bool], mean_list, cov)
batch_scores['dists'] = dists
if mean_list_background is not None and cov_background is not None:
dists_background = compute_mahalanobis_distance(
np.array(pre_logits[0])[masks_bool], mean_list_background,
cov_background)
batch_scores['dists_background'] = dists_background
# Computes Maximum softmax probability (MSP)
probs = jax.nn.softmax(logits[0], axis=-1)[masks_bool]
batch_scores['probs'] = probs
batch_scores['logits'] = logits[0][masks_bool]
# Compute Entropy
batch_scores['entropy'] = np.array(
[scipy.stats.entropy(prob) for prob in probs])
# Update metric state for each metric in ood_metrics
if ood_ds_name == 'ind':
for metric_list in ood_metrics.values():
for metric in metric_list:
ood_scores = metric.compute_ood_scores(batch_scores)
ood_labels = np.zeros_like(ood_scores)
metric.update(ood_scores, ood_labels)
else:
for metric in ood_metrics[ood_ds_name]:
ood_scores = metric.compute_ood_scores(batch_scores)
ood_labels = np.ones_like(ood_scores)
metric.update(ood_scores, ood_labels)
logging.info('ood_ds_name %s, nseen %s', ood_ds_name, nseen)
if ood_ds_name == 'train_maha':
# Estimate class conditional Gaussian distribution for Mahalanobis dist.
pre_logits_train = np.vstack(np.vstack(pre_logits_list))
labels_train = np.argmax(np.vstack(np.vstack(labels_list)), axis=-1)
mean_list, cov = compute_mean_and_cov(pre_logits_train, labels_train)
mean_list_background, cov_background = compute_mean_and_cov(
pre_logits_train, np.zeros_like(labels_train))
elif ood_ds_name == 'ind':
# Evaluate in-distribution prediction accuracy
output[f'{ood_ds_name}_prec@1'] = ncorrect / nseen
output[f'{ood_ds_name}_loss'] = loss / nseen
for metric_list in ood_metrics.values():
for metric in metric_list:
metric_name = metric.get_metric_name()
metric_values = metric.compute_metrics()
for key, value in metric_values.items():
output[f'{metric_name}_{key}'] = value
return output
|
google/uncertainty-baselines
|
baselines/jft/ood_utils.py
|
Python
|
apache-2.0
| 16,317
|
[
"Gaussian"
] |
9fa3aefe2bdbb1917446b39a3313d0d78e8cc51caf0b71a1910092976879b314
|
"""
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "ETag" header. This saves bandwidth but slows down performance.
# Deprecated (RemovedInDjango21Warning) in favor of ConditionalGetMiddleware
# which sets the ETag regardless of this setting.
USE_ETAGS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to the user's current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/3/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/3/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (e.g. "myproject.locale" so that myproject/locale/en/formats.py is used)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
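# For example, a common configuration behind a TLS-terminating proxy
# (assuming the proxy sets X-Forwarded-Proto and strips any client-supplied
# copies of that header):
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')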
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
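# A Memcached-backed alternative, as a sketch (assumes a local memcached
# instance listening on the default port):
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#         'LOCATION': '127.0.0.1:11211',
#     }
# }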
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The first hasher in this list is the preferred algorithm. Any
# password using a different algorithm will be converted automatically
# upon login.
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
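# Age of the CSRF cookie, in seconds (52 weeks, i.e. approximately one year).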
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running the server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
|
mjtamlyn/django
|
django/conf/global_settings.py
|
Python
|
bsd-3-clause
| 22,000
|
[
"VisIt"
] |
9bcfaba823378d6a68ce02c0e376dcf2146c60553f0a1bbad97e11e1d96b2549
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('visit', '0098_auto_20150922_1852'),
]
operations = [
migrations.AddField(
model_name='student',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='visit',
name='involvement_types',
field=models.ManyToManyField(related_name='visits', to='visit.InvolvementType', blank=True),
),
migrations.AlterField(
model_name='visit',
name='issues_prek',
field=models.ManyToManyField(related_name='visits', to='visit.IssuePreK', blank=True),
),
migrations.AlterField(
model_name='visit',
name='issues_primary',
field=models.ManyToManyField(related_name='visits', to='visit.IssuePrimary', blank=True),
),
migrations.AlterField(
model_name='visit',
name='requested_services',
field=models.ManyToManyField(related_name='visits', to='visit.RequestedService', blank=True),
),
]
|
koebbe/homeworks
|
visit/migrations/0099_auto_20160120_2205.py
|
Python
|
mit
| 1,256
|
[
"VisIt"
] |
6e28cdda4075f48e0ef6628840eedfb17c96c1a999d80cf3b56f799cb31f1ce8
|
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import os
import rtree
import numpy as np
import netCDF4 as nc4
from django.conf import settings
from pyaxiom.netcdf import EnhancedDataset, EnhancedMFDataset
from wms.utils import find_appropriate_time
from wms.models import VirtualLayer, Layer, Style
from wms import logger # noqa
def try_float(obj):
try:
return float(obj)
except ValueError:
return None
class NetCDFDataset(object):
@contextmanager
def dataset(self):
try:
# Dataset is already loaded
self._dataset.variables
yield self._dataset
except AttributeError:
try:
self._dataset = EnhancedDataset(self.path())
yield self._dataset
except (OSError, RuntimeError, FileNotFoundError):
try:
self._dataset = EnhancedMFDataset(self.path(), aggdim='time')
yield self._dataset
except (OSError, IndexError, RuntimeError, FileNotFoundError):
yield None
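# A minimal usage sketch (hypothetical instance, not part of this module):
# the context manager yields None when path() cannot be opened as either a
# single dataset or a time-aggregated multi-file dataset, so callers should
# always check for None before touching the handle.
#
# with some_netcdf_dataset.dataset() as nc:
#     if nc is not None:
#         print(list(nc.variables))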
@contextmanager
def topology(self):
try:
self._topology.variables
yield self._topology
except AttributeError:
try:
self._topology = EnhancedDataset(self.topology_file)
yield self._topology
except RuntimeError:
yield None
def close(self):
try:
self._dataset.close()
except BaseException:
pass
try:
self._topology.close()
except BaseException:
pass
@property
def topology_file(self):
return os.path.join(settings.TOPOLOGY_PATH, '{}.nc'.format(self.safe_filename))
@property
def time_cache_file(self):
return os.path.join(settings.TOPOLOGY_PATH, '{}.npy'.format(self.safe_filename))
@property
def domain_file(self):
return os.path.join(settings.TOPOLOGY_PATH, '{}.domain'.format(self.safe_filename))
@property
def node_tree_root(self):
return os.path.join(settings.TOPOLOGY_PATH, '{}.nodes'.format(self.safe_filename))
@property
def node_tree_data_file(self):
return '{}.dat'.format(self.node_tree_root)
@property
def node_tree_index_file(self):
return '{}.idx'.format(self.node_tree_root)
@property
def face_tree_root(self):
return os.path.join(settings.TOPOLOGY_PATH, '{}.faces'.format(self.safe_filename))
@property
def face_tree_data_file(self):
return '{}.dat'.format(self.face_tree_root)
@property
def face_tree_index_file(self):
return '{}.idx'.format(self.face_tree_root)
def setup_getfeatureinfo(self, layer, request, location=None):
location = location or 'face'
tree = None
try:
latitude = request.GET['latitude']
longitude = request.GET['longitude']
# Find closest cell or node (only node for now)
if location == 'face':
tree = rtree.index.Index(self.face_tree_root)
elif location == 'node':
tree = rtree.index.Index(self.node_tree_root)
else:
raise NotImplementedError("No RTree for location '{}'".format(location))
try:
nindex = list(tree.nearest((longitude, latitude, longitude, latitude), 1, objects=True))[0]
except IndexError:
raise ValueError("No cells in the {} tree for point {}, {}".format(location, longitude, latitude))
closest_x, closest_y = tuple(nindex.bbox[2:])
geo_index = nindex.object
except BaseException:
raise
finally:
if tree is not None:
tree.close()
all_times = self.times(layer)
start_nc_index = np.searchsorted(all_times, request.GET['starting'], side='left')
start_nc_index = min(start_nc_index, len(all_times) - 1)
end_nc_index = np.searchsorted(all_times, request.GET['ending'], side='right')
end_nc_index = max(end_nc_index, 1) # Always pull the first index
return_dates = all_times[start_nc_index:end_nc_index]
return geo_index, closest_x, closest_y, start_nc_index, end_nc_index, return_dates
def __del__(self):
self.close()
def analyze_virtual_layers(self):
with self.dataset() as nc:
if nc is not None:
# Earth Projected Sea Water Velocity
u_names = ['eastward_sea_water_velocity', 'eastward_sea_water_velocity_assuming_no_tide']
v_names = ['northward_sea_water_velocity', 'northward_sea_water_velocity_assuming_no_tide']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
VirtualLayer.make_vector_layer(us, vs, 'sea_water_velocity', 'vectors', self.id)
# Grid projected Sea Water Velocity
u_names = ['x_sea_water_velocity', 'grid_eastward_sea_water_velocity']
v_names = ['y_sea_water_velocity', 'grid_northward_sea_water_velocity']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
VirtualLayer.make_vector_layer(us, vs, 'grid_sea_water_velocity', 'vectors', self.id)
# Earth projected Winds
u_names = ['eastward_wind']
v_names = ['northward_wind']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
# Hopefully we support barbs eventually
VirtualLayer.make_vector_layer(us, vs, 'winds', 'barbs', self.id)
# Grid projected Winds
u_names = ['x_wind', 'grid_eastward_wind']
v_names = ['y_wind', 'grid_northward_wind']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
# Hopefully we support barbs eventually
VirtualLayer.make_vector_layer(us, vs, 'grid_winds', 'barbs', self.id)
# Earth projected Ice velocity
u_names = ['eastward_sea_ice_velocity']
v_names = ['northward_sea_ice_velocity']
us = nc.get_variables_by_attributes(standard_name=lambda v: v in u_names)
vs = nc.get_variables_by_attributes(standard_name=lambda v: v in v_names)
VirtualLayer.make_vector_layer(us, vs, 'sea_ice_velocity', 'vectors', self.id)
def update_layers(self):
with self.dataset() as nc:
if nc is not None:
for v in nc.variables:
l, _ = Layer.objects.get_or_create(dataset_id=self.id, var_name=v)
nc_var = nc.variables[v]
# *_min and *_max attributes take precedence over the *_range attributes
# scale_* attributes take precedence over valid_* attributes
# *_range
if hasattr(nc_var, 'scale_range'):
l.default_min = try_float(nc_var.scale_range[0])
l.default_max = try_float(nc_var.scale_range[-1])
elif hasattr(nc_var, 'valid_range'):
l.default_min = try_float(nc_var.valid_range[0])
l.default_max = try_float(nc_var.valid_range[-1])
# *_min
if hasattr(nc_var, 'scale_min'):
l.default_min = try_float(nc_var.scale_min)
elif hasattr(nc_var, 'valid_min'):
l.default_min = try_float(nc_var.valid_min)
# *_max
if hasattr(nc_var, 'scale_max'):
l.default_max = try_float(nc_var.scale_max)
elif hasattr(nc_var, 'valid_max'):
l.default_max = try_float(nc_var.valid_max)
# type
if hasattr(nc_var, 'scale_type'):
if nc_var.scale_type in ['logarithmic', 'log']:
l.logscale = True
elif nc_var.scale_type in ['linear']:
l.logscale = False
if hasattr(nc_var, 'standard_name'):
std_name = nc_var.standard_name
l.std_name = std_name
if len(nc_var.dimensions) > 1:
l.active = True
if hasattr(nc_var, 'long_name'):
l.description = nc_var.long_name
if hasattr(nc_var, 'units'):
l.units = nc_var.units
# Set some standard styles
l.styles.add(*Style.defaults())
l.save()
self.analyze_virtual_layers()
def nearest_time(self, layer, time):
"""
Return the time index and time value that is closest
"""
with self.dataset() as nc:
time_vars = nc.get_variables_by_attributes(standard_name='time')
if not time_vars:
return None, None
if len(time_vars) == 1:
time_var = time_vars[0]
else:
# if there is more than one variable with standard_name = time,
# find the appropriate one to use with the layer
var_obj = nc.variables[layer.access_name]
time_var_name = find_appropriate_time(var_obj, time_vars)
time_var = nc.variables[time_var_name]
units = time_var.units
if hasattr(time_var, 'calendar'):
calendar = time_var.calendar
else:
calendar = 'gregorian'
num_date = round(nc4.date2num(time, units=units, calendar=calendar))
times = time_var[:]
time_index = np.searchsorted(times, num_date, side='left')
time_index = min(time_index, len(times) - 1)  # Don't index past the end of the time axis
return time_index, times[time_index]
|
sci-wms/sci-wms
|
wms/models/datasets/netcdf.py
|
Python
|
gpl-3.0
| 10,531
|
[
"NetCDF"
] |
30e5e60e412d4b4a6d5525504b754f9035387de97cce132e7125742625cee896
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from itertools import combinations
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from scipy.stats import spearmanr
from skbio.stats.distance import DistanceMatrix
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def bioenv(distance_matrix, data_frame, columns=None):
"""Find subset of variables maximally correlated with distances.
Finds subsets of variables whose Euclidean distances (after scaling the
variables; see Notes section below for details) are maximally
rank-correlated with the distance matrix. For example, the distance matrix
might contain distances between communities, and the variables might be
numeric environmental variables (e.g., pH). Correlation between the
community distance matrix and Euclidean environmental distance matrix is
computed using Spearman's rank correlation coefficient (:math:`\\rho`).
Subsets of environmental variables range in size from 1 to the total number
of variables (inclusive). For example, if there are 3 variables, the "best"
variable subsets will be computed for subset sizes 1, 2, and 3.
The "best" subset is chosen by computing the correlation between the
community distance matrix and all possible Euclidean environmental distance
matrices at the given subset size. The combination of environmental
variables with maximum correlation is chosen as the "best" subset.
Parameters
----------
distance_matrix : DistanceMatrix
Distance matrix containing distances between objects (e.g., distances
between samples of microbial communities).
data_frame : pandas.DataFrame
Contains columns of variables (e.g., numeric environmental variables
such as pH) associated with the objects in `distance_matrix`. Must be
indexed by the IDs in `distance_matrix` (i.e., the row labels must be
distance matrix IDs), but the order of IDs between `distance_matrix`
and `data_frame` need not be the same. All IDs in the distance matrix
must be present in `data_frame`. Extra IDs in `data_frame` are allowed
(they are ignored in the calculations).
columns : iterable of strs, optional
Column names in `data_frame` to include as variables in the
calculations. If not provided, defaults to all columns in `data_frame`.
The values in each column must be numeric or convertible to a numeric
type.
Returns
-------
pandas.DataFrame
Data frame containing the "best" subset of variables at each subset
size, as well as the correlation coefficient of each.
Raises
------
TypeError
If invalid input types are provided, or if one or more specified
columns in `data_frame` are not numeric.
ValueError
If column name(s) or `distance_matrix` IDs cannot be found in
`data_frame`, if there is missing data (``NaN``) in the environmental
variables, or if the environmental variables cannot be scaled (e.g.,
due to zero variance).
See Also
--------
scipy.stats.spearmanr
Notes
-----
See [1]_ for the original method reference (originally called BIO-ENV).
The general algorithm and interface are similar to ``vegan::bioenv``,
available in R's vegan package [2]_. This method can also be found in
PRIMER-E [3]_ (originally called BIO-ENV, but is now called BEST).
.. warning:: This method can take a *long* time to run if a large number of
variables are specified, as all possible subsets are evaluated at each
subset size.
The variables are scaled before computing the Euclidean distance: each
column is centered and then scaled by its standard deviation.
References
----------
.. [1] Clarke, K. R. & Ainsworth, M. 1993. "A method of linking multivariate
community structure to environmental variables". Marine Ecology Progress
Series, 92, 205-219.
.. [2] http://cran.r-project.org/web/packages/vegan/index.html
.. [3] http://www.primer-e.com/primer.htm
Examples
--------
Import the functionality we'll use in the following examples:
>>> import pandas as pd
>>> from skbio import DistanceMatrix
>>> from skbio.stats.distance import bioenv
Load a 4x4 community distance matrix:
>>> dm = DistanceMatrix([[0.0, 0.5, 0.25, 0.75],
... [0.5, 0.0, 0.1, 0.42],
... [0.25, 0.1, 0.0, 0.33],
... [0.75, 0.42, 0.33, 0.0]],
... ['A', 'B', 'C', 'D'])
Load a ``pandas.DataFrame`` with two environmental variables, pH and
elevation:
>>> df = pd.DataFrame([[7.0, 400],
... [8.0, 530],
... [7.5, 450],
... [8.5, 810]],
... index=['A','B','C','D'],
... columns=['pH', 'Elevation'])
Note that the data frame is indexed with the same IDs (``'A'``, ``'B'``,
``'C'``, and ``'D'``) that are in the distance matrix. This is necessary in
order to link the environmental variables (metadata) to each of the objects
in the distance matrix. In this example, the IDs appear in the same order
in both the distance matrix and data frame, but this is not necessary.
Find the best subsets of environmental variables that are correlated with
community distances:
>>> bioenv(dm, df) # doctest: +NORMALIZE_WHITESPACE
size correlation
vars
pH 1 0.771517
pH, Elevation 2 0.714286
We see that in this simple example, pH alone is maximally rank-correlated
with the community distances (:math:`\\rho=0.771517`).
"""
if not isinstance(distance_matrix, DistanceMatrix):
raise TypeError("Must provide a DistanceMatrix as input.")
if not isinstance(data_frame, pd.DataFrame):
raise TypeError("Must provide a pandas.DataFrame as input.")
if columns is None:
columns = data_frame.columns.values.tolist()
if len(set(columns)) != len(columns):
raise ValueError("Duplicate column names are not supported.")
if len(columns) < 1:
raise ValueError("Must provide at least one column.")
for column in columns:
if column not in data_frame:
raise ValueError("Column '%s' not in data frame." % column)
# Subset and order the vars data frame to match the IDs in the distance
# matrix, only keeping the specified columns.
vars_df = data_frame.loc[distance_matrix.ids, columns]
if vars_df.isnull().any().any():
raise ValueError("One or more IDs in the distance matrix are not "
"in the data frame, or there is missing data in the "
"data frame.")
try:
vars_df = vars_df.astype(float)
except ValueError:
raise TypeError("All specified columns in the data frame must be "
"numeric.")
# Scale the vars and extract the underlying numpy array from the data
# frame. We mainly do this for performance as we'll be taking subsets of
# columns within a tight loop and using a numpy array ends up being ~2x
# faster.
vars_array = _scale(vars_df).values
dm_flat = distance_matrix.condensed_form()
num_vars = len(columns)
var_idxs = np.arange(num_vars)
# For each subset size, store the best combination of variables:
# (string identifying best vars, subset size, rho)
max_rhos = np.empty(num_vars, dtype=[('vars', object),
('size', int),
('correlation', float)])
for subset_size in range(1, num_vars + 1):
max_rho = None
for subset_idxs in combinations(var_idxs, subset_size):
# Compute Euclidean distances using the current subset of
# variables. pdist returns the distances in condensed form.
vars_dm_flat = pdist(vars_array[:, subset_idxs],
metric='euclidean')
rho = spearmanr(dm_flat, vars_dm_flat)[0]
# If there are ties for the best rho at a given subset size, choose
# the first one in order to match vegan::bioenv's behavior.
if max_rho is None or rho > max_rho[0]:
max_rho = (rho, subset_idxs)
vars_label = ', '.join([columns[i] for i in max_rho[1]])
max_rhos[subset_size - 1] = (vars_label, subset_size, max_rho[0])
return pd.DataFrame.from_records(max_rhos, index='vars')
def _scale(df):
"""Center and scale each column in a data frame.
Each column is centered (by subtracting the mean) and then scaled by its
standard deviation.
"""
# Modified from http://stackoverflow.com/a/18005745
df = df.copy()
df -= df.mean()
df /= df.std()
if df.isnull().any().any():
raise ValueError("Column(s) in the data frame could not be scaled, "
"likely because the column(s) had no variance.")
return df
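# A quick sanity check of _scale (hypothetical values, not part of the
# module): with one column [7.0, 8.0, 7.5], the mean is 7.5 and the sample
# standard deviation is 0.5, so the scaled column is [-1.0, 1.0, 0.0].
# >>> _scale(pd.DataFrame({'pH': [7.0, 8.0, 7.5]}))['pH'].tolist()
# [-1.0, 1.0, 0.0]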
|
anderspitman/scikit-bio
|
skbio/stats/distance/_bioenv.py
|
Python
|
bsd-3-clause
| 9,511
|
[
"scikit-bio"
] |
a541a82153d1e8b1d34b746e82b6504c4da3ae8c41d9f5f7223267408c1fc155
|
from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.stats.multivariate_normal as mvn
import autograd.scipy.stats.norm as norm
import gaussian_process as gp
from autograd import grad
from optimizers import adam
def black_box_variational_inference(logprob, D, num_samples):
"""Implements http://arxiv.org/abs/1401.0118, and uses the
local reparameterization trick from http://arxiv.org/abs/1506.02557"""
def unpack_params(params):
# Variational dist is a diagonal Gaussian.
mean, log_std = params[:D], params[D:2*D]
inputs = np.reshape(params[2*D:3*D], (D, 1))
len_sc, variance = params[3*D], params[3*D+1]
return mean, log_std, inputs, len_sc, variance
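# Entropy of a diagonal Gaussian in D dimensions:
# H = D/2 * (1 + log(2*pi)) + sum_i log(sigma_i)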
def gaussian_entropy(log_std):
return 0.5 * D * (1.0 + np.log(2*np.pi)) + np.sum(log_std)
rs = npr.RandomState(0)
def variational_objective(params, t):
"""Provides a stochastic estimate of the variational lower bound."""
mean, log_std,inputs, len_sc, variance = unpack_params(params)
samples = rs.randn(num_samples, D) * np.exp(log_std) + mean
lower_bound = gaussian_entropy(log_std) + np.mean(logprob(samples,inputs,len_sc,variance, t))
return -lower_bound
gradient = grad(variational_objective)
return variational_objective, gradient, unpack_params
if __name__ == '__main__':
# Specify an inference problem by its unnormalized log-posterior.
D = 10; dim = 1; N = 20; M = 10
params = [0, -6.32795237, -0.69221531, -0.24707744]
# Build model and objective function.
num_params, predict, log_marginal_likelihood = \
gp.make_gp_funs(gp.rbf_covariance, num_cov_params=D + 1)
X, y = gp.build_toy_dataset(D=dim,n_data=N)
pseudo, blah = gp.build_toy_dataset(D=dim,n_data=M)
out,blah=gp.build_toy_dataset(D=dim,n_data=100)
objective = lambda params: -log_marginal_likelihood(params, X, y)
def log_posterior(x, inputs, len_sc, variance, t):
# An example intractable distribution: a Gaussian evaluated at zero
# with a Gaussian prior on the log-variance.
N = x.shape
sum_prob = 0
params = [0, len_sc, variance, -0.24707744]
for i in range(N[0]):
mu = x[i][:]
pred_mean, pred_cov = predict(params, inputs, mu, X)
prior = log_marginal_likelihood(params, inputs, mu)
posterior = mvn.logpdf(y, pred_mean, pred_cov, True)
sum_prob = posterior + sum_prob + prior
return sum_prob
# Build variational objective.
objective, gradient, unpack_params = \
black_box_variational_inference(log_posterior, D, num_samples=1000)
# Set up plotting code
def plot_isocontours(ax, func, xlimits=[-2, 2], ylimits=[-4, 2], numticks=101):
x = np.linspace(*xlimits, num=numticks)
y = np.linspace(*ylimits, num=numticks)
X, Y = np.meshgrid(x, y)
zs = func(np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T)
Z = zs.reshape(X.shape)
plt.contour(X, Y, Z)
ax.set_yticks([])
ax.set_xticks([])
# Set up figure.
fig = plt.figure(figsize=(8,8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)
def callback(params,t,g):
paramis = [0, - 6.32795237, - 0.69221531, - 0.24707744]
print("Log likelihood {}".format(-objective(params,t)))
plt.cla()
# Show posterior marginals.
plot_xs = np.reshape(np.linspace(-7, 7, 300), (300, 1))
mu, log_std = params[:D], params[D:2*D]
pred_mean, pred_cov = predict(paramis, pseudo, mu, plot_xs)
marg_std = np.sqrt(np.diag(pred_cov))
ax.plot(plot_xs, pred_mean, 'b')
ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
np.concatenate([pred_mean - 1.96 * marg_std,
(pred_mean + 1.96 * marg_std)[::-1]]),
alpha=.15, fc='Blue', ec='None')
plt.pause(1.0/30.0)
ax.plot(X, y, 'kx')
ax.set_ylim([-1.5, 1.5])
ax.set_xticks([])
ax.set_yticks([])
plt.draw()
plt.pause(1.0 / 60.0)
print("Iteration {} lower bound {}".format(t, -objective(params, t)))
print("Optimizing variational parameters...")
init_mean = -1 * np.ones(D)
init_log_std = -5 * np.ones(D)
z=pseudo
init_var_params = np.concatenate([init_mean, init_log_std,np.reshape(z,(D)),[1],[1]])
variational_params = adam(gradient, init_var_params, step_size=0.1, num_iters=500, callback=callback)
|
blutooth/gp-svi
|
examples/maxsvi.py
|
Python
|
mit
| 4,900
|
[
"Gaussian"
] |
d314e45588bc5ffa1f4dc419760dad54887f47ab13a1bd87534ce35f1d309c94
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the events signaled by abinit during the execution. It also
provides a parser to extract these events form the main output file and the log file.
"""
from __future__ import unicode_literals, division, print_function
import os.path
import collections
import yaml
from monty.fnmatch import WildCard
from monty.termcolor import colored
from pymatgen.core import Structure
from pymatgen.serializers.json_coders import PMGSONable, pmg_serialize
from .abiinspect import YamlTokenizer
__all__ = [
"EventsParser",
]
def indent(lines, amount, ch=' '):
"""indent the lines in a string by padding each one with proper number of pad characters"""
padding = amount * ch
return padding + ('\n'+padding).join(lines.split('\n'))
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
class AbinitEvent(yaml.YAMLObject): #, PMGSONable):
"""
Example (YAML syntax)::
Normal warning without any handler:
--- !Warning
message: |
This is a normal warning that won't
trigger any handler in the python code!
src_file: routine_name
src_line: 112
...
Critical warning that will trigger some action in the python code.
--- !ScfConvergeWarning
message: |
The human-readable message goes here!
src_file: foo.F90
src_line: 112
tolname: tolwfr
actual_tol: 1.0e-8
required_tol: 1.0e-10
nstep: 50
...
The algorithm to extract the YAML sections is very simple.
1) We use YamlTokenizer to extract the documents from the output file
2) If we have a tag that ends with "Warning", "Error", "Bug", or "Comment",
we know we have encountered a new ABINIT event
3) We parse the document with yaml.load(doc.text) and we get the object
Note that:
# --- and ... become reserved words (when they are placed at
the beginning of a line) since they are used to mark the beginning and
the end of YAML documents.
# All the possible events should subclass `AbinitEvent` and define
the class attribute yaml_tag so that yaml.load will know how to
build the instance.
"""
#color = None
def __init__(self, message, src_file, src_line):
"""
Basic constructor for :class:`AbinitEvent`.
Args:
message: String with human-readable message providing info on the event.
src_file: String with the name of the Fortran file where the event is raised.
src_line: Integer giving the line number in src_file.
"""
self.message = message
self._src_file = src_file
self._src_line = src_line
@pmg_serialize
def as_dict(self):
return dict(message=self.message, src_file=self.src_file, src_line=self.src_line)
@classmethod
def from_dict(cls, d):
d = d.copy()
d.pop('@module', None)
d.pop('@class', None)
return cls(**d)
@property
def header(self):
return "%s at %s:%s" % (self.name, self.src_file, self.src_line)
def __str__(self):
return "\n".join((self.header, self.message))
@property
def src_file(self):
"""String with the name of the Fortran file where the event is raised."""
try:
return self._src_file
except AttributeError:
return "Unknown"
@property
def src_line(self):
"""Integer giving the line number in src_file."""
try:
return self._src_line
except AttributeError:
return "Unknown"
@property
def name(self):
"""Name of the event (class name)"""
return self.__class__.__name__
@property
def baseclass(self):
"""The baseclass of self."""
for cls in _BASE_CLASSES:
if isinstance(self, cls):
return cls
raise ValueError("Cannot determine the base class of %s" % self.__class__.__name__)
def log_correction(self, task, message):
"""
This method should be called once we have fixed the problem associated to this event.
It adds a new entry in the correction history of the task.
Args:
message (str): Human-readable string with info on the action performed to solve the problem.
"""
task._corrections.append(dict(
event=self.as_dict(),
message=message,
))
def correct(self, task):
"""
This method is called when an error is detected in a :class:`Task`
It should perform any corrective measures relating to the detected error.
The idea is similar to the one used in custodian but the handler receives
a :class:`Task` object so that we have access to its methods.
Returns:
(dict) JSON serializable dict that describes the errors and actions taken. E.g.
{"errors": list_of_errors, "actions": list_of_actions_taken}.
If this is an unfixable error, actions should be set to None.
"""
return 0
class AbinitComment(AbinitEvent):
"""Base class for Comment events"""
yaml_tag = '!COMMENT'
color = "blue"
class AbinitError(AbinitEvent):
"""Base class for Error events"""
yaml_tag = '!ERROR'
color = "red"
class AbinitYamlError(AbinitError):
"""Raised if the YAML parser cannot parse the document and the doc tag is an Error."""
class AbinitBug(AbinitEvent):
"""Base class for Bug events"""
yaml_tag = '!BUG'
color = "red"
class AbinitWarning(AbinitEvent):
"""
Base class for Warning events (the most important class).
Developers should subclass this class to define the different exceptions
raised by the code and the possible actions that can be performed.
"""
yaml_tag = '!WARNING'
color = None
class AbinitCriticalWarning(AbinitWarning):
color = "red"
class AbinitYamlWarning(AbinitCriticalWarning):
"""
Raised if the YAML parser cannot parse the document and the doc tag is a Warning.
"""
# Warnings that trigger restart.
class ScfConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the GS SCF cycle did not converge."""
yaml_tag = '!ScfConvergenceWarning'
class NscfConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the GS NSCF cycle did not converge."""
yaml_tag = '!NscfConvergenceWarning'
class RelaxConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the structural relaxation did not converge."""
yaml_tag = '!RelaxConvergenceWarning'
# TODO: for the time being we don't discern between GS and PhononCalculations.
#class PhononConvergenceWarning(AbinitCriticalWarning):
# """Warning raised when the phonon calculation did not converge."""
# yaml_tag = u'!PhononConvergenceWarning'
class QPSConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the QPS iteration (GW) did not converge."""
yaml_tag = '!QPSConvergenceWarning'
class HaydockConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the Haydock method (BSE) did not converge."""
yaml_tag = '!HaydockConvergenceWarning'
# Error classes providing a correct method.
class DilatmxError(AbinitError):
yaml_tag = '!DilatmxError'
def correct(self, task):
#Idea: decrease dilatxm and restart from the last structure.
#We would like to end up with a structures optimized with dilatmx 1.01
#that will be used for phonon calculations.
# Read the last structure dumped by ABINIT before aborting.
print("in dilatmx")
filepath = task.outdir.has_abiext("DILATMX_STRUCT.nc")
last_structure = Structure.from_file(filepath)
task._change_structure(last_structure)
#changes = task._modify_vars(dilatmx=1.05)
task.history.append("Take last structure from DILATMX_STRUCT.nc, will try to restart")
return 1
# Register the concrete base classes.
_BASE_CLASSES = [
AbinitComment,
AbinitError,
AbinitBug,
AbinitWarning,
]
class EventReport(collections.Iterable):
"""
Iterable storing the events raised by an ABINIT calculation.
Attributes::
stat: information about a file as returned by os.stat
"""
def __init__(self, filename, events=None):
"""
List of ABINIT events.
Args:
filename: Name of the file
events: List of Event objects
"""
self.filename = os.path.abspath(filename)
self.stat = os.stat(self.filename)
self._events = []
self._events_by_baseclass = collections.defaultdict(list)
if events is not None:
for ev in events:
self.append(ev)
def __len__(self):
return len(self._events)
def __iter__(self):
return self._events.__iter__()
def __str__(self):
#has_colours = stream_has_colours(stream)
has_colours = True
lines = []
app = lines.append
app("Events for: %s" % self.filename)
for i, event in enumerate(self):
if has_colours:
app("[%d] %s" % (i+1, colored(event.header, color=event.color)))
app(indent(event.message, 4))
else:
app("[%d] %s" % (i+1, str(event)))
app("num_errors: %s, num_warnings: %s, num_comments: %s, completed: %s" % (
self.num_errors, self.num_warnings, self.num_comments, self.run_completed))
return "\n".join(lines)
def append(self, event):
"""Add an event to the list."""
self._events.append(event)
self._events_by_baseclass[event.baseclass].append(event)
def set_run_completed(self, bool_value):
"""Set the value of _run_completed."""
self._run_completed = bool_value
@property
def run_completed(self):
"""True if the calculation terminated."""
try:
return self._run_completed
except AttributeError:
return False
@property
def comments(self):
"""List of comments found."""
return self.select(AbinitComment)
@property
def errors(self):
"""List of errors found."""
return self.select(AbinitError)
@property
def bugs(self):
"""List of bugs found."""
return self.select(AbinitBug)
@property
def warnings(self):
"""List of warnings found."""
return self.select(AbinitWarning)
@property
def num_warnings(self):
"""Number of warnings reported."""
return len(self.warnings)
@property
def num_errors(self):
"""Number of errors reported."""
return len(self.errors)
@property
def num_comments(self):
"""Number of comments reported."""
return len(self.comments)
def select(self, base_class):
"""
Return the list of events that inherit from class base_class.
Args:
base_class: The base class used to select events.
"""
return self._events_by_baseclass[base_class][:]
def filter_types(self, event_types):
events = []
for ev in self:
if type(ev) in event_types: events.append(ev)
return self.__class__(filename=self.filename, events=events)
class EventsParserError(Exception):
"""Base class for the exceptions raised by :class:`EventsParser`."""
class EventsParser(object):
"""
Parses the output or the log file produced by abinit and extract the list of events.
"""
Error = EventsParserError
# Internal flag used for debugging
DEBUG_LEVEL = 0
def parse(self, filename):
"""
Parse the given file. Return :class:`EventReport`.
"""
run_completed = False
filename = os.path.abspath(filename)
report = EventReport(filename)
# TODO Use CamelCase for the Fortran messages.
# Bug is still an error of class SoftwareError
w = WildCard("*Error|*Warning|*Comment|*Bug|*ERROR|*WARNING|*COMMENT|*BUG")
with YamlTokenizer(filename) as tokens:
for doc in tokens:
#print(80*"*")
#print("doc.tag", doc.tag)
#print("doc", doc)
#print(80*"*")
if w.match(doc.tag):
#print("got doc.tag", doc.tag,"--")
try:
event = yaml.load(doc.text)
except:
# Wrong YAML doc. Check the doc tag and instantiate the proper event.
message = "Malformatted YAML document at line: %d\n" % doc.lineno
message += doc.text
# This call is very expensive when we have many exceptions due to malformatted YAML docs.
if self.DEBUG_LEVEL:
message += "Traceback:\n %s" % straceback()
if "error" in doc.tag.lower():
print("It seems an error", doc.tag)
event = AbinitYamlError(message=message, src_file=__file__, src_line=0)
else:
event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0)
event.lineno = doc.lineno
report.append(event)
# Check whether the calculation completed.
if doc.tag == "!FinalSummary":
run_completed = True
report.set_run_completed(run_completed)
return report
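# Typical usage, as a sketch (the output file name is hypothetical): parse a
# main output file and iterate over the errors the report collected.
# report = EventsParser().parse("run.abo")
# for event in report.errors:
#     print(event)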
def report_exception(self, filename, exc):
"""
This method is used when self.parser raises an Exception so that
we can report a customized :class:`EventReport` object with info the exception.
"""
return EventReport(filename, events=[AbinitError(str(exc))])
|
sonium0/pymatgen
|
pymatgen/io/abinitio/events.py
|
Python
|
mit
| 14,177
|
[
"ABINIT",
"pymatgen"
] |
bb3ea63cf34cafac616ac784442e114c1dba73c065f5d52115a0f4729cb10aa8
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The UI widgets for the rename dialog
"""
from PyQt4 import QtCore, QtGui
from openlp.core.lib import translate
from openlp.core.lib.ui import create_button_box
class Ui_FileRenameDialog(object):
"""
The UI widgets for the rename dialog
"""
def setupUi(self, fileRenameDialog):
"""
Set up the UI
"""
fileRenameDialog.setObjectName(u'fileRenameDialog')
fileRenameDialog.resize(300, 10)
self.dialogLayout = QtGui.QGridLayout(fileRenameDialog)
self.dialogLayout.setObjectName(u'dialog_layout')
self.fileNameLabel = QtGui.QLabel(fileRenameDialog)
self.fileNameLabel.setObjectName(u'fileNameLabel')
self.dialogLayout.addWidget(self.fileNameLabel, 0, 0)
self.fileNameEdit = QtGui.QLineEdit(fileRenameDialog)
self.fileNameEdit.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp(r'[^/\\?*|<>\[\]":+%]+'), self))
self.fileNameEdit.setObjectName(u'fileNameEdit')
self.dialogLayout.addWidget(self.fileNameEdit, 0, 1)
self.button_box = create_button_box(fileRenameDialog, u'button_box', [u'cancel', u'ok'])
self.dialogLayout.addWidget(self.button_box, 1, 0, 1, 2)
self.retranslateUi(fileRenameDialog)
self.setMaximumHeight(self.sizeHint().height())
def retranslateUi(self, fileRenameDialog):
"""
Translate the UI on the fly.
"""
self.fileNameLabel.setText(translate('OpenLP.FileRenameForm', 'New File Name:'))
|
marmyshev/transitions
|
openlp/core/ui/filerenamedialog.py
|
Python
|
gpl-2.0
| 3,607
|
[
"Brian"
] |
87a04b7a5b22f7f6864cb388c86166b34a3de601bb520682a528dc41b53ca574
|
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from random import randint
from time import sleep
from django.core.urlresolvers import reverse
from lettuce.django import django_url
from nose.tools import assert_equals
class PageObject(object):
def __init__(self, browser):
self.browser = browser
def visit(self):
self.browser.visit(django_url(self.url))
def validate_url(self):
assert self.browser.url == django_url(self.url)
def fill(self, name, value):
self.browser.fill(name, value)
def is_text_present(self, text, status=True):
assert_equals(status, self.browser.is_text_present(text))
def is_disabled(self, element_id):
try:
element = self.browser.find_by_css('#%s[disabled]' % element_id).first
return True
except Exception, e:
return False
def fill_in_with_js(self, jquery_id, object_id):
script = '%s.val(%s).change(); %s.trigger("liszt:updated").chosen().change()' % (
jquery_id, object_id, jquery_id)
self.browser.execute_script(script)
sleep(2)
def submit(self):
self.browser.find_by_css("form button").first.click()
def see_username_link(self):
self.click_by_css('.btn-navbar')
assert self.browser.find_by_css("#drop-user-settings")
def see_logged_in_status(self, user):
assert self.browser.find_link_by_partial_text("Logged in as: %s" % user.get_full_name())
def see_the_about_link(self):
assert self.browser.find_link_by_text('About')
def find_link_by_text(self,text):
assert self.browser.find_link_by_text(text)
def click_the_about_link(self):
self.browser.click_link_by_text('About')
def check_anonymous_user_allowed_tabs(self):
assert self.browser.find_link_by_text('About')
assert self.browser.find_link_by_text('mMICS')
assert self.browser.find_link_by_text('Login')
def check_data_entry_allowed_tabs(self):
assert self.browser.find_link_by_text('About')
assert self.browser.find_link_by_text('mMICS')
assert self.browser.find_link_by_text('Survey Administration')
def check_researcher_allowed_tabs(self):
self.check_data_entry_allowed_tabs()
assert self.browser.find_link_by_text('Downloads')
assert self.browser.find_link_by_text('Analysis')
def check_all_tabs(self):
self.check_researcher_allowed_tabs()
assert self.browser.find_link_by_text('Settings')
def check_researcher_not_allowed_tabs(self):
assert not self.browser.find_link_by_text('Settings')
def check_data_entry_not_allowed_tabs(self):
self.check_researcher_not_allowed_tabs()
assert not self.browser.find_link_by_text('Downloads')
assert not self.browser.find_link_by_text('Analysis')
def check_anonymous_user_not_allowed_tabs(self):
self.check_data_entry_not_allowed_tabs()
assert not self.browser.find_link_by_text('Survey Administration')
def check_notify_investigators_drop_down_is_not_present(self):
self.browser.click_link_by_text('Survey Administration')
assert not self.browser.find_link_by_text('Notifications')
def choose_radio(self, name, value):
js = "$('input:radio[name=%s][value=%s]').prop('checked', true).change()" % (name, value)
self.browser.execute_script(js)
def see_user_settings_link(self, user):
assert self.browser.find_link_by_partial_text("%s" % str(user.get_full_name()))
def click_user_settings(self):
self.click_by_css("#fold-menu")
sleep(3)
self.click_by_css("#drop-user-settings")
def assert_user_can_see_profile_and_logout_link(self):
links = ["Edit Profile", "Change Password", "Logout"]
for link in links:
assert self.browser.find_link_by_partial_text(link)
def click_reset_password_form(self):
self.browser.find_link_by_partial_text("Change Password").click()
def assert_password_successfully_reset(self):
self.browser.is_text_present("Your password was reset successfully!!")
def click_actions_button(self):
self.browser.find_by_css('#action_caret').first.click()
def click_link_by_text(self, text):
self.browser.click_link_by_text(text)
def fill_valid_values(self, data):
self.browser.fill_form(data)
sleep(2)
def validate_pagination(self):
self.browser.click_link_by_text("2")
def is_radio_selected(self, name, value):
js = "$('input[name=%s]:radio').prop('checked')" % name
return self.browser.execute_script(js) == value
def see_success_message(self, object_name, action_str):
self.is_text_present('%s successfully %s.' % (object_name, action_str))
def select_multiple(self, field_id=None, *data):
for item in data:
script = "$('%s').multiSelect('select', '%s')" % (field_id, item.pk)
self.browser.execute_script(script)
def validate_fields_present(self, fields, status=True):
for field in fields:
self.is_text_present(field, status)
def select_date(self, field_id):
script = "$('%s').focus()" % field_id
self.browser.execute_script(script)
script = "$('.ui-state-default').first().click()"
self.browser.execute_script(script)
def click_tab(self, tab_name):
self.browser.click_link_by_text(tab_name)
def see_dropdown(self, links):
for url_name in links:
assert self.browser.find_link_by_partial_href(reverse(url_name))
def select(self, name, values):
for value in values:
self.browser.select(name, value)
def click_by_css(self, css_selector):
self.browser.find_by_css(css_selector).first.click()
def click_link_by_partial_href(self, modal_id):
self.browser.click_link_by_partial_href(modal_id)
def click_link_by_href(self, modal_id):
self.browser.click_link_by_href(modal_id)
def click_button(self, name):
self.browser.find_by_name(name).first.click()
def find_by_css(self, css_selector, text):
assert self.browser.find_by_css(css_selector).first.value == text
def see_select_option(self, option_list, field_name):
for option in option_list:
assert option in self.browser.find_by_name(field_name).first.text
def option_not_present(self, option_list, field_name):
for option in option_list:
            assert option not in self.browser.find_by_name(field_name).first.text
def see_message(self, text):
assert self.browser.is_text_present(text)
def see_confirm_modal_message(self, name, action_str="delete"):
self.is_text_present("Confirm: Are you sure you want to %s %s?" % (action_str, name))
def validate_form_present(self, form):
for key in form.keys():
assert self.browser.find_by_name(key).first
self.is_text_present(form[key])
def validate_form_values(self, form_values):
for key in form_values.keys():
assert self.browser.find_by_name(key).first.value == str(form_values[key])
def field_not_present(self, field_name):
assert not self.browser.find_by_name(field_name)
def field_is_visible(self, field_name):
return self.browser.find_by_name(field_name).first.visible
def find_element_by_css(self, selector):
assert self.browser.find_by_css(selector).first
def is_hidden(self, field, status=True):
assert_equals(status, not self.browser.find_by_css('.hide').first.visible)
def find_by_name(self,name):
assert self.browser.find_by_name(name)
def click_by_name(self,name):
self.browser.find_by_name(name).first.click()
def input_file(self, filename):
self.browser.attach_file('file', filename)
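# --- Illustrative sketch (not part of the original file) ---
# Concrete pages are assumed to subclass PageObject and define ``url``;
# a hypothetical lettuce step would then drive the browser like so
# (class name and url are made up for illustration):
#
#     class NewSurveyPage(PageObject):
#         url = '/surveys/new/'
#
#     page = NewSurveyPage(world.browser)
#     page.visit()
#     page.validate_url()
#     page.fill('name', 'Household Survey')
#     page.submit()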
|
unicefuganda/mics
|
survey/features/page_objects/base.py
|
Python
|
bsd-3-clause
| 7,919
|
[
"VisIt"
] |
f7b05935109bac663ba3d4f20418815f6ea116481aeadc711aefe842f8d0eb1e
|
# ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
# Author: Rastko Sknepnek
#
# Division of Physics
# School of Engineering, Physics and Mathematics
# University of Dundee
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
# Integrator code for batch processing of full data runs (incorporating parts of earlier analysis scripts)
# Data interfacing
from read_data import *
from read_param import *
# Pre-existing analysis scripts
from polar_defects_analysis import *
from glob import glob  # needed for the sorted(glob(...)) calls below
# This is the structured data file hierarchy. Replace as appropriate (do not go the Yaouen way and fully automatize ...)
basefolder = '/home/silke/Documents/CurrentProjects/Rastko/Runs/RunsMarchJ1/'
outfolder= '/home/silke/Documents/CurrentProjects/Rastko/analysis/'
#JList=['10', '1', '0.1', '0.01']
#'0.005','0.01','0.02','0.05','0.1','0.2','0.5','1'
vList=['0.1','0.2']
JList=['1']
nu_r='0.002'
phi='1'
r=28.2094791
sigma=1
nstep=10000000
nsave=10000
nsnap=int(nstep/nsave)
skip=0
startvtk=0
for J in JList:
for v0 in vList:
#param = Param(basefolder)
#/home/silke/Documents/CurrentProjects/Rastko/Runs/RunsMarchJ1/data_v0_0.005/data_j_1_sphere/sphere_v0_0.005_j_1_0010000000.dat
files = sorted(glob(basefolder+'/data_v0_' + v0 + '/data_j_' + J +'_sphere/sphere_*.dat'))[skip:]
#files = sorted(glob(basefolder+'J_'+ J +'/sphere_*.dat'))[skip:]
defects=np.zeros((len(files),32))
ndefect=np.zeros((len(files),2))
u=0
for f in files:
print f
outname =basefolder+'/data_v0_' + v0 + '/data_j_' + J +'_sphere/frame_data' + str(u-startvtk)+'.vtk'
if u<startvtk:
defects_n, defects_v,numdefect_n,numdefect_v=getDefects(f,float(r),sigma,outname,'polar',False,False)
else:
defects_n, defects_v,numdefect_n,numdefect_v=getDefects(f,float(r),sigma,outname,'polar',False,True)
outname = '.'.join((f).split('.')[:-1]) + '_defects.vtk'
outname =basefolder+'/data_v0_' + v0 + '/data_j_' + J +'_sphere/frame_defects' + str(u-startvtk)+'.vtk'
print outname
writeDefects(defects_n, defects_v,numdefect_n,numdefect_v,outname)
defects[u,0:4]=defects_n[0,:]
defects[u,4:8]=defects_n[1,:]
defects[u,8:12]=defects_n[2,:]
defects[u,12:16]=defects_n[3,:]
defects[u,16:20]=defects_v[0,:]
defects[u,20:24]=defects_v[1,:]
defects[u,24:28]=defects_v[2,:]
defects[u,28:32]=defects_v[3,:]
ndefect[u,0]=numdefect_n
ndefect[u,1]=numdefect_v
u+=1
outfile2=outfolder + 'defects_J_' + J + 'v0_'+ v0 +'_polar.dat'
np.savetxt(outfile2,np.concatenate((ndefect,defects),axis=1),fmt='%12.6g', header='ndefect (orientation, velocity) defects (orientation, velocity)')
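# Illustrative note (not part of the original script): each row written
# above holds 2 + 32 columns per frame -- the orientation/velocity defect
# counts from ``ndefect`` followed by four 4-component orientation defects
# and four 4-component velocity defects from ``defects``.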
|
sknepneklab/SAMoS
|
analysis/batch_polar/batch_defects_J1_v0.1.py
|
Python
|
gpl-3.0
| 3,265
|
[
"VTK"
] |
5f1040d55406a962ebd52d1fa0f20a162c14292ffd739efa83f0bc20de251cfb
|
import datetime
from django.db import models, transaction
from django.urls import reverse
from django_countries.fields import CountryField
from workshops.mixins import (
AssignmentMixin,
CreatedUpdatedMixin,
COCAgreementMixin,
DataPrivacyAgreementMixin,
EventLinkMixin,
HostResponsibilitiesMixin,
StateMixin,
InstructorAvailabilityMixin,
)
from workshops.models import (
STR_LONGEST,
Language,
KnowledgeDomain,
AcademicLevel,
ComputingExperienceLevel,
Curriculum,
InfoSource,
CommonRequest,
)
class DataVariant(models.Model):
name = models.CharField(
max_length=300,
null=False,
blank=False,
default="",
unique=True,
verbose_name="Name",
help_text="Data variant name and description",
)
unknown = models.BooleanField(
null=False,
blank=True,
default=False,
verbose_name="Unknown entry",
help_text="Mark this record as 'I don't know yet', or "
"'Unknown', or 'Not sure yet'. There can be only one such "
"record in the database.",
)
class Meta:
verbose_name = "Data variant"
verbose_name_plural = "Data variants"
ordering = [
"id",
]
def __str__(self):
return self.name
@transaction.atomic
def save(self, *args, **kwargs):
"""When saving with `unknown=True`, update all other records with this
parameter to `unknown=False`. This helps keeping only one record with
`unknown=True` in the database - a specific case of uniqueness."""
# wrapped in transaction in order to prevent from updating records to
# `unknown=False` when saving fails
if self.unknown:
DataVariant.objects.filter(unknown=True).update(unknown=False)
return super().save(*args, **kwargs)
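# Illustrative sketch (not part of the original file): saving a second
# record with ``unknown=True`` demotes any earlier one, keeping the flag
# unique, e.g. (hypothetical names):
#
#     a = DataVariant.objects.create(name="Not sure yet", unknown=True)
#     b = DataVariant.objects.create(name="Don't know yet", unknown=True)
#     a.refresh_from_db()
#     assert a.unknown is False and b.unknown is True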
class WorkshopInquiryRequest(
AssignmentMixin,
StateMixin,
CreatedUpdatedMixin,
CommonRequest,
DataPrivacyAgreementMixin,
COCAgreementMixin,
HostResponsibilitiesMixin,
InstructorAvailabilityMixin,
EventLinkMixin,
models.Model,
):
"""
This model is used for storing inquiry information from anyone interested
in The Carpentries and workshops in general.
"""
UNSURE_CHOICE = ("", "Not sure yet.")
location = models.CharField(
max_length=STR_LONGEST,
blank=False,
null=False,
default="",
verbose_name="Workshop location",
help_text="City, state, or province.",
)
country = CountryField(null=False, blank=False, verbose_name="Country",)
# Here starts "Your Audience" part with this description:
# The Carpentries offers several different workshops intended for audiences
# from different domain backgrounds, with different computational
# experience and learning goals. Your responses to the following questions
# will help us advise you on which workshop(s) may best serve your
# audience. All questions are optional so please share as much as you can.
routine_data = models.ManyToManyField(
DataVariant,
blank=True,
verbose_name="What kinds of data does your target audience routinely "
"work with?",
help_text="Check all that apply.",
)
routine_data_other = models.CharField(
max_length=STR_LONGEST,
blank=True,
default="",
verbose_name="Other kinds of routinely worked-with data",
)
domains = models.ManyToManyField(
KnowledgeDomain,
blank=True,
verbose_name="Domains or topic of interest for target audience",
help_text="The attendees' academic field(s) of study, if known. Check "
"all that apply.",
)
domains_other = models.CharField(
max_length=STR_LONGEST, blank=True, default="", verbose_name="Other domains",
)
academic_levels = models.ManyToManyField(
AcademicLevel,
blank=True,
verbose_name="Attendees' academic level / career stage",
help_text="If you know the academic level(s) of your attendees, "
"indicate them here. Check all that apply.",
)
computing_levels = models.ManyToManyField(
ComputingExperienceLevel,
blank=True,
verbose_name="Attendees' level of computing experience",
help_text="Indicate the attendees' level of computing experience, if "
"known. We will ask attendees to fill in a skills survey "
"before the workshop, so this answer can be an "
"approximation. Check all that apply.",
)
audience_description = models.TextField(
blank=True,
verbose_name="Please describe your anticipated audience, including "
"their experience, background, and goals",
)
SWC_LESSONS_LINK = (
'<a href="https://software-carpentry.org/lessons/">'
"Software Carpentry lessons page</a>"
)
DC_LESSONS_LINK = (
'<a href="http://www.datacarpentry.org/lessons/">'
"Data Carpentry lessons page</a>"
)
LC_LESSONS_LINK = (
'<a href="https://librarycarpentry.org/lessons/">'
"Library Carpentry lessons page</a>"
)
requested_workshop_types = models.ManyToManyField(
Curriculum,
limit_choices_to={"active": True},
blank=True,
verbose_name="Which Carpentries workshop are you requesting?",
help_text="If your learners are new to programming and primarily "
"interested in working with data, Data Carpentry is likely "
"the best choice. If your learners are interested in "
"learning more about programming, including version control"
" and automation, Software Carpentry is likely the best "
"match. If your learners are people working in library and "
"information related roles interested in learning data and "
"software skills, Library Carpentry is the best choice. "
"Please visit the "
+ SWC_LESSONS_LINK
+ ", "
+ DC_LESSONS_LINK
+ ", or the "
+ LC_LESSONS_LINK
+ " for more information about any of our lessons. If you’re "
"not sure and would like to discuss with us, please select "
'the "Don\'t know yet" option below.<br class="mb-1">'
"Check all that apply.",
)
preferred_dates = models.DateField(
blank=True,
null=True,
verbose_name="Preferred dates",
help_text="Our workshops typically run two full days. Please select "
"your preferred first day for the workshop. If you do not "
"have exact dates or are interested in an alternative "
"schedule, please indicate so below. Because we need to "
"coordinate with instructors, a minimum of 2-3 months lead "
"time is required for workshop planning.",
)
other_preferred_dates = models.CharField(
max_length=200,
blank=True,
null=False,
default="",
verbose_name="If your dates are not set, please provide more "
"information below",
)
language = models.ForeignKey(
Language,
on_delete=models.PROTECT,
blank=True,
null=True,
verbose_name="What is the preferred language of communication for the "
"workshop?",
help_text="Our workshops are offered primarily in English, with a few "
"of our lessons available in Spanish. While materials are "
"mainly in English, we know it can be valuable to have an "
"instructor who speaks the native language of the learners. "
"We will attempt to locate Instructors speaking a particular"
" language, but cannot guarantee the availability of "
"non-English speaking Instructors.",
)
ATTENDEES_NUMBER_CHOICES = (
UNSURE_CHOICE,
("10-40", "10-40 (one room, two instructors)"),
("40-80", "40-80 (two rooms, four instructors)"),
("80-120", "80-120 (three rooms, six instructors)"),
)
number_attendees = models.CharField(
max_length=15,
choices=ATTENDEES_NUMBER_CHOICES,
blank=True,
null=True,
default=None,
verbose_name="Anticipated number of attendees",
help_text="These recommendations are for in-person workshops. "
"This number doesn't need to be precise, but will help us "
"decide how many instructors your workshop will need. "
"Each workshop must have at least two instructors.<br>"
"For online Carpentries workshops, we recommend a maximum of "
"20 learners per class. If your workshop attendance will "
"exceed 20 learners please be sure to include a note in the "
"comments section below. ",
)
FEE_CHOICES = (
UNSURE_CHOICE,
(
"nonprofit",
"I am with a government site, university, or other "
"nonprofit. I understand the workshop fee of US$2500, "
"and agree to follow through on The Carpentries "
"invoicing process.",
),
(
"forprofit",
"I am with a corporate or for-profit site. I understand "
"The Carpentries staff will contact me about workshop "
"fees. I will follow through on The Carpentries "
"invoicing process for the agreed upon fee.",
),
(
"member",
"I am with a Member Organisation so the workshop fee does "
"not apply (Instructor travel costs will still apply).",
),
(
"waiver",
"I am requesting a scholarship for the workshop fee "
"(Instructor travel costs will still apply).",
),
)
administrative_fee = models.CharField(
max_length=20,
choices=FEE_CHOICES,
blank=True,
null=True,
default=None,
verbose_name="Which of the following applies to your payment for the "
"administrative fee?",
)
TRAVEL_EXPENCES_MANAGEMENT_CHOICES = (
UNSURE_CHOICE,
(
"booked",
"Hotel and airfare will be booked by site; ground travel "
"and meals/incidentals will be reimbursed within 60 days.",
),
(
"reimbursed",
"All expenses will be booked by instructors and "
"reimbursed within 60 days.",
),
("other", "Other:"),
)
travel_expences_management = models.CharField(
max_length=20,
null=False,
blank=True,
default="",
choices=TRAVEL_EXPENCES_MANAGEMENT_CHOICES,
verbose_name="How will you manage travel expenses for Carpentries "
"Instructors?",
)
travel_expences_management_other = models.CharField(
max_length=STR_LONGEST,
null=False,
blank=True,
default="",
verbose_name="Other travel expences management",
)
travel_expences_agreement = models.BooleanField(
null=False,
blank=False,
default=False,
verbose_name="Regardless of the fee due to The Carpentries, I "
"understand I am also responsible for travel costs for "
"the Instructors which can include airfare, ground "
"travel, hotel, and meals/incidentals. I understand "
"local Instructors will be prioritized but not "
"guaranteed. Instructor travel costs are managed "
"directly between the host site and the Instructors, not "
"through The Carpentries. I will share detailed "
"information regarding policies and procedures for "
"travel arrangements with instructors. All "
"reimbursements will be completed within 60 days of "
"the workshop.",
)
RESTRICTION_CHOICES = (
UNSURE_CHOICE,
("no_restrictions", "No restrictions."),
("other", "Other:"),
)
institution_restrictions = models.CharField(
max_length=20,
null=False,
blank=True,
default="",
choices=RESTRICTION_CHOICES,
verbose_name="Our instructors live, teach, and travel globally. We "
"understand that institutions may have citizenship, "
"confindentiality agreements or other requirements for "
"employees or volunteers who facilitate workshops. If "
"your institution fits this description, please share "
"your requirements or note that there are no "
"restrictions.",
)
institution_restrictions_other = models.CharField(
max_length=STR_LONGEST,
null=False,
blank=True,
default="",
verbose_name="Other (institution restrictions)",
)
carpentries_info_source = models.ManyToManyField(
InfoSource,
blank=True,
verbose_name="How did you hear about The Carpentries?",
help_text="Check all that apply.",
)
carpentries_info_source_other = models.CharField(
max_length=STR_LONGEST,
null=False,
blank=True,
default="",
verbose_name="Other source for information about The Carpentries",
)
user_notes = models.TextField(
blank=True,
verbose_name="Will this workshop be conducted in-person or online? "
"Is there any other information you would like to share "
"with us?",
help_text="Knowing if this workshop is on-line or in-person will "
"help ensure we can best support you in coordinating the event.",
)
# override field `public_event` from CommonRequest mixin
public_event = models.CharField(
max_length=CommonRequest._meta.get_field("public_event").max_length,
null=False,
blank=True,
default="",
choices=(UNSURE_CHOICE,)
+ CommonRequest._meta.get_field("public_event").choices,
verbose_name=CommonRequest._meta.get_field("public_event").verbose_name,
help_text=CommonRequest._meta.get_field("public_event").help_text,
)
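    # Illustrative note (not part of the original file): copying the
    # ``max_length``, ``choices``, and labels from ``CommonRequest._meta``
    # lets this model prepend UNSURE_CHOICE to the shared field without
    # altering the mixin used by other request models.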
class Meta:
ordering = ["created_at"]
def __str__(self):
return (
"Workshop inquiry ({institution}, {personal} {family}) - {state}"
).format(
institution=str(self.institution or self.institution_other_name),
personal=self.personal,
family=self.family,
state=self.get_state_display(),
)
def dates(self):
if self.preferred_dates:
return "{:%Y-%m-%d}".format(self.preferred_dates)
else:
return self.other_preferred_dates
def preferred_dates_too_soon(self):
# set cutoff date at 2 months
cutoff = datetime.timedelta(days=2 * 30)
if self.preferred_dates:
return (self.preferred_dates - self.created_at.date()) < cutoff
return False
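    # Illustrative note (not part of the original file): with the 60-day
    # cutoff above, an inquiry created on 2020-01-01 with preferred dates
    # of 2020-02-15 (45 days out) counts as "too soon", while 2020-03-15
    # (74 days out) does not.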
def get_absolute_url(self):
return reverse("workshopinquiry_details", args=[self.id])
class SelfOrganisedSubmission(
AssignmentMixin,
StateMixin,
CreatedUpdatedMixin,
CommonRequest,
DataPrivacyAgreementMixin,
COCAgreementMixin,
HostResponsibilitiesMixin,
EventLinkMixin,
models.Model,
):
"""
This model is used for storing user-submitted self-organised workshop
information. It's very similar to Workshop Submission combined with
DC Self-Organized Workshop Request.
"""
start = models.DateField(
null=True,
verbose_name="Workshop start date",
help_text="Please provide the dates that your Self-Organised workshop will"
" run.",
)
end = models.DateField(
null=True,
verbose_name="Workshop end date"
)
workshop_url = models.URLField(
max_length=STR_LONGEST,
blank=True,
null=False,
default="",
verbose_name="Please share your workshop URL",
help_text="Use the link to the website, not the repository. This is "
"typically in the format <a>https://username.github.io/"
"YYYY-MM-DD-sitename</a>. If you are running an online workshop, "
"please use the format YYYY-MM-DD-sitename-online.",
)
FORMAT_CHOICES = (
("standard", "Standard two-day Carpentries workshop"),
("short", "Short session (less than two days)"),
(
"periodic",
"Modules taught over a period of time (several weeks, "
"one semester, etc.)",
),
("other", "Other:"),
)
workshop_format = models.CharField(
max_length=20,
null=False,
blank=False,
default="",
choices=FORMAT_CHOICES,
verbose_name="What is the format of this workshop?",
)
workshop_format_other = models.CharField(
max_length=STR_LONGEST,
null=False,
blank=True,
default="",
verbose_name="Other workshop format",
)
workshop_types = models.ManyToManyField(
Curriculum,
limit_choices_to={"active": True},
blank=False,
verbose_name="Which Carpentries workshop are you teaching?",
)
workshop_types_other = models.CharField(
max_length=STR_LONGEST,
null=False,
blank=True,
default="",
verbose_name="Other workshop types",
)
workshop_types_other_explain = models.TextField(
blank=True,
verbose_name='If you selected "Mix & Match", please provide more'
" information here",
help_text="For example \"We are teaching Software Carpentry's Git "
'lesson only" or "We are teaching Data Carpentry\'s Ecology '
'workshop, but not teaching a programming language."',
)
country = CountryField(null=True, blank=False, verbose_name="Country",)
language = models.ForeignKey(
Language,
on_delete=models.PROTECT,
blank=False,
null=False,
verbose_name="What language is this workshop being conducted in?",
)
class Meta:
verbose_name = "Self-Organised Submission"
verbose_name_plural = "Self-Organised Submissions"
ordering = ["created_at"]
def __str__(self):
return (
"Self-Organised Submission ({institution}, {personal} {family}) - {state}"
).format(
institution=str(self.institution or self.institution_other_name),
personal=self.personal,
family=self.family,
state=self.get_state_display(),
)
def get_absolute_url(self):
return reverse("selforganisedsubmission_details", args=[self.id])
|
swcarpentry/amy
|
amy/extrequests/models.py
|
Python
|
mit
| 18,595
|
[
"VisIt"
] |
53d62f1c42f1f08ed4eb1f56bfaaf775d544a60bd8534d89ab4c66a0b0b4918e
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# rmvgridtrigger - remove trigger from vgrid
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.rmvgridtrigger import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
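# Illustrative note (not part of the original file): run_cgi_script is
# assumed to handle the CGI request/response plumbing around ``main``;
# its actual contract is defined in shared.cgiscriptstub.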
|
heromod/migrid
|
mig/cgi-bin/rmvgridtrigger.py
|
Python
|
gpl-2.0
| 1,086
|
[
"Brian"
] |
9f6cb9df0db35639aea0572fb71aaf77160d90bfe54579c2d2c61dbb7e5db4b3
|
#
#
# File to generate network for execution on parallel NEURON
# Note this script has only been tested with UCL's cluster!
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Medical Research Council and the
# Wellcome Trust
#
#
from sys import *
from time import *
from os import environ  # 'environ' is used below; it lives in os, not sys
from java.io import File
from ucl.physiol.neuroconstruct.project import ProjectManager
from ucl.physiol.neuroconstruct.utils import NumberGenerator
from ucl.physiol.neuroconstruct.hpc.mpi import MpiSettings
from ucl.physiol.neuroconstruct.simulation import SimulationsInfo
from ucl.physiol.neuroconstruct.neuron import NeuronSettings
path.append(environ["NC_HOME"]+"/pythonNeuroML/nCUtils")
import ncutils as nc
projFile = File("../Cerebellum.ncx")
########### Main settings ###########
simConfig= "Default Simulation Configuration"
simDuration = 10 # ms
simDt = 0.025 # ms
neuroConstructSeed = 12345
simulatorSeed = 12345
simulators = ["NEURON", "GENESIS", "MOOSE"]
simulators = ["NEURON"]
simRefPrefix = "Test_"
defaultSynapticDelay = 0.5
#mpiConf = MpiSettings.CLUSTER_4PROC
#mpiConf = MpiSettings.LEGION_8PROC
#mpiConf = MpiSettings.LEGION_16PROC
#mpiConf = MpiSettings.LOCAL_SERIAL
#mpiConf = MpiSettings.LEGION_1PROC
#mpiConf = MpiSettings.LEGION_16PROC
mpiConf = MpiSettings.MATLEM_8PROC
#mpiConf = MpiSettings.LEGION_256PROC
#mpiConf = 'Matthau_6_8PROCS'
#mpiConf = 'Matthau_Lemmon_Test_56'
#mpiConf = 'Matthau_Lemmon_Test_ALL'
numMossy = 20000
numGolgi = 10000
numGran = 100000
saveDataAsHdf5 = True
varTimestepNeuron = False
verbose = True
runInBackground= False
suggestedRemoteRunTime = 120
#######################################
### Load neuroConstruct project
print "Loading project from "+ projFile.getCanonicalPath()
pm = ProjectManager()
project = pm.loadProject(projFile)
### Set duration & timestep & simulation configuration
project.simulationParameters.setDt(simDt)
simConfig = project.simConfigInfo.getSimConfig(simConfig)
simConfig.setSimDuration(simDuration)
if saveDataAsHdf5:
project.neuronSettings.setDataSaveFormat(NeuronSettings.DataSaveFormat.HDF5_NC)
### Set simulation reference
index = 0
simRef = "%s%i"%(simRefPrefix,index)
while File( "%s/simulations/%s_N"%(project.getProjectMainDirectory().getCanonicalPath(), simRef)).exists():
simRef = "%s%i"%(simRefPrefix,index)
index = index+1
project.simulationParameters.setReference(simRef)
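# Illustrative note (not part of the original script): with
# simRefPrefix = "Test_" the loop above settles on the first "Test_<n>"
# whose NEURON results directory ("Test_<n>_N") does not yet exist.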
### Change num in each cell group
project.cellGroupsInfo.getCellPackingAdapter("GoC_3D").setMaxNumberCells(numGolgi) # Note only works if RandomCellPackingAdapter
project.cellGroupsInfo.getCellPackingAdapter("GrC_3D").setMaxNumberCells(numGran)
project.cellGroupsInfo.getCellPackingAdapter("MF_3D").setMaxNumberCells(numMossy)
### Change parallel configuration
mpiSettings = MpiSettings()
simConfig.setMpiConf(mpiSettings.getMpiConfiguration(mpiConf))
print "Parallel configuration: "+ str(simConfig.getMpiConf())
if suggestedRemoteRunTime > 0:
project.neuronFileManager.setSuggestedRemoteRunTime(suggestedRemoteRunTime)
project.genesisFileManager.setSuggestedRemoteRunTime(suggestedRemoteRunTime)
### Change synaptic delay associated with each net conn
for netConnName in simConfig.getNetConns():
if netConnName.count("gap")==0:
print "Changing synaptic delay in %s to %f"%(netConnName, defaultSynapticDelay)
delayGen = NumberGenerator(defaultSynapticDelay)
for synProps in project.morphNetworkConnectionsInfo.getSynapseList(netConnName):
synProps.setDelayGenerator(delayGen)
# defaultSynapticDelay will be recorded in simulation.props and listed in SimulationBrowser GUI
SimulationsInfo.addExtraSimProperty("defaultSynapticDelay", str(defaultSynapticDelay))
### Generate network structure in neuroConstruct
pm.doGenerate(simConfig.getName(), neuroConstructSeed)
while pm.isGenerating():
print "Waiting for the project to be generated with Simulation Configuration: "+str(simConfig)
sleep(2)
print "Number of cells generated: " + str(project.generatedCellPositions.getNumberInAllCellGroups())
print "Number of network connections generated: " + str(project.generatedNetworkConnections.getNumAllSynConns())
if simulators.count("NEURON")>0:
simRefN = simRef+"_N"
project.simulationParameters.setReference(simRefN)
nc.generateAndRunNeuron(project,
pm,
simConfig,
simRefN,
simulatorSeed,
verbose=verbose,
runInBackground=runInBackground,
varTimestep=varTimestepNeuron)
sleep(2) # wait a while before running GENESIS...
if simulators.count("GENESIS")>0:
simRefG = simRef+"_G"
project.simulationParameters.setReference(simRefG)
nc.generateAndRunGenesis(project,
pm,
simConfig,
simRefG,
simulatorSeed,
verbose=verbose,
runInBackground=runInBackground)
sleep(2) # wait a while before running MOOSE...
if simulators.count("MOOSE")>0:
simRefM = simRef+"_M"
project.simulationParameters.setReference(simRefM)
nc.generateAndRunMoose(project,
pm,
simConfig,
simRefM,
simulatorSeed,
verbose=verbose,
runInBackground=runInBackground)
    sleep(2) # pause briefly after the MOOSE run...
print "Finished running all sims, shutting down..."
sleep(5)
exit()
|
pgleeson/TestArea
|
models/Cerebellum/pythonScripts/GenNetworkParallel.py
|
Python
|
gpl-2.0
| 6,381
|
[
"MOOSE",
"NEURON"
] |
f15bc6d588292730bc189d2249b2b99cf4b38fd7e63d77491f177ee5938bc970
|
#!/usr/bin/python
"""Test of sayAll."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Add"))
sequence.append(utils.AssertPresentationAction(
"1. KP_Add to do a SayAll",
["SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Bugzilla'",
"SPEECH OUTPUT: 'New bug'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Browse'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Search'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Reports'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Account'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Admin'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '·'",
"SPEECH OUTPUT: 'Help'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Logged In joanmarie.diggs@gmail.com'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Log Out'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Enter Bug: orca \u2013 This page lets you enter a new bug into Bugzilla.'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'Before reporting a bug, please read the'",
"SPEECH OUTPUT: 'bug writing guidelines'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ', please look at the list of'",
"SPEECH OUTPUT: 'most frequently reported bugs'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ', and please'",
"SPEECH OUTPUT: 'search'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'or'",
"SPEECH OUTPUT: 'browse'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'for the bug.'",
"SPEECH OUTPUT: 'Reporter:'",
"SPEECH OUTPUT: 'joanmarie.diggs@gmail.com'",
"SPEECH OUTPUT: 'Product:'",
"SPEECH OUTPUT: 'orca'",
"SPEECH OUTPUT: 'Version:'",
"SPEECH OUTPUT: '2.21.x'",
"SPEECH OUTPUT: 'List with 9 items'",
"SPEECH OUTPUT: 'Component'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ':'",
"SPEECH OUTPUT: 'List with 5 items'",
"SPEECH OUTPUT: 'GNOME version'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ':'",
"SPEECH OUTPUT: 'Unspecified'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'OS'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ':'",
"SPEECH OUTPUT: 'Linux'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'Severity'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ':'",
"SPEECH OUTPUT: 'normal'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'Summary:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Description:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Optional Fields'",
"SPEECH OUTPUT: 'Cc:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Keywords'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ':'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Depends on:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Blocks:'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Commit'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'Remember values as bookmarkable template'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'We've made a guess at your operating system.'",
"SPEECH OUTPUT: 'Please check it and, if we got it wrong, email bugmaster@gnome.org.'",
"SPEECH OUTPUT: 'Saved Searches:'",
"SPEECH OUTPUT: 'All Orca'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Firefox'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'open orca'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Open RFEs'",
"SPEECH OUTPUT: 'link'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
chrys87/orca-beep
|
test/keystrokes/firefox/say_all_enter_bug.py
|
Python
|
lgpl-2.1
| 3,997
|
[
"ORCA"
] |
c0555a3fd0d61c2753cb2254aa27bdac705a52376b1329e3e9350ba383a8e926
|
# -*- coding: utf-8 -*-
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
from distutils.version import LooseVersion
import gc
import os
import os.path as op
from pathlib import Path
import shutil
import sys
import warnings
import pytest
# For some unknown reason, on Travis-xenial there are segfaults caused on
# the line pytest -> pdb.Pdb.__init__ -> "import readline". Forcing an
# import here seems to prevent them (!?). This suggests a potential problem
# with some other library stepping on memory where it shouldn't. It only
# seems to happen on the Linux runs that install Mayavi. Anecdotally,
# @larsoner had problems a couple of years ago where a mayavi import
# seemed to corrupt SciPy linalg function results (!), likely due to the
# associated VTK import, so this could be another manifestation of that.
try:
import readline # noqa
except Exception:
pass
import numpy as np
import mne
from mne.datasets import testing
from mne.utils import _pl, _assert_no_instances
test_path = testing.data_path(download=False)
s_path = op.join(test_path, 'MEG', 'sample')
fname_evoked = op.join(s_path, 'sample_audvis_trunc-ave.fif')
fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif')
fname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
bem_path = op.join(test_path, 'subjects', 'sample', 'bem')
fname_bem = op.join(bem_path, 'sample-1280-bem.fif')
fname_aseg = op.join(test_path, 'subjects', 'sample', 'mri', 'aseg.mgz')
subjects_dir = op.join(test_path, 'subjects')
fname_src = op.join(bem_path, 'sample-oct-4-src.fif')
subjects_dir = op.join(test_path, 'subjects')
fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif')
fname_trans = op.join(s_path, 'sample_audvis_trunc-trans.fif')
def pytest_configure(config):
"""Configure pytest options."""
# Markers
for marker in ('slowtest', 'ultraslowtest'):
config.addinivalue_line('markers', marker)
# Fixtures
for fixture in ('matplotlib_config',):
config.addinivalue_line('usefixtures', fixture)
# Warnings
# - Once SciPy updates not to have non-integer and non-tuple errors (1.2.0)
# we should remove them from here.
# - This list should also be considered alongside reset_warnings in
# doc/conf.py.
warning_lines = r"""
error::
ignore:.*deprecated and ignored since IPython.*:DeprecationWarning
ignore::ImportWarning
ignore:the matrix subclass:PendingDeprecationWarning
ignore:numpy.dtype size changed:RuntimeWarning
ignore:.*HasTraits.trait_.*:DeprecationWarning
ignore:.*takes no parameters:DeprecationWarning
ignore:joblib not installed:RuntimeWarning
ignore:Using a non-tuple sequence for multidimensional indexing:FutureWarning
ignore:using a non-integer number instead of an integer will result in an error:DeprecationWarning
ignore:Importing from numpy.testing.decorators is deprecated:DeprecationWarning
ignore:np.loads is deprecated, use pickle.loads instead:DeprecationWarning
ignore:The oldnumeric module will be dropped:DeprecationWarning
ignore:Collection picker None could not be converted to float:UserWarning
ignore:covariance is not positive-semidefinite:RuntimeWarning
ignore:Can only plot ICA components:RuntimeWarning
ignore:Matplotlib is building the font cache using fc-list:UserWarning
ignore:Using or importing the ABCs from 'collections':DeprecationWarning
ignore:`formatargspec` is deprecated:DeprecationWarning
# This is only necessary until sklearn updates their wheels for NumPy 1.16
ignore:numpy.ufunc size changed:RuntimeWarning
ignore:.*mne-realtime.*:DeprecationWarning
ignore:.*imp.*:DeprecationWarning
ignore:Exception creating Regex for oneOf.*:SyntaxWarning
ignore:scipy\.gradient is deprecated.*:DeprecationWarning
ignore:sklearn\.externals\.joblib is deprecated.*:FutureWarning
ignore:The sklearn.*module.*deprecated.*:FutureWarning
ignore:.*trait.*handler.*deprecated.*:DeprecationWarning
ignore:.*rich_compare.*metadata.*deprecated.*:DeprecationWarning
ignore:.*In future, it will be an error for 'np.bool_'.*:DeprecationWarning
ignore:.*`np.bool` is a deprecated alias.*:DeprecationWarning
ignore:.*`np.int` is a deprecated alias.*:DeprecationWarning
ignore:.*`np.float` is a deprecated alias.*:DeprecationWarning
ignore:.*`np.object` is a deprecated alias.*:DeprecationWarning
ignore:.*`np.long` is a deprecated alias:DeprecationWarning
ignore:.*Converting `np\.character` to a dtype is deprecated.*:DeprecationWarning
ignore:.*sphinx\.util\.smartypants is deprecated.*:
ignore:.*pandas\.util\.testing is deprecated.*:
ignore:.*tostring.*is deprecated.*:DeprecationWarning
ignore:.*QDesktopWidget\.availableGeometry.*:DeprecationWarning
ignore:Unable to enable faulthandler.*:UserWarning
always:.*get_data.* is deprecated in favor of.*:DeprecationWarning
always::ResourceWarning
""" # noqa: E501
for warning_line in warning_lines.split('\n'):
warning_line = warning_line.strip()
if warning_line and not warning_line.startswith('#'):
config.addinivalue_line('filterwarnings', warning_line)
# Have to be careful with autouse=True, but this is just an int comparison
# so it shouldn't really add appreciable overhead
@pytest.fixture(autouse=True)
def check_verbose(request):
"""Set to the default logging level to ensure it's tested properly."""
starting_level = mne.utils.logger.level
yield
# ensures that no tests break the global state
try:
assert mne.utils.logger.level == starting_level
except AssertionError:
pytest.fail('.'.join([request.module.__name__,
request.function.__name__]) +
' modifies logger.level')
@pytest.fixture(autouse=True)
def close_all():
"""Close all matplotlib plots, regardless of test status."""
# This adds < 1 µS in local testing, and we have ~2500 tests, so ~2 ms max
import matplotlib.pyplot as plt
yield
plt.close('all')
@pytest.fixture(scope='function')
def verbose_debug():
"""Run a test with debug verbosity."""
with mne.utils.use_log_level('debug'):
yield
@pytest.fixture(scope='session')
def matplotlib_config():
"""Configure matplotlib for viz tests."""
import matplotlib
from matplotlib import cbook
# Allow for easy interactive debugging with a call like:
#
# $ MNE_MPL_TESTING_BACKEND=Qt5Agg pytest mne/viz/tests/test_raw.py -k annotation -x --pdb # noqa: E501
#
try:
want = os.environ['MNE_MPL_TESTING_BACKEND']
except KeyError:
want = 'agg' # don't pop up windows
with warnings.catch_warnings(record=True): # ignore warning
warnings.filterwarnings('ignore')
matplotlib.use(want, force=True)
import matplotlib.pyplot as plt
assert plt.get_backend() == want
# overwrite some params that can horribly slow down tests that
# users might have changed locally (but should not otherwise affect
# functionality)
plt.ioff()
plt.rcParams['figure.dpi'] = 100
try:
from traits.etsconfig.api import ETSConfig
except Exception:
pass
else:
ETSConfig.toolkit = 'qt4'
# Make sure that we always reraise exceptions in handlers
orig = cbook.CallbackRegistry
class CallbackRegistryReraise(orig):
def __init__(self, exception_handler=None):
args = ()
if LooseVersion(matplotlib.__version__) >= LooseVersion('2.1'):
args += (exception_handler,)
super(CallbackRegistryReraise, self).__init__(*args)
cbook.CallbackRegistry = CallbackRegistryReraise
@pytest.fixture(scope='session')
def ci_macos():
"""Determine if running on MacOS CI."""
return (os.getenv('CI', 'false').lower() == 'true' and
sys.platform == 'darwin')
@pytest.fixture(scope='session')
def azure_windows():
"""Determine if running on Azure Windows."""
return (os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true' and
sys.platform.startswith('win'))
@pytest.fixture()
def check_gui_ci(ci_macos, azure_windows):
"""Skip tests that are not reliable on CIs."""
if azure_windows or ci_macos:
pytest.skip('Skipping GUI tests on MacOS CIs and Azure Windows')
@pytest.fixture(scope='session', params=[testing._pytest_param()])
def _evoked():
# This one is session scoped, so be sure not to modify it (use evoked
# instead)
evoked = mne.read_evokeds(fname_evoked, condition='Left Auditory',
baseline=(None, 0))
evoked.crop(0, 0.2)
return evoked
@pytest.fixture()
def evoked(_evoked):
"""Get evoked data."""
return _evoked.copy()
@pytest.fixture(scope='function', params=[testing._pytest_param()])
def noise_cov():
"""Get a noise cov from the testing dataset."""
return mne.read_cov(fname_cov)
@pytest.fixture(scope='function')
def bias_params_free(evoked, noise_cov):
"""Provide inputs for free bias functions."""
fwd = mne.read_forward_solution(fname_fwd)
return _bias_params(evoked, noise_cov, fwd)
@pytest.fixture(scope='function')
def bias_params_fixed(evoked, noise_cov):
"""Provide inputs for fixed bias functions."""
fwd = mne.read_forward_solution(fname_fwd)
fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True)
return _bias_params(evoked, noise_cov, fwd)
def _bias_params(evoked, noise_cov, fwd):
evoked.pick_types(meg=True, eeg=True, exclude=())
# restrict to limited set of verts (small src here) and one hemi for speed
vertices = [fwd['src'][0]['vertno'].copy(), []]
stc = mne.SourceEstimate(np.zeros((sum(len(v) for v in vertices), 1)),
vertices, 0., 1.)
fwd = mne.forward.restrict_forward_to_stc(fwd, stc)
assert fwd['sol']['row_names'] == noise_cov['names']
assert noise_cov['names'] == evoked.ch_names
evoked = mne.EvokedArray(fwd['sol']['data'].copy(), evoked.info)
data_cov = noise_cov.copy()
data_cov['data'] = np.dot(fwd['sol']['data'], fwd['sol']['data'].T)
assert data_cov['data'].shape[0] == len(noise_cov['names'])
want = np.arange(fwd['sol']['data'].shape[1])
if not mne.forward.is_fixed_orient(fwd):
want //= 3
return evoked, fwd, noise_cov, data_cov, want
@pytest.fixture(scope="module", params=[
"mayavi",
"pyvista",
])
def backend_name(request):
"""Get the backend name."""
yield request.param
@pytest.yield_fixture
def renderer(backend_name, garbage_collect):
"""Yield the 3D backends."""
from mne.viz.backends.renderer import _use_test_3d_backend
_check_skip_backend(backend_name)
with _use_test_3d_backend(backend_name):
from mne.viz.backends import renderer
yield renderer
renderer.backend._close_all()
@pytest.yield_fixture
def garbage_collect():
"""Garbage collect on exit."""
yield
gc.collect()
@pytest.fixture(scope="module", params=[
"pyvista",
"mayavi",
])
def backend_name_interactive(request):
"""Get the backend name."""
yield request.param
@pytest.yield_fixture
def renderer_interactive(backend_name_interactive):
"""Yield the 3D backends."""
from mne.viz.backends.renderer import _use_test_3d_backend
_check_skip_backend(backend_name_interactive)
with _use_test_3d_backend(backend_name_interactive, interactive=True):
from mne.viz.backends import renderer
yield renderer
renderer.backend._close_all()
def _check_skip_backend(name):
from mne.viz.backends.tests._utils import (has_mayavi, has_pyvista,
has_pyqt5, has_imageio_ffmpeg)
if name == 'mayavi':
if not has_mayavi():
pytest.skip("Test skipped, requires mayavi.")
elif name == 'pyvista':
if not has_pyvista():
pytest.skip("Test skipped, requires pyvista.")
if not has_imageio_ffmpeg():
pytest.skip("Test skipped, requires imageio-ffmpeg")
if not has_pyqt5():
pytest.skip("Test skipped, requires PyQt5.")
@pytest.fixture()
def renderer_notebook():
"""Verify that pytest_notebook is installed."""
from mne.viz.backends import renderer
with renderer._use_test_3d_backend('notebook'):
yield renderer
@pytest.fixture(scope='session')
def pixel_ratio():
"""Get the pixel ratio."""
from mne.viz.backends.tests._utils import (has_mayavi, has_pyvista,
has_pyqt5)
if not (has_mayavi() or has_pyvista()) or not has_pyqt5():
return 1.
from PyQt5.QtWidgets import QApplication, QMainWindow
_ = QApplication.instance() or QApplication([])
window = QMainWindow()
ratio = float(window.devicePixelRatio())
window.close()
return ratio
@pytest.fixture(scope='function', params=[testing._pytest_param()])
def subjects_dir_tmp(tmpdir):
"""Copy MNE-testing-data subjects_dir to a temp dir for manipulation."""
for key in ('sample', 'fsaverage'):
shutil.copytree(op.join(subjects_dir, key), str(tmpdir.join(key)))
return str(tmpdir)
# Scoping these as session will make things faster, but need to make sure
# not to modify them in-place in the tests, so keep them private
@pytest.fixture(scope='session', params=[testing._pytest_param()])
def _evoked_cov_sphere(_evoked):
"""Compute a small evoked/cov/sphere combo for use with forwards."""
evoked = _evoked.copy().pick_types(meg=True)
evoked.pick_channels(evoked.ch_names[::4])
assert len(evoked.ch_names) == 77
cov = mne.read_cov(fname_cov)
sphere = mne.make_sphere_model('auto', 'auto', evoked.info)
return evoked, cov, sphere
@pytest.fixture(scope='session')
def _fwd_surf(_evoked_cov_sphere):
"""Compute a forward for a surface source space."""
evoked, cov, sphere = _evoked_cov_sphere
src_surf = mne.read_source_spaces(fname_src)
return mne.make_forward_solution(
evoked.info, fname_trans, src_surf, sphere, mindist=5.0)
@pytest.fixture(scope='session')
def _fwd_subvolume(_evoked_cov_sphere):
"""Compute a forward for a surface source space."""
pytest.importorskip('nibabel')
evoked, cov, sphere = _evoked_cov_sphere
volume_labels = ['Left-Cerebellum-Cortex', 'right-Cerebellum-Cortex']
with pytest.raises(ValueError,
match=r"Did you mean one of \['Right-Cere"):
mne.setup_volume_source_space(
'sample', pos=20., volume_label=volume_labels,
subjects_dir=subjects_dir)
volume_labels[1] = 'R' + volume_labels[1][1:]
src_vol = mne.setup_volume_source_space(
'sample', pos=20., volume_label=volume_labels,
subjects_dir=subjects_dir, add_interpolator=False)
return mne.make_forward_solution(
evoked.info, fname_trans, src_vol, sphere, mindist=5.0)
@pytest.fixture(scope='session')
def _all_src_types_fwd(_fwd_surf, _fwd_subvolume):
"""Create all three forward types (surf, vol, mixed)."""
fwds = dict(surface=_fwd_surf, volume=_fwd_subvolume)
with pytest.raises(RuntimeError,
match='Invalid source space with kinds'):
fwds['volume']['src'] + fwds['surface']['src']
# mixed (4)
fwd = fwds['surface'].copy()
f2 = fwds['volume']
for keys, axis in [(('source_rr',), 0),
(('source_nn',), 0),
(('sol', 'data'), 1),
(('_orig_sol',), 1)]:
a, b = fwd, f2
key = keys[0]
if len(keys) > 1:
a, b = a[key], b[key]
key = keys[1]
a[key] = np.concatenate([a[key], b[key]], axis=axis)
fwd['sol']['ncol'] = fwd['sol']['data'].shape[1]
fwd['nsource'] = fwd['sol']['ncol'] // 3
fwd['src'] = fwd['src'] + f2['src']
fwds['mixed'] = fwd
return fwds
@pytest.fixture(scope='session')
def _all_src_types_inv_evoked(_evoked_cov_sphere, _all_src_types_fwd):
"""Compute inverses for all source types."""
evoked, cov, _ = _evoked_cov_sphere
invs = dict()
for kind, fwd in _all_src_types_fwd.items():
assert fwd['src'].kind == kind
with pytest.warns(RuntimeWarning, match='has magnitude'):
invs[kind] = mne.minimum_norm.make_inverse_operator(
evoked.info, fwd, cov)
return invs, evoked
@pytest.fixture(scope='function')
def all_src_types_inv_evoked(_all_src_types_inv_evoked):
"""All source types of inverses, allowing for possible modification."""
invs, evoked = _all_src_types_inv_evoked
invs = {key: val.copy() for key, val in invs.items()}
evoked = evoked.copy()
return invs, evoked
@pytest.fixture(scope='function')
def mixed_fwd_cov_evoked(_evoked_cov_sphere, _all_src_types_fwd):
"""Compute inverses for all source types."""
evoked, cov, _ = _evoked_cov_sphere
return _all_src_types_fwd['mixed'].copy(), cov.copy(), evoked.copy()
@pytest.fixture(scope='session', params=[testing._pytest_param()])
def src_volume_labels():
"""Create a 7mm source space with labels."""
pytest.importorskip('nibabel')
volume_labels = mne.get_volume_labels_from_aseg(fname_aseg)
src = mne.setup_volume_source_space(
'sample', 7., mri='aseg.mgz', volume_label=volume_labels,
add_interpolator=False, bem=fname_bem,
subjects_dir=subjects_dir)
lut, _ = mne.read_freesurfer_lut()
assert len(volume_labels) == 46
assert volume_labels[0] == 'Unknown'
assert lut['Unknown'] == 0 # it will be excluded during label gen
return src, tuple(volume_labels), lut
def _fail(*args, **kwargs):
raise AssertionError('Test should not download')
@pytest.fixture(scope='function')
def download_is_error(monkeypatch):
"""Prevent downloading by raising an error when it's attempted."""
monkeypatch.setattr(mne.utils.fetching, '_get_http', _fail)
@pytest.fixture()
def brain_gc(request):
"""Ensure that brain can be properly garbage collected."""
keys = ('renderer_interactive', 'renderer', 'renderer_notebook')
assert set(request.fixturenames) & set(keys) != set()
for key in keys:
if key in request.fixturenames:
is_pv = request.getfixturevalue(key)._get_3d_backend() == 'pyvista'
close_func = request.getfixturevalue(key).backend._close_all
break
if not is_pv:
yield
return
import pyvista
if LooseVersion(pyvista.__version__) <= LooseVersion('0.26.1'):
yield
return
from mne.viz import Brain
_assert_no_instances(Brain, 'before')
ignore = set(id(o) for o in gc.get_objects())
yield
close_func()
_assert_no_instances(Brain, 'after')
# We only check VTK for PyVista -- Mayavi/PySurfer is not as strict
objs = gc.get_objects()
bad = list()
for o in objs:
try:
name = o.__class__.__name__
except Exception: # old Python, probably
pass
else:
if name.startswith('vtk') and id(o) not in ignore:
bad.append(name)
del o
del objs, ignore, Brain
assert len(bad) == 0, 'VTK objects linger:\n' + '\n'.join(bad)
def pytest_sessionfinish(session, exitstatus):
"""Handle the end of the session."""
n = session.config.option.durations
if n is None:
return
print('\n')
try:
import pytest_harvest
except ImportError:
print('Module-level timings require pytest-harvest')
return
from py.io import TerminalWriter
# get the number to print
res = pytest_harvest.get_session_synthesis_dct(session)
files = dict()
for key, val in res.items():
parts = Path(key.split(':')[0]).parts
# split mne/tests/test_whatever.py into separate categories since these
# are essentially submodule-level tests. Keeping just [:3] works,
        # except for mne/viz where we want level-4 granularity
parts = parts[:4 if parts[:2] == ('mne', 'viz') else 3]
if not parts[-1].endswith('.py'):
parts = parts + ('',)
file_key = '/'.join(parts)
files[file_key] = files.get(file_key, 0) + val['pytest_duration_s']
files = sorted(list(files.items()), key=lambda x: x[1])[::-1]
# print
files = files[:n]
if len(files):
writer = TerminalWriter()
writer.line() # newline
writer.sep('=', f'slowest {n} test module{_pl(n)}')
names, timings = zip(*files)
timings = [f'{timing:0.2f}s total' for timing in timings]
rjust = max(len(timing) for timing in timings)
timings = [timing.rjust(rjust) for timing in timings]
for name, timing in zip(names, timings):
writer.line(f'{timing.ljust(15)}{name}')
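# Illustrative note (not part of the original file): the summary above is
# emitted only when pytest is invoked with a ``--durations`` value and
# pytest-harvest is installed, e.g.:
#
#     pytest --durations=20 mne/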
|
olafhauk/mne-python
|
mne/conftest.py
|
Python
|
bsd-3-clause
| 20,946
|
[
"Mayavi",
"VTK"
] |
8cc631fc04d65e22cc5f70e18ddb60f46313899ef0b216ebcb8d57131b4c1278
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Neuron.neuron_db_id'
db.add_column(u'neuroelectro_neuron', 'neuron_db_id',
self.gf('django.db.models.fields.IntegerField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Neuron.neuron_db_id'
db.delete_column(u'neuroelectro_neuron', 'neuron_db_id')
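    # Illustrative note (not part of the original file): South applies
    # ``forwards`` on ``python manage.py migrate neuroelectro`` and uses
    # ``backwards`` to undo the column addition when migrating back.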
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'neuroelectro.api': {
'Meta': {'object_name': 'API'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'neuroelectro.article': {
'Meta': {'object_name': 'Article'},
'abstract': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}),
'author_list_str': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.Author']", 'null': 'True', 'symmetrical': 'False'}),
'full_text_link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Journal']", 'null': 'True'}),
'pmid': ('django.db.models.fields.IntegerField', [], {}),
'pub_year': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'substances': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.Substance']", 'null': 'True', 'symmetrical': 'False'}),
'suggester': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'terms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.MeshTerm']", 'null': 'True', 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'neuroelectro.articlefulltext': {
'Meta': {'object_name': 'ArticleFullText'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']"}),
'full_text_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'neuroelectro.articlefulltextstat': {
'Meta': {'object_name': 'ArticleFullTextStat'},
'article_full_text': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.ArticleFullText']"}),
'data_table_ephys_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata_human_assigned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'metadata_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'methods_tag_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'neuron_article_map_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'num_unique_ephys_concept_maps': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.articlemetadatamap': {
'Meta': {'object_name': 'ArticleMetaDataMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.MetaData']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'})
},
u'neuroelectro.articlesummary': {
'Meta': {'object_name': 'ArticleSummary'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']"}),
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_neurons': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.author': {
'Meta': {'object_name': 'Author'},
'first': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'last': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'middle': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
u'neuroelectro.brainregion': {
'Meta': {'object_name': 'BrainRegion'},
'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.contvalue': {
'Meta': {'object_name': 'ContValue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_range': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'mean': ('django.db.models.fields.FloatField', [], {}),
'min_range': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'n': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'stderr': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'stdev': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'neuroelectro.datasource': {
'Meta': {'object_name': 'DataSource'},
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.DataTable']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_submission': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.UserSubmission']", 'null': 'True'}),
'user_upload': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.UserUpload']", 'null': 'True'})
},
u'neuroelectro.datatable': {
'Meta': {'object_name': 'DataTable'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'needs_expert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'table_html': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'table_text': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'})
},
u'neuroelectro.ephysconceptmap': {
'Meta': {'object_name': 'EphysConceptMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.EphysProp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.DataSource']"}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'neuroelectro.ephysprop': {
'Meta': {'object_name': 'EphysProp'},
'definition': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.EphysPropSyn']", 'symmetrical': 'False'}),
'units': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Unit']", 'null': 'True'})
},
u'neuroelectro.ephyspropsummary': {
'Meta': {'object_name': 'EphysPropSummary'},
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.EphysProp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_neurons': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'value_mean_articles': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_mean_neurons': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_sd_articles': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_sd_neurons': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'neuroelectro.ephyspropsyn': {
'Meta': {'object_name': 'EphysPropSyn'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'neuroelectro.insituexpt': {
'Meta': {'object_name': 'InSituExpt'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageseriesid': ('django.db.models.fields.IntegerField', [], {}),
'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'neuroelectro.institution': {
'Meta': {'object_name': 'Institution'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
u'neuroelectro.journal': {
'Meta': {'object_name': 'Journal'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Publisher']", 'null': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
u'neuroelectro.mailinglistentry': {
'Meta': {'object_name': 'MailingListEntry'},
'comments': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
},
u'neuroelectro.meshterm': {
'Meta': {'object_name': 'MeshTerm'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
u'neuroelectro.metadata': {
'Meta': {'object_name': 'MetaData'},
'cont_value': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.ContValue']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
u'neuroelectro.neuron': {
'Meta': {'object_name': 'Neuron'},
'added_by': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'neuron_db_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'})
},
u'neuroelectro.neuronarticlemap': {
'Meta': {'object_name': 'NeuronArticleMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Neuron']"}),
'num_mentions': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.neuronconceptmap': {
'Meta': {'object_name': 'NeuronConceptMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Neuron']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.DataSource']"}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'neuroelectro.neuronephysdatamap': {
'Meta': {'object_name': 'NeuronEphysDataMap'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.EphysConceptMap']"}),
'err': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'metadata': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.MetaData']", 'symmetrical': 'False'}),
'n': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.NeuronConceptMap']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.DataSource']"}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'val': ('django.db.models.fields.FloatField', [], {}),
'val_norm': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'neuroelectro.neuronephyssummary': {
'Meta': {'object_name': 'NeuronEphysSummary'},
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.EphysProp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Neuron']"}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'value_mean': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'value_sd': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'neuroelectro.neuronsummary': {
'Meta': {'object_name': 'NeuronSummary'},
'cluster_xval': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'cluster_yval': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Neuron']"}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_ephysprops': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'neuroelectro.neuronsyn': {
'Meta': {'object_name': 'NeuronSyn'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'neuroelectro.protein': {
'Meta': {'object_name': 'Protein'},
'allenid': ('django.db.models.fields.IntegerField', [], {}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True'}),
'entrezid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
u'neuroelectro.proteinsyn': {
'Meta': {'object_name': 'ProteinSyn'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'neuroelectro.publisher': {
'Meta': {'object_name': 'Publisher'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'neuroelectro.regionexpr': {
'Meta': {'object_name': 'RegionExpr'},
'expr_density': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'expr_energy': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'expr_energy_cv': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': u"orm['neuroelectro.BrainRegion']"})
},
u'neuroelectro.species': {
'Meta': {'object_name': 'Species'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'neuroelectro.substance': {
'Meta': {'object_name': 'Substance'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
u'neuroelectro.unit': {
'Meta': {'object_name': 'Unit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
u'neuroelectro.user': {
'Meta': {'object_name': 'User'},
'assigned_neurons': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['neuroelectro.Neuron']", 'null': 'True', 'symmetrical': 'False'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Institution']", 'null': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_curator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lab_head': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'lab_website_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'neuroelectro.usersubmission': {
'Meta': {'object_name': 'UserSubmission'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.Article']", 'null': 'True'}),
'data': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']"})
},
u'neuroelectro.userupload': {
'Meta': {'object_name': 'UserUpload'},
'data': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.FilePathField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['neuroelectro.User']"})
}
}
complete_apps = ['neuroelectro']
|
lessc0de/neuroelectro_org
|
neuroelectro/south_migrations/0074_auto__add_field_neuron_neuron_db_id.py
|
Python
|
gpl-2.0
| 30,740
|
[
"NEURON"
] |
6ddd53a36c5955188be6b2501136453a7d400a024579efc84be59b050e703124
|
__author__ = "Brian O'Neill"
__version__ = '0.2.1'
import doctest
def main__record_history():
"""
# [The *record_history* decorator](id:record_history-decorator)
The `record_history` decorator is a stripped-down version of `log_calls` which
records calls to a decorated function but writes no messages. You can think
of it as `log_calls` with the `record_history` and `log_call_numbers` settings
always true, and without any of the message-logging apparatus.
Just as the settings of `log_calls` for a decorated function are accessible
dynamically through the `log_calls_settings` attribute, the settings of
`record_history` are exposed via a `record_history_settings` attribute.
`record_history_settings` is an object of the same type as `log_calls_settings`,
so it has the same methods and behaviors described in the [`log_calls_settings`
section](./log_calls.html#Dynamic-control-log_calls_settings) of the `log_calls`
documentation.
Functions decorated by `record_history` have a full-featured `stats` attribute,
    as described in the [Call history and statistics](./log_calls.html#call-history-and-statistics)
section of the `log_calls` documentation.
## [Usage](id:usage)
Import `record_history` just as you would `log_calls`:
>>> from log_calls import record_history
We'll use the following function in our examples:
>>> @record_history()
... def record_me(a, b, x):
... return a * x + b
## [Keyword Parameters](id:parameters)
`record_history` has only three keyword parameters:
Keyword parameter | Default value | Description
----------------: | :------------ | :------------------
`enabled` | `True` | When true, call history will be recorded
    `prefix` | `''` | A `str` to prefix the function name with in call records
    `max_history` | `0` | An `int`. *value* > 0 --> store at most *value*-many records, oldest records overwritten; *value* ≤ 0 --> store unboundedly many records.
Setting `enabled` to true in `record_history` is like setting both `enabled`
and `record_history` to true in `log_calls`.
You can supply an [*indirect value*](./log_calls.html#Indirect-values) for the `enabled` parameter, as described
in the log_calls documentation.
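    Here's a small sketch of the `max_history` cap (our own example, not taken
    from the original docs): with `max_history=2`, only the two most recent
    records are kept, while call numbering continues to advance:
    >>> @record_history(max_history=2)
    ... def capped(x):
    ...     return x * x
    >>> for x in range(5):
    ...     _ = capped(x)
    >>> len(capped.stats.history)
    2
    >>> capped.stats.history[-1].call_num
    5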
## [The *record_history_settings* attribute](id:record_history_settings-attribute)
These settings are accessible dynamically through the `record_history_settings`
attribute of a decorated function.
>>> len(record_me.record_history_settings)
3
>>> list(record_me.record_history_settings)
['enabled', 'prefix', 'max_history']
>>> list(record_me.record_history_settings.items())
[('enabled', True), ('prefix', ''), ('max_history', 0)]
>>> record_me.record_history_settings.as_OD() # doctest: +NORMALIZE_WHITESPACE
OrderedDict([('enabled', True), ('prefix', ''), ('max_history', 0)])
## [Call history and statistics for *record_history*](id:Call-history-and-statistics-record_history)
We'll just give a few examples here to show that the `stats` attribute of `record_history`
works just like that of `log_calls`. For a complete account, see
    the [Call history and statistics](./log_calls.html#call-history-and-statistics)
section of the `log_calls` documentation.
Let's finally call the function defined above:
>>> for x in range(15):
... _ = record_me(3, 5, x) # "_ = " for doctest
>>> import pprint
>>> len(record_me.stats.history)
15
The tallies:
>>> record_me.stats.num_calls_logged
15
>>> record_me.stats.num_calls_total
15
>>> record_me.stats.elapsed_secs_logged # doctest: +SKIP
2.2172927856445312e-05
    Call history in CSV format (ellipses for the `elapsed_secs`, `process_secs` and `timestamp` columns):
>>> print(record_me.stats.history_as_csv) # doctest: +ELLIPSIS
call_num|a|b|x|retval|elapsed_secs|process_secs|timestamp|prefixed_fname|caller_chain
1|3|5|0|5|...|...|...|'record_me'|['<module>']
2|3|5|1|8|...|...|...|'record_me'|['<module>']
3|3|5|2|11|...|...|...|'record_me'|['<module>']
4|3|5|3|14|...|...|...|'record_me'|['<module>']
5|3|5|4|17|...|...|...|'record_me'|['<module>']
6|3|5|5|20|...|...|...|'record_me'|['<module>']
7|3|5|6|23|...|...|...|'record_me'|['<module>']
8|3|5|7|26|...|...|...|'record_me'|['<module>']
9|3|5|8|29|...|...|...|'record_me'|['<module>']
10|3|5|9|32|...|...|...|'record_me'|['<module>']
11|3|5|10|35|...|...|...|'record_me'|['<module>']
12|3|5|11|38|...|...|...|'record_me'|['<module>']
13|3|5|12|41|...|...|...|'record_me'|['<module>']
14|3|5|13|44|...|...|...|'record_me'|['<module>']
15|3|5|14|47|...|...|...|'record_me'|['<module>']
<BLANKLINE>
Disable recording, call the function again:
>>> record_me.record_history_settings.enabled = False
>>> _ = record_me(583, 298, 1000)
Call numbers of last 2 calls to `record_me`:
>>> list(map(lambda rec: rec.call_num, record_me.stats.history[-2:]))
[14, 15]
and here are the call counters:
>>> record_me.stats.num_calls_logged
15
>>> record_me.stats.num_calls_total
16
Re-enable recording and call the function again:
>>> record_me.record_history_settings.enabled = True
>>> _ = record_me(1900, 2000, 20)
Here are the last 3 lines of the CSV call history:
>>> for line in record_me.stats.history_as_csv.strip().split('\\n')[-3:]: # doctest: +ELLIPSIS
... print(line)
    14|3|5|13|44|...|...|...|'record_me'|['<module>']
    15|3|5|14|47|...|...|...|'record_me'|['<module>']
    16|1900|2000|20|40000|...|...|...|'record_me'|['<module>']
    and here are the updated call counters:
>>> record_me.stats.num_calls_logged
16
>>> record_me.stats.num_calls_total
17
Finally, let's call `stats.clear_history`, setting `max_history` to 3,
and examine the call history again:
>>> record_me.stats.clear_history(max_history=3)
>>> for x in range(15):
... _ = record_me(3, 5, x)
>>> print(record_me.stats.history_as_csv) # doctest: +ELLIPSIS
call_num|a|b|x|retval|elapsed_secs|process_secs|timestamp|prefixed_fname|caller_chain
13|3|5|12|41|...|...|...|'record_me'|['<module>']
14|3|5|13|44|...|...|...|'record_me'|['<module>']
15|3|5|14|47|...|...|...|'record_me'|['<module>']
<BLANKLINE>
## [Call history and call chains](id:Call-history-and-call-chains)
An example showing a longer call chain, and call numbers of a decorated
caller appearing in the call chain:
>>> record_me.stats.clear_history()
>>> class Base():
... def call_record_me(self, a, b, n):
... nth = 2**n
... for k in range(nth, 2 * nth):
... record_me(a, b, k)
>>> class Even(Base):
... @record_history()
... def call_it(self, n):
... self.call_record_me(2*n + 1, 3*n + 1, n)
>>> class Odd(Base):
... @record_history()
... def call_it(self, n):
... self.call_record_me(5*n + 1, 7*n + 1, n)
>>> even = Even()
>>> odd = Odd()
>>> for i in range(3):
... (even, odd)[i%2].call_it(i)
>>> even.call_it.stats.num_calls_logged, odd.call_it.stats.num_calls_logged
(2, 1)
>>> record_me.stats.num_calls_logged
7
>>> print(even.call_it.stats.history_as_csv) # doctest: +ELLIPSIS
call_num|self|n|retval|elapsed_secs|process_secs|timestamp|prefixed_fname|caller_chain
1|<__main__.Even object at ...>|0|None|...|...|...|'Even.call_it'|['<module>']
2|<__main__.Even object at ...>|2|None|...|...|...|'Even.call_it'|['<module>']
<BLANKLINE>
>>> print(odd.call_it.stats.history_as_csv) # doctest: +ELLIPSIS
call_num|self|n|retval|elapsed_secs|process_secs|timestamp|prefixed_fname|caller_chain
1|<__main__.Odd object at ...>|1|None|...|...|...|'Odd.call_it'|['<module>']
<BLANKLINE>
>>> print(record_me.stats.history_as_csv) # doctest: +ELLIPSIS
call_num|a|b|x|retval|elapsed_secs|process_secs|timestamp|prefixed_fname|caller_chain
1|1|1|1|2|...|...|...|'record_me'|['call_record_me', 'Even.call_it [1]']
2|6|8|2|20|...|...|...|'record_me'|['call_record_me', 'Odd.call_it [1]']
3|6|8|3|26|...|...|...|'record_me'|['call_record_me', 'Odd.call_it [1]']
4|5|7|4|27|...|...|...|'record_me'|['call_record_me', 'Even.call_it [2]']
5|5|7|5|32|...|...|...|'record_me'|['call_record_me', 'Even.call_it [2]']
6|5|7|6|37|...|...|...|'record_me'|['call_record_me', 'Even.call_it [2]']
7|5|7|7|42|...|...|...|'record_me'|['call_record_me', 'Even.call_it [2]']
<BLANKLINE>
    ## [*stats.elapsed_secs_logged* == sum of *elapsed_secs* column of call history](id:elapsed_secs_logged-equal-sum-etc)
Equal "to within an epsilon", anyway, allowing for some very small
numerical inaccuracy:
>>> @record_history()
... def slow(n):
... val = []
... for i in range(n):
... val.append("a" * i)
>>> for i in range(100):
... slow(i)
>>> elapsed_col = list(map(lambda rec: getattr(rec, 'elapsed_secs'),
... slow.stats.history))
>>> abs(sum(elapsed_col) - slow.stats.elapsed_secs_logged) < 1.0e-15
True
Similarly,
### [*stats.process_secs_logged* == sum of *process_secs* column of call history](id:process_secs_logged-equal-sum-etc)
>>> process_col = list(map(lambda rec: getattr(rec, 'process_secs'),
... slow.stats.history))
>>> abs(sum(process_col) - slow.stats.process_secs_logged) < 1.0e-15
True
"""
pass
# SURGERY:
main__record_history.__doc__ = \
main__record_history.__doc__.replace("__main__", __name__)
# For unittest integration
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite())
return tests
if __name__ == "__main__":
doctest.testmod() # (verbose=True)
|
Twangist/log_calls
|
tests/test_record_history.py
|
Python
|
mit
| 9,859
|
[
"Brian"
] |
2c866034961c7a1aa275b04e0dd6eb175474903632526e0e70e1ecb75c15084e
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import spectral.io.envi as envi
import tensorflow as tf
from scipy import optimize
# Input/behaviour switches (1 = on, 0 = off)
switch_excel = 0           # read spectra from an Excel sheet
switch_envi = 1            # read spectra from an ENVI spectral library
switch_dataFrame = 0
switch_sp_choice_envi = 0  # prompt for which ENVI spectrum to fit
switch_mutiBands = 1       # fit with the multi-band initial-parameter block below
# Modified Gaussian Model (MGM): a Gaussian in x**n space. With n = -1 this is
# a Gaussian in 1/wavelength (as in the Modified Gaussian Model of Sunshine et
# al.); with n = 1 it reduces to an ordinary Gaussian.
def MGM(x, height, center, width, yshift, n = -1):
    if type(x) == tf.Tensor:
        return yshift + height * tf.exp( - (x**n - center**n)**2 / (width*2) )
    else:
        return yshift + height * np.exp( - (x**n - center**n)**2 / (width*2) )
# Multiple MGM: sum of several MGM components sharing one y-shift. Parameter
# layout, matching the slicing below: [h1,...,hx, w1,...,wx, c1,...,cx, yshift].
def multi_MGM(x, params, n = 1):
    assert len(params)%3 == 1, 'params must hold equal numbers of heights, widths and centers, plus one y-shift'
    num_Gaussian = int(len(params)/3)
    height = params[:num_Gaussian]
    width = params[num_Gaussian: 2*num_Gaussian]
    center = params[2*num_Gaussian:-1]
    yshift = params[-1]
    # Add the shared y-shift once; adding it inside each component (as the
    # original did) would multiply it by the number of Gaussians.
    res = yshift
    for i in range(num_Gaussian):
        res += MGM(np.array(x), height[i], center[i], width[i], yshift = 0., n = n)
    return res
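# Example (illustrative values only): two components sharing one y-shift --
#   params = [0.2, 0.1,  5.0, 8.0,  880.0, 905.0,  0.0]
#             h1   h2    w1   w2    c1     c2      yshift
#   multi_MGM(x, params, n = 1) evaluates their sum at x.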
# Ordinary Gaussian; params = [height, width, center, yshift].
def gaussian(x, params ):
    assert len(params) == 4, 'params must be [height, width, center, yshift]'
    height = params[0]
    width = params[1]
    center = params[2]
    yshift = params[3]
    return yshift + height * np.exp( - (x - center)**2 / (width*2) )
# Resample the continuum to the full wavelength grid: interpolate the hull (given at ~30 points) and evaluate it at every band (e.g. all 256 wavelengths). 'hull' is a DataFrame with columns ['wavelength', 'reflectance']; 'wavelengths' is the target grid. Returns the resampled reflectance.
def resample(hull, wavelengths):
f = interp1d(hull['wavelength'], hull['reflectance'])
return f(wavelengths)
# Core algorithm: given a spectrum as [[wavelength, reflectance], ...] pairs (e.g. [[928, 0.25], [935, 0.41], ...]), return its continuum (the upper convex hull), resampled to the input wavelengths.
def qhull(sample):
link = lambda a,b: np.concatenate((a,b[1:]))
edge = lambda a,b: np.concatenate(([a],[b]))
    def dome(sample,base): # recursively grow the hull on one side of the base edge
h, t = base
dists = np.dot(sample-h, np.dot(((0,-1),(1,0)),(t-h)))
outer = np.repeat(sample, dists>0, axis=0)
if len(outer):
pivot = sample[np.argmax(dists)]
return link(dome(outer, edge(h, pivot)),
dome(outer, edge(pivot, t)))
else:
return base
if len(sample) > 2:
axis = sample[:,0]
base = np.take(sample, [np.argmin(axis), np.argmax(axis)], axis=0)#get the first and last point [928,0.25][2506,0.36]
hull = link(dome(sample, base),
dome(sample, base[::-1]))
else:
hull = sample
    # Keep only the upper branch: drop hull points past the last wavelength, where the hull wraps back (x decreasing).
index_end = 0
for i in range(len(hull)):
if hull[i][0] == sample[-1,0]:
index_end = i
break
hull = hull[:index_end+1]
hull = pd.DataFrame(hull,columns = ['wavelength', 'reflectance'])
hull = resample(hull,list(sample[:,0]))
return hull
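# Minimal usage sketch for qhull() (synthetic, illustrative data only; this
# helper is not part of the original script): build a toy spectrum, take its
# continuum, and divide it out.
def _demo_continuum_removal():
    wl = np.linspace(900., 2500., 60)
    refl = 0.5 + 1e-4*(wl - 900.) - 0.2*np.exp(-(wl - 1400.)**2 / (2*80.**2))
    spectrum = np.array([wl, refl]).T
    continuum = qhull(spectrum)        # 1-d array on the same wavelength grid
    return spectrum[:,1] / continuum   # continuum-removed reflectance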
# Read one sheet of an Excel file (filePath includes the file name and extension) into a DataFrame.
def read_excel(filePath = None, sheetName = 'Sheet1'):
    assert filePath is not None, 'your filePath is not right, program end.\n'
    df_excel = pd.read_excel(filePath, sheet_name = sheetName)
    return df_excel
# Given an ENVI spectral-library path, return the spectrum [[band, refl], ...] and its continuum [0.95, 0.99, 0.26, ...].
def get_hull_fromEnvi(filePath = None):
sp_lib = envi.open(filePath)
print(sp_lib.names, end = '\n')
if switch_sp_choice_envi :
        sp_choice = input('input the spectrum index you want to fit (starting at 0):\n')
else:
sp_choice = 0
wavelength = sp_lib.bands.centers
reflectance = sp_lib.spectra[int(sp_choice)]
spectrum = np.array([wavelength,reflectance]).T
hull = qhull(spectrum)
return spectrum, hull
# Given an Excel path, sheet name and row index, return that row's average spectrum (read from Excel) and its continuum. Spectrum format: [[928, 0.25], [935, 0.41], ...].
def get_hull_fromExcel(filePath =None, sheetName ='Sheet1', sp_index = 0):
df_excel = read_excel(filePath, sheetName = sheetName)
df_values = df_excel.values
list_column = list(df_excel.columns)
x_begin = list_column.index(928.080017)
sp = df_values[sp_index][x_begin:]
wavelength = list_column[x_begin:]
spectrum = np.array([wavelength,sp]).T
hull = qhull(spectrum)
return spectrum, hull
# TensorFlow fitting (experimental). As noted in __main__ below, this path
# diverged to NaN and was abandoned in favour of least-squares fitting.
def fitting_tf(spectrum, hull, params, fitting_model = MGM):
    # TF graph input
    X = tf.placeholder(tf.float32, shape=[len(spectrum[:,0])])
    Y = tf.placeholder(tf.float32, shape=[len(spectrum[:,1])])
    params_tf = tf.Variable(params)
    # Set parameters
    learning_rate = 0.3
    training_iteration = 3000
    # Construct a model. Caveat (likely why training failed): the model is
    # built from the initial numpy params rather than from params_tf, so the
    # optimizer has no gradient path to the fit parameters.
    model = fitting_model(spectrum[:,0], list(params))
    # Minimize squared errors (loss function).
    loss_function = tf.reduce_sum((model - Y)**2)
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)  # gradient descent
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss_function)
    # Initialize variables
    init = tf.global_variables_initializer()
    # Launch a graph
    with tf.Session() as sess:
        sess.run(init)
        display_step = 20
        # Fit all training data
        for iteration in range(training_iteration):
            training_cost, _ = sess.run([loss_function, optimizer], feed_dict={X: list(spectrum[:,0]), Y: list(spectrum[:,1])})
            # Display logs per iteration step
            if iteration % display_step == 0:
                params_new = sess.run(params_tf)
        # Final parameters: fetch the variable, not the python list.
        params_new = sess.run(params_tf)
        # Linearly interpolate the continuum at the fitted band centers
        # (assumes hull is given as [[wavelength, reflectance], ...] pairs).
        # 'offset' is computed but, as in the original, not returned.
        offset = []
        centers = [params_new[1], params_new[4], params_new[7]]
        for i in centers:
            for j in range(len(hull)-1):
                if hull[j][0] <= i < hull[j+1][0]:
                    offset.append( (i - hull[j][0]) * (hull[j][1] - hull[j+1][1]) / (hull[j][0] - hull[j+1][0]) + hull[j][1] )
        return params_new
# Least-squares fitting via scipy.optimize.leastsq.
def fitting_leastSquare(spectrum, params, fitting_model = multi_MGM, hull = 0):
    # leastsq squares and sums the residuals itself, so return plain residuals
    # here (the original returned squared residuals, which minimizes the sum
    # of 4th powers instead).
    errFunc = lambda p, x, y: y - fitting_model(x, p)
    para_optim, success = optimize.leastsq(errFunc, params, args=(list(spectrum[:,0]), list(spectrum[:,1])), maxfev = 20000)
    return para_optim
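# Illustrative call (values made up; mirrors the __main__ block below):
#   p0 = [0.2, 5.0, 889.0, 0.0]                         # [h, w, c, yshift]
#   p_fit = fitting_leastSquare(band, p0, fitting_model = multi_MGM)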
# Plot figures: the individual Gaussians, the original spectrum, the fitted multi-MGM curve, and the residual.
def plot_figures(para_optimize, axis_x, axis_y):
plt.figure('ori and fitting spectrum')
Gaussian_num = int(len(para_optimize)/3)
for i in range(Gaussian_num):
params_temp = []
params_temp.append(para_optimize[i])
params_temp.append(para_optimize[Gaussian_num+i])
params_temp.append(para_optimize[2*Gaussian_num+i])
params_temp.append(para_optimize[-1])
plt.plot(axis_x, gaussian(axis_x, params_temp),label = str(params_temp[2]))
plt.plot(axis_x, axis_y, lw=2, c='g',label='Bastnas band1 ori')
plt.plot(axis_x, multi_MGM( axis_x,para_optimize),lw=0.5, c='r', label='Bastnas band1 fit of 6 MGM')
diff = multi_MGM( axis_x,para_optimize) - axis_y
plt.plot(axis_x, diff, label = 'Difference')
RMS = float(np.sqrt(np.mean( np.array(diff)**2)))
plt.legend()
plt.show()
print(para_optimize,end = '\n')
print('RMS:%f\t\t mean RMS(percent): %f ' % ((RMS),((RMS)/ np.mean(axis_y)*100)) )
def output_params(params_initial,params_optimize,axis_x,axis_y, band_index = 0):
filePath = 'data/'
file_out = open(filePath + 'bastnas_gau_params.txt','a')
file_out.write('band%d: %fnm - %fnm\n' % (band_index, axis_x[0], axis_x[-1]))
file_out.write('initial params: \n')
for item in params_initial:
file_out.write(str(item) + '\t')
file_out.write('\noptimize params: \n')
for item in params_optimize:
file_out.write(str(item) + '\t')
    diff = multi_MGM( axis_x,params_optimize) - axis_y
RMS = float(np.sqrt(np.mean( np.array(diff)**2)))
file_out.write('\nRMS: %f percent: %f \n\n' % (RMS,(RMS)/ np.mean(axis_y)*100) )
file_out.close()
return 1
if __name__ == '__main__':
filePath = 'data/'
if switch_excel ==1:
fileName = 'Escondida.xlsx'
        # Core continuum call: returns the original spectrum and the continuum. sp_index for Escondida is 0-80 (81 images' average spectra in the sheet).
spectrum, hull = get_hull_fromExcel(filePath = filePath+ fileName, sheetName = 'Sheet1', sp_index = 2)
if switch_envi == 1:
fileName = 'SpectraForAbsorptionFitting.hdr'
spectrum, hull = get_hull_fromEnvi(filePath = filePath + fileName)
    # Continuum removal: divide reflectance by the continuum.
ratio_continuum = spectrum[:,1] / hull
    # Below is an example of how to use ModifiedGaussianModel.py to obtain the optimized parameters.
    # Absorption-band (ABP) windows to fit, as (start_nm, end_nm) pairs.
    ABP_bands = [(705.619995,770.429993), (770.429993,833.48999),(854.119995,880.380005),(880.380005, 895.549988)]
ABP_index = []
spectrum_band = []
hull_band = []
for band in ABP_bands:
(band_begin,band_end) = (list(spectrum[:,0]).index(band[0]),list(spectrum[:,0]).index(band[1]) )
ABP_index.append((band_begin,band_end))
spectrum_band.append( spectrum[band_begin:band_end])
hull_band.append(hull[band_begin:band_end])
    # Note: the TensorFlow fit diverged (NaN), so the least-squares method below is used instead.
#fitting_tf(spectrum_band[0], hull_band[0], fitting_model = multi_MGM)
band_index = 3
input_band = spectrum_band[band_index]
axis_x = spectrum_band[band_index][:,0]
axis_y = spectrum_band[band_index][:,1]
#set initial params.
if switch_mutiBands:
center = [889]
height = [0.2 for i in range(len(center))]
width = [5. for i in range(len(center))]
yshift = [0]
params = []
params.extend(height)
params.extend(width)
params.extend(center)
params.extend(yshift)
para_optimize = fitting_leastSquare(input_band, params, fitting_model = multi_MGM, hull = hull_band[0])
#output_params(params, para_optimize, axis_x ,axis_y, band_index = band_index + 1)
plot_figures(para_optimize,axis_x, axis_y)
#height 0.01, RMS .000002 / 8; 0.02 .000099; 0.1 .000192 .000712; -0.01, .000618 .002277
#height 5, RMS .000108/ 399;
|
Vincentyao1995/Globalink2017-UBC
|
python_lib/dvm/ModifiedGaussianModel.py
|
Python
|
mit
| 11,055
|
[
"Gaussian"
] |
5fd51af03aff74dc95b986bf895dd3af827c4c6af7ed7f53003cf30fff37fbd6
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides a class to fit ellipses.
"""
import math
from astropy import log
import numpy as np
import numpy.ma as ma
from .harmonics import (first_and_second_harmonic_function,
fit_first_and_second_harmonics)
from .isophote import CentralPixel, Isophote
from .sample import EllipseSample
__all__ = ['EllipseFitter']
__doctest_skip__ = ['EllipseFitter.fit']
PI2 = np.pi / 2
MAX_EPS = 0.95
MIN_EPS = 0.05
DEFAULT_CONVERGENCE = 0.05
DEFAULT_MINIT = 10
DEFAULT_MAXIT = 50
DEFAULT_FFLAG = 0.7
DEFAULT_MAXGERR = 0.5
class EllipseFitter:
"""
Class to fit ellipses.
Parameters
----------
sample : `~photutils.isophote.EllipseSample` instance
The sample data to be fitted.
"""
def __init__(self, sample):
self._sample = sample
def fit(self, conver=DEFAULT_CONVERGENCE, minit=DEFAULT_MINIT,
maxit=DEFAULT_MAXIT, fflag=DEFAULT_FFLAG, maxgerr=DEFAULT_MAXGERR,
going_inwards=False):
"""
Fit an elliptical isophote.
Parameters
----------
conver : float, optional
The main convergence criterion. Iterations stop when the
largest harmonic amplitude becomes smaller (in absolute
value) than ``conver`` times the harmonic fit rms. The
default is 0.05.
minit : int, optional
The minimum number of iterations to perform. A minimum of 10
(the default) iterations guarantees that, on average, 2
iterations will be available for fitting each independent
parameter (the four harmonic amplitudes and the intensity
level). For the first isophote, the minimum number of
iterations is 2 * ``minit`` to ensure that, even departing
from not-so-good initial values, the algorithm has a better
chance to converge to a sensible solution.
maxit : int, optional
The maximum number of iterations to perform. The default is
50.
fflag : float, optional
The acceptable fraction of flagged data points in the
sample. If the actual fraction of valid data points is
smaller than this, the iterations will stop and the current
`~photutils.isophote.Isophote` will be returned. Flagged
data points are points that either lie outside the image
frame, are masked, or were rejected by sigma-clipping. The
default is 0.7.
maxgerr : float, optional
The maximum acceptable relative error in the local radial
intensity gradient. This is the main control for preventing
            ellipses from growing into regions of too low signal-to-noise
            ratio. `Busko (1996; ASPC
101, 139)
<https://ui.adsabs.harvard.edu/abs/1996ASPC..101..139B/abstract>`_
showed that the fitting precision relates to that relative
error. The usual behavior of the gradient relative error is
to increase with semimajor axis, being larger in outer,
fainter regions of a galaxy image. In the current
implementation, the ``maxgerr`` criterion is triggered only
when two consecutive isophotes exceed the value specified by
the parameter. This prevents premature stopping caused by
contamination such as stars and HII regions.
A number of actions may happen when the gradient error
exceeds ``maxgerr`` (or becomes non-significant and is set
to `None`). If the maximum semimajor axis specified by
``maxsma`` is set to `None`, semimajor axis growth is
stopped and the algorithm proceeds inwards to the galaxy
center. If ``maxsma`` is set to some finite value, and this
value is larger than the current semimajor axis length, the
algorithm enters non-iterative mode and proceeds outwards
until reaching ``maxsma``. The default is 0.5.
going_inwards : bool, optional
Parameter to define the sense of SMA growth. When fitting
just one isophote, this parameter is used only by the code
that defines the details of how elliptical arc segments
("sectors") are extracted from the image, when using area
extraction modes (see the ``integrmode`` parameter in the
`~photutils.isophote.EllipseSample` class). The default is
`False`.
Returns
-------
result : `~photutils.isophote.Isophote` instance
The fitted isophote, which also contains fit status
information.
Examples
--------
>>> from photutils.isophote import EllipseSample, EllipseFitter
>>> sample = EllipseSample(data, sma=10.)
>>> fitter = EllipseFitter(sample)
>>> isophote = fitter.fit()
"""
sample = self._sample
# this flag signals that limiting gradient error (`maxgerr`)
# wasn't exceeded yet.
lexceed = False
# here we keep track of the sample that caused the minimum harmonic
        # amplitude (in absolute value). This will eventually be used to
# build the resulting Isophote in cases where iterations run to
# the maximum allowed (maxit), or the maximum number of flagged
# data points (fflag) is reached.
        minimum_amplitude_value = np.inf
minimum_amplitude_sample = None
# these must be passed throughout the execution chain.
fixed_parameters = self._sample.geometry.fix
for i in range(maxit):
# Force the sample to compute its gradient and associated values.
sample.update(fixed_parameters)
# The extract() method returns sampled values as a 2-d numpy array
# with the following structure:
# values[0] = 1-d array with angles
# values[1] = 1-d array with radii
# values[2] = 1-d array with intensity
values = sample.extract()
# We have to check for a zero-length condition here, and bail out
# in case it is detected. The scipy fitter won't raise an exception
# for zero-length input arrays, but just prints an "INFO" message.
# This may result in an infinite loop.
if len(values[2]) < 1:
s = str(sample.geometry.sma)
log.warning("Too small sample to warrant a fit. SMA is " + s)
sample.geometry.fix = fixed_parameters
return Isophote(sample, i + 1, False, 3)
# Fit harmonic coefficients. Failure in fitting is
# a fatal error; terminate immediately with sample
# marked as invalid.
try:
coeffs = fit_first_and_second_harmonics(values[0], values[2])
coeffs = coeffs[0]
except Exception as e:
log.warning(e)
sample.geometry.fix = fixed_parameters
return Isophote(sample, i + 1, False, 3)
# Mask out coefficients that control fixed ellipse parameters.
free_coeffs = ma.masked_array(coeffs[1:], mask=fixed_parameters)
# Largest non-masked harmonic in absolute value drives the
# correction.
largest_harmonic_index = np.argmax(np.abs(free_coeffs))
largest_harmonic = free_coeffs[largest_harmonic_index]
# see if the amplitude decreased; if yes, keep the
# corresponding sample for eventual later use.
if abs(largest_harmonic) < minimum_amplitude_value:
minimum_amplitude_value = abs(largest_harmonic)
minimum_amplitude_sample = sample
# check if converged
model = first_and_second_harmonic_function(values[0], coeffs)
residual = values[2] - model
if ((conver * sample.sector_area * np.std(residual))
> np.abs(largest_harmonic)):
# Got a valid solution. But before returning, ensure
# that a minimum of iterations has run.
if i >= minit - 1:
sample.update(fixed_parameters)
return Isophote(sample, i + 1, True, 0)
# it may not have converged yet, but the sample contains too
# many invalid data points: return.
if sample.actual_points < (sample.total_points * fflag):
# when too many data points were flagged, return the
# best fit sample instead of the current one.
minimum_amplitude_sample.update(fixed_parameters)
return Isophote(minimum_amplitude_sample, i + 1, True, 1)
# pick appropriate corrector code.
corrector = _CORRECTORS[largest_harmonic_index]
# generate *NEW* EllipseSample instance with corrected
# parameter. Note that this instance is still devoid of other
# information besides its geometry. It needs to be explicitly
# updated for computations to proceed. We have to build a new
# EllipseSample instance every time because of the lazy
# extraction process used by EllipseSample code. To minimize
# the number of calls to the area integrators, we pay a
# (hopefully smaller) price here, by having multiple calls to
# the EllipseSample constructor.
sample = corrector.correct(sample, largest_harmonic)
sample.update(fixed_parameters)
# see if any abnormal (or unusual) conditions warrant
# the change to non-iterative mode, or go-inwards mode.
proceed, lexceed = self._check_conditions(
sample, maxgerr, going_inwards, lexceed)
if not proceed:
sample.update(fixed_parameters)
return Isophote(sample, i + 1, True, -1)
# Got to the maximum number of iterations. Return with
# code 2, and handle it as a valid isophote. Use the
# best fit sample instead of the current one.
minimum_amplitude_sample.update(fixed_parameters)
return Isophote(minimum_amplitude_sample, maxit, True, 2)
@staticmethod
def _check_conditions(sample, maxgerr, going_inwards, lexceed):
proceed = True
# If center wandered more than allowed, put it back
# in place and signal the end of iterative mode.
# if wander:
# if abs(dx) > WANDER(al)) or abs(dy) > WANDER(al):
# sample.geometry.x0 -= dx
# sample.geometry.y0 -= dy
# STOP(al) = ST_NONITERATE
# proceed = False
# check if an acceptable gradient value could be computed.
if sample.gradient_error and sample.gradient_relative_error:
if not going_inwards and (
sample.gradient_relative_error > maxgerr
or sample.gradient >= 0.0):
if lexceed:
proceed = False
else:
lexceed = True
else:
proceed = False
# check if ellipse geometry diverged.
        if abs(sample.geometry.eps) > MAX_EPS:
proceed = False
if (sample.geometry.x0 < 1. or
sample.geometry.x0 > sample.image.shape[1] or
sample.geometry.y0 < 1. or
sample.geometry.y0 > sample.image.shape[0]):
proceed = False
# See if eps == 0 (round isophote) was crossed.
# If so, fix it but still proceed
if sample.geometry.eps < 0.:
sample.geometry.eps = min(-sample.geometry.eps, MAX_EPS)
if sample.geometry.pa < PI2:
sample.geometry.pa += PI2
else:
sample.geometry.pa -= PI2
# If ellipse is an exact circle, computations will diverge.
# Make it slightly flat, but still proceed
if sample.geometry.eps == 0.0:
sample.geometry.eps = MIN_EPS
return proceed, lexceed
class _ParameterCorrector:
def correct(self, sample, harmonic):
raise NotImplementedError
class _PositionCorrector(_ParameterCorrector):
@staticmethod
def finalize_correction(dx, dy, sample):
new_x0 = sample.geometry.x0 + dx
new_y0 = sample.geometry.y0 + dy
return EllipseSample(sample.image, sample.geometry.sma, x0=new_x0,
y0=new_y0, astep=sample.geometry.astep,
sclip=sample.sclip, nclip=sample.nclip,
eps=sample.geometry.eps,
position_angle=sample.geometry.pa,
linear_growth=sample.geometry.linear_growth,
integrmode=sample.integrmode)
class _PositionCorrector0(_PositionCorrector):
def correct(self, sample, harmonic):
aux = -harmonic * (1. - sample.geometry.eps) / sample.gradient
dx = -aux * math.sin(sample.geometry.pa)
dy = aux * math.cos(sample.geometry.pa)
return self.finalize_correction(dx, dy, sample)
class _PositionCorrector1(_PositionCorrector):
def correct(self, sample, harmonic):
aux = -harmonic / sample.gradient
dx = aux * math.cos(sample.geometry.pa)
dy = aux * math.sin(sample.geometry.pa)
return self.finalize_correction(dx, dy, sample)
class _AngleCorrector(_ParameterCorrector):
def correct(self, sample, harmonic):
eps = sample.geometry.eps
sma = sample.geometry.sma
gradient = sample.gradient
correction = (harmonic * 2. * (1. - eps) / sma / gradient /
((1. - eps)**2 - 1.))
# '% np.pi' to make angle lie between 0 and np.pi radians
new_pa = (sample.geometry.pa + correction) % np.pi
return EllipseSample(sample.image, sample.geometry.sma,
x0=sample.geometry.x0, y0=sample.geometry.y0,
astep=sample.geometry.astep, sclip=sample.sclip,
nclip=sample.nclip, eps=sample.geometry.eps,
position_angle=new_pa,
linear_growth=sample.geometry.linear_growth,
integrmode=sample.integrmode)
class _EllipticityCorrector(_ParameterCorrector):
def correct(self, sample, harmonic):
eps = sample.geometry.eps
sma = sample.geometry.sma
gradient = sample.gradient
correction = harmonic * 2. * (1. - eps) / sma / gradient
new_eps = min((sample.geometry.eps - correction), MAX_EPS)
return EllipseSample(sample.image, sample.geometry.sma,
x0=sample.geometry.x0, y0=sample.geometry.y0,
astep=sample.geometry.astep, sclip=sample.sclip,
nclip=sample.nclip, eps=new_eps,
position_angle=sample.geometry.pa,
linear_growth=sample.geometry.linear_growth,
integrmode=sample.integrmode)
# instances of corrector code live here:
_CORRECTORS = [_PositionCorrector0(), _PositionCorrector1(),
_AngleCorrector(), _EllipticityCorrector()]
class CentralEllipseFitter(EllipseFitter):
"""
A special Fitter class to handle the case of the central pixel in
the galaxy image.
"""
def fit(self, conver=DEFAULT_CONVERGENCE, minit=DEFAULT_MINIT,
maxit=DEFAULT_MAXIT, fflag=DEFAULT_FFLAG, maxgerr=DEFAULT_MAXGERR,
going_inwards=False):
"""
Perform just a simple 1-pixel extraction at the current (x0, y0)
position using bilinear interpolation.
        The input parameters are ignored, but are included simply to
        match the calling signature of the parent class.
Returns
-------
result : `~photutils.isophote.CentralEllipsePixel` instance
The central pixel value. For convenience, the
`~photutils.isophote.CentralEllipsePixel` class inherits
from the `~photutils.isophote.Isophote` class, although it's
not really a true isophote but just a single intensity value
at the central position. Thus, most of its attributes are
hardcoded to `None` or other default value when appropriate.
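
        Examples
        --------
        A minimal sketch (``sample`` here is assumed to be an
        `~photutils.isophote.EllipseSample` built at the galaxy center):

        >>> from photutils.isophote.fitter import CentralEllipseFitter
        >>> fitter = CentralEllipseFitter(sample)  # doctest: +SKIP
        >>> center = fitter.fit()  # doctest: +SKIP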
"""
# default values
fixed_parameters = np.array([False, False, False, False])
self._sample.update(fixed_parameters)
return CentralPixel(self._sample)
|
astropy/photutils
|
photutils/isophote/fitter.py
|
Python
|
bsd-3-clause
| 16,937
|
[
"Galaxy"
] |
81c84be739124362e2fe8659a5deb74c1fc5eab78418bcf93b87a30dfdbd9ae6
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
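# For example (illustrative values only):
#   replace_vars({'TITANIUM_SDK': '/Library/titanium'}, '$(TITANIUM_SDK)/iphone')
# returns '/Library/titanium/iphone'; unknown $(KEY) tokens are left untouched.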
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
sdk = config['TITANIUM_SDK']
support_dir = os.path.join(sdk,'module','support')
sys.path.append(support_dir)
import markdown
documentation = []
for file in os.listdir(docdir):
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
        documentation.append({file:html})
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','com.test.js')
if not os.path.exists(js_file): return
sdk = config['TITANIUM_SDK']
iphone_dir = os.path.join(sdk,'iphone')
sys.path.insert(0,iphone_dir)
from compiler import Compiler
path = os.path.basename(js_file)
metadata = Compiler.make_function_from_file(path,js_file)
method = metadata['method']
eq = path.replace('.','_')
method = ' return %s;' % method
f = os.path.join(cwd,'Classes','ComTestModuleAssets.m')
c = open(f).read()
idx = c.find('return ')
before = c[0:idx]
after = """
}
@end
"""
newc = before + method + after
if newc!=c:
x = open(f,'w')
x.write(newc)
x.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
if not os.path.exists(path): die("missing %s" % path)
f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README','com.test.js']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[]):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e)==2 and e[1]=='.pyc':continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
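# Illustrative path rewrite (values mirror the call sites below): a file at
# 'assets/img/logo.png' zipped with basepath 'modules/iphone/com.test/1.0/assets'
# is stored inside the zip as 'modules/iphone/com.test/1.0/assets/img/logo.png'.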
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
for dn in ('assets','example','platform'):
if os.path.exists(dn):
zip_dir(zf,dn,'%s/%s' % (modulepath,dn),['README'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
zf.close()
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
|
dcarroll/forcemodule
|
build.py
|
Python
|
bsd-3-clause
| 5,808
|
[
"VisIt"
] |
5a97b417f4798111f251a861d17a3c9f3b7feb330a5dfefd756c0d621863bbf5
|
from tao.tests.support.factories import DataSetFactory, DataSetPropertyFactory, GalaxyModelFactory, GlobalParameterFactory, SimulationFactory, UserFactory, SnapshotFactory, StellarModelFactory, DustModelFactory, BandPassFilterFactory, SurveyPresetFactory
from tao.tests.integration_tests.helper import LiveServerTest
class DatasetTests(LiveServerTest):
def setUp(self):
super(DatasetTests, self).setUp()
s1 = SimulationFactory.create()
s2 = SimulationFactory.create()
s3 = SimulationFactory.create()
gm1 = GalaxyModelFactory.create()
gm2 = GalaxyModelFactory.create()
ds1 = DataSetFactory.create(simulation=s1, galaxy_model=gm1, max_job_box_count=25)
ds2 = DataSetFactory.create(simulation=s2, galaxy_model=gm2, max_job_box_count=25)
ds3 = DataSetFactory.create(simulation=s3, galaxy_model=gm2, max_job_box_count=25)
self.default_dataset = ds2
DataSetPropertyFactory.create(dataset=ds1, name='dataset property 1')
DataSetPropertyFactory.create(dataset=ds2, name='dataset property 2')
self.default_dataset.default_filter_field = DataSetPropertyFactory.create(dataset=ds3, name='dataset property 3')
self.default_dataset.save()
SnapshotFactory.create(dataset=self.default_dataset, redshift='0.33')
self.survey_preset = SurveyPresetFactory.create(name='Preset 1', parameters='<xml></xml>')
for i in range(3):
StellarModelFactory.create(label='stellar_label_%03d' % i, name='stellar_name_%03d' % i, description='<p>Description %d </p>' % i)
BandPassFilterFactory.create(label='Band pass filter %03d' % i, filter_id='%d' % i)
DustModelFactory.create(name='Dust_model_%03d.dat' % i, label='Dust model %03d' % i, details='<p>Detail %d </p>' % i)
SnapshotFactory.create(dataset_id=i)
password = 'password'
user = UserFactory.create(username='user', is_superuser=True)
user.set_password(password)
user.save()
self.login(user.username, password)
def test_default_dataset_loads_correctly(self):
GlobalParameterFactory.create(parameter_name='default_dataset', parameter_value=self.default_dataset.pk)
self.visit('mock_galaxy_factory')
self.assert_on_page('mock_galaxy_factory')
self.click('tao-tabs-light_cone')
selected_simulation = self.get_selected_option_text(self.lc_id('dark_matter_simulation'))
self.assertEqual(self.default_dataset.simulation.name, selected_simulation)
selected_galaxy_model = self.get_selected_option_text(self.lc_id('galaxy_model'))
self.assertEqual(self.default_dataset.galaxy_model.name, selected_galaxy_model)
|
IntersectAustralia/asvo-tao
|
web/tao/tests/integration_tests/dataset_tests.py
|
Python
|
gpl-3.0
| 2,741
|
[
"VisIt"
] |
104680f8383050b51b91708cb5175dbdb91fa6b8019dd531d4b345316ffdfab8
|
#!/usr/bin/python
import http.client
import httplib2
import os
import random
import sys
import time
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, http.client.NotConnected,
http.client.IncompleteRead, http.client.ImproperConnectionState,
http.client.CannotSendRequest, http.client.CannotSendHeader,
http.client.ResponseNotReady, http.client.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the {{ Google Cloud Console }} at
# {{ https://cloud.google.com/console }}.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = "client_secrets.json"
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the {{ Cloud Console }}
{{ https://cloud.google.com/console }}
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")
def get_authenticated_service(args):
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
scope=YOUTUBE_UPLOAD_SCOPE,
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage("%s-oauth2.json" % sys.argv[0])
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
def initialize_upload(youtube, options):
tags = None
if options.keywords:
tags = options.keywords.split(",")
body=dict(
snippet=dict(
title=options.title,
description=options.description,
tags=tags,
categoryId=options.category
),
status=dict(
privacyStatus=options.privacyStatus
)
)
# Call the API's videos.insert method to create and upload the video.
insert_request = youtube.videos().insert(
part=",".join(body.keys()),
body=body,
# The chunksize parameter specifies the size of each chunk of data, in
# bytes, that will be uploaded at a time. Set a higher value for
# reliable connections as fewer chunks lead to faster uploads. Set a lower
# value for better recovery on less reliable connections.
#
# Setting "chunksize" equal to -1 in the code below means that the entire
# file will be uploaded in a single HTTP request. (If the upload fails,
# it will still be retried where it left off.) This is usually a best
# practice, but if you're using Python older than 2.6 or if you're
# running on App Engine, you should set the chunksize to something like
# 1024 * 1024 (1 megabyte).
media_body=MediaFileUpload(options.file, chunksize=-1, resumable=True)
)
resumable_upload(insert_request)
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request):
response = None
error = None
retry = 0
while response is None:
try:
print("Uploading file...")
status, response = insert_request.next_chunk()
if response is not None:
if 'id' in response:
print("Video id '%s' was successfully uploaded." % response['id'])
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError as e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except RETRIABLE_EXCEPTIONS as e:
error = "A retriable error occurred: %s" % e
if error is not None:
print(error)
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print("Sleeping %f seconds and then retrying..." % sleep_seconds)
time.sleep(sleep_seconds)
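# Hypothetical helper (not in the original sample) that makes the backoff
# schedule above explicit: before the n-th retry the sleep is drawn
# uniformly from [0, 2**n) seconds.
def _backoff_upper_bounds(max_retries=MAX_RETRIES):
    # Upper bound, in seconds, of the random sleep before each retry.
    return [2 ** retry for retry in range(1, max_retries + 1)]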
if __name__ == '__main__':
argparser.add_argument("--file", required=True, help="Video file to upload")
argparser.add_argument("--title", help="Video title", default="Test Title")
argparser.add_argument("--description", help="Video description",
default="Test Description")
argparser.add_argument("--category", default="22",
help="Numeric video category. " +
"See https://developers.google.com/youtube/v3/docs/videoCategories/list")
argparser.add_argument("--keywords", help="Video keywords, comma separated",
default="")
argparser.add_argument("--privacyStatus", choices=VALID_PRIVACY_STATUSES,
default=VALID_PRIVACY_STATUSES[0], help="Video privacy status.")
args = argparser.parse_args()
if not os.path.exists(args.file):
exit("Please specify a valid file using the --file= parameter.")
youtube = get_authenticated_service(args)
try:
initialize_upload(youtube, args)
except HttpError as e:
print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
|
mfwarren/yachtpromo
|
upload_video.py
|
Python
|
mit
| 6,730
|
[
"VisIt"
] |
cbfa3a28ce50f9f1d29db8b235d8496fbe5effdd246bd7df3daca52315b49198
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 17:36:35 2017
@author: asadm2
"""
### DESCRIPTION
#This script carries out an exposure time check and an "at least 2 filters"
#check
import pandas as pd
import numpy as np
def expfil2(objname,revtxt):
"""
This function carries out an exposure time check and also checks if
the images that passed the first check are taken using at least 2 filters
Args:
objname: ECOID of the galaxy
revtxt: objname_rev.txt file that Obj_in_Img.py returns
Returns:
True if the ECOID passes both checks and False otherwise; the ECOID
is also appended to goodObj.txt or badObj.txt accordingly
"""
path_to_raw = '/fs1/masad/Research/Repositories/ECO_Globular_Clusters/'\
'data/raw/'
path_to_interim = '/fs1/masad/Research/Repositories/ECO_Globular_Clusters/'\
'data/interim/'
ECO = path_to_raw + 'Available_HST_Data_ECO.txt'
ECO = pd.read_csv(ECO, delimiter='\s+', header=None, \
names=['ECOID', 'HSTOBJ', 'RA', 'DEC', 'exptime', \
'camera', 'filename'])
ECO['exptime'] = pd.to_numeric(ECO['exptime'],errors='coerce')
ECO['filename'] = ECO['filename'].astype('str')
files_arr = ECO['filename'].values
n_files = len(files_arr)
wfc3_ir = ['f110w','f125w','f160w']
wfc3_uvis = ['f606w','f600lp']
filters = [[] for x in range(n_files)]
for i in range(len(ECO['filename'])):
str_split = ECO['filename'][i].split(';')[1].split('_')
filter_i = str_split[3]+'_'+str_split[4]+'_'+str_split[5]
if 'ACS' in filter_i: #acs_wfc
filter_i = filter_i.lower()
elif 'd634' in filter_i: #acs-wfc fixed to acs_wfc
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
if 'acs-wfc' in filter_i:
str_split = filter_i.split('-')
filter_i = str_split[0]+'_'+str_split[1]
elif 'm51' in filter_i: #acs-wfc fixed to acs_wfc
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
if 'acs-wfc' in filter_i:
str_split = filter_i.split('-')
filter_i = str_split[0]+'_'+str_split[1]
elif 'tile' in filter_i: #acs-wfc fixed to acs_wfc
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
if 'acs-wfc' in filter_i:
str_split = filter_i.split('-')
filter_i = str_split[0]+'_'+str_split[1]
elif 'c_v' in filter_i: #acs-wfc fixed to acs_wfc
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
if 'acs-wfc' in filter_i:
str_split = filter_i.split('-')
filter_i = str_split[0]+'_'+str_split[1]
elif 'ngc' in filter_i: #acs fixed to acs_wfc
str_split = filter_i.split('_')
filter_i = str_split[0]+'_wfc_'+str_split[2]
elif '131009' in filter_i: #wfc3
str_split = filter_i.split('_')
if str_split[2] == 'f438w':
filter_i = str_split[0]+'_uvis_'+str_split[2]
elif str_split[2] == 'f775w':
filter_i = str_split[0]+'_uvis_'+str_split[2]
elif 'par' in filter_i:#and any(str in filter_i for str in wfc3_ir):
str_split = filter_i.split('_')
if str_split[2] == 'f606w':
filter_i = str_split[0]+'_uvis_'+str_split[2]
elif str_split[2] == 'f125w':
filter_i = str_split[0]+'_ir_'+str_split[2]
elif str_split[2] == 'f160w':
filter_i = str_split[0]+'_ir_'+str_split[2]
elif str_split[2] == 'f110w':
filter_i = str_split[0]+'_ir_'+str_split[2]
elif str_split[2] == 'f600lp':
filter_i = str_split[0]+'_uvis_'+str_split[2]
elif 'w_wf' in filter_i: #wfpc2
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[1]
elif 'lp_wf' in filter_i: #wfpc2
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[1]
elif 'n4496' in filter_i: #all wfpc2
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
elif 'n5194' in filter_i: #all wfpc2
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
elif 'u6614' in filter_i: #all wfpc2
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
filters[i] = filter_i
filters = np.asarray(filters)
filters_unique = np.unique(filters)
#Adding filter array to DataFrame
ECO.loc[:, 'filters'] = filters
### Exposure time check
exptime_arr = [9399, 3671, 3331, 1319, 2055, 2236, 1758, 10337, 2045, 1237,
2290, 3853, 1928101311, 73024829, 275363, 1241, 31705,
26575,6021, 3548, 3723, 2053, 2249, 3368, 5275, 4069,
171413, 31062, 11431, 5789, 8520, 10071, 6677, 24445, 12605,
10757, 50294]
exp_fil_dict = dict(zip(filters_unique, exptime_arr ))
contents = pd.read_csv(revtxt,header=None,names=['filename'])
contents.filename = 'http://hla.stsci.edu/cgi-bin/' + contents.filename\
.astype(str)
#Match and return all columns associated with this ECOID and filename
#from ECO catalog
ECO2 = ECO.loc[(ECO.filename.isin(contents.filename)) & \
(ECO.ECOID==objname),: ]
ECOID_groups = ECO2.groupby('filters')
ECO_keys = ECOID_groups.groups.keys()
ECO_match3 = []
for key in ECO_keys:
if ECOID_groups.get_group(key).exptime.sum() >= exp_fil_dict[key]:
ECO_match3.append(key) #"good" keys
ECO_match3 = np.array(ECO_match3)
### At least 2 filter check
if len(ECO_match3) >= 2:
result = True
with open(path_to_interim + 'goodObj.txt', 'a') as newfile:
newfile.write(np.unique(ECO2.ECOID)[0]+'\n')
else:
result = False
with open(path_to_interim + 'badObj.txt', 'a') as newfile:
newfile.write(np.unique(ECO2.ECOID)[0]+'\n')
return result
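# Hedged usage sketch (the ECOID and rev-file name are hypothetical; both
# checks are driven by the catalogs read inside expfil2):
# passed = expfil2('ECO00001', 'ECO00001_rev.txt')
# print('passed both checks' if passed else 'failed a check')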
|
MehnaazAsad/ECO_Globular_Clusters
|
src/data/mods_prelim_checks_2/Exp_fil2.py
|
Python
|
mit
| 6,614
|
[
"Galaxy"
] |
477511148e8bf9ad87267a7065bd2ba6ef95a8e99a1e280101a8134c196049cc
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
General spin-orbital CISD
'''
import warnings
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import gccsd
from pyscf.cc import gccsd_rdm
from pyscf.cc.addons import spatial2spin, spin2spatial
from pyscf.ci import cisd
from pyscf.ci import ucisd
from pyscf.fci import cistring
def make_diagonal(myci, eris):
nocc = myci.nocc
nmo = myci.nmo
nvir = nmo - nocc
mo_energy = eris.fock.diagonal()
jkdiag = numpy.zeros((nmo,nmo), dtype=mo_energy.dtype)
jkdiag[:nocc,:nocc] = numpy.einsum('ijij->ij', eris.oooo)
jkdiag[nocc:,nocc:] = numpy.einsum('ijij->ij', eris.vvvv)
jkdiag[:nocc,nocc:] = numpy.einsum('ijij->ij', eris.ovov)
jksum = jkdiag[:nocc,:nocc].sum()
ehf = mo_energy[:nocc].sum() - jksum * .5
e1diag = numpy.empty((nocc,nvir), dtype=mo_energy.dtype)
e2diag = numpy.empty((nocc,nocc,nvir,nvir), dtype=mo_energy.dtype)
for i in range(nocc):
for a in range(nocc, nmo):
e1diag[i,a-nocc] = ehf - mo_energy[i] + mo_energy[a] - jkdiag[i,a]
for j in range(nocc):
for b in range(nocc, nmo):
e2diag[i,j,a-nocc,b-nocc] = ehf \
- mo_energy[i] - mo_energy[j] \
+ mo_energy[a] + mo_energy[b] \
+ jkdiag[i,j] + jkdiag[a,b] \
- jkdiag[i,a] - jkdiag[j,a] \
- jkdiag[i,b] - jkdiag[j,b]
return amplitudes_to_cisdvec(ehf, e1diag, e2diag)
def contract(myci, civec, eris):
nocc = myci.nocc
nmo = myci.nmo
c0, c1, c2 = cisdvec_to_amplitudes(civec, nmo, nocc)
fock = eris.fock
foo = fock[:nocc,:nocc]
fov = fock[:nocc,nocc:]
fvo = fock[nocc:,:nocc]
fvv = fock[nocc:,nocc:]
t1 = lib.einsum('ie,ae->ia', c1, fvv)
t1 -= lib.einsum('ma,mi->ia', c1, foo)
t1 += lib.einsum('imae,me->ia', c2, fov)
t1 += lib.einsum('nf,nafi->ia', c1, eris.ovvo)
t1 -= 0.5*lib.einsum('imef,maef->ia', c2, eris.ovvv)
t1 -= 0.5*lib.einsum('mnae,mnie->ia', c2, eris.ooov)
tmp = lib.einsum('ijae,be->ijab', c2, fvv)
t2 = tmp - tmp.transpose(0,1,3,2)
tmp = lib.einsum('imab,mj->ijab', c2, foo)
t2 -= tmp - tmp.transpose(1,0,2,3)
t2 += 0.5*lib.einsum('mnab,mnij->ijab', c2, eris.oooo)
t2 += 0.5*lib.einsum('ijef,abef->ijab', c2, eris.vvvv)
tmp = lib.einsum('imae,mbej->ijab', c2, eris.ovvo)
tmp+= numpy.einsum('ia,bj->ijab', c1, fvo)
tmp = tmp - tmp.transpose(0,1,3,2)
t2 += tmp - tmp.transpose(1,0,2,3)
tmp = lib.einsum('ie,jeba->ijab', c1, numpy.asarray(eris.ovvv).conj())
t2 += tmp - tmp.transpose(1,0,2,3)
tmp = lib.einsum('ma,ijmb->ijab', c1, numpy.asarray(eris.ooov).conj())
t2 -= tmp - tmp.transpose(0,1,3,2)
eris_oovv = numpy.asarray(eris.oovv)
t1 += fov.conj() * c0
t2 += eris_oovv.conj() * c0
t0 = numpy.einsum('ia,ia', fov, c1)
t0 += numpy.einsum('ijab,ijab', eris_oovv, c2) * .25
return amplitudes_to_cisdvec(t0, t1, t2)
def amplitudes_to_cisdvec(c0, c1, c2):
nocc, nvir = c1.shape
ooidx = numpy.tril_indices(nocc, -1)
vvidx = numpy.tril_indices(nvir, -1)
c2tril = lib.take_2d(c2.reshape(nocc**2,nvir**2),
ooidx[0]*nocc+ooidx[1], vvidx[0]*nvir+vvidx[1])
return numpy.hstack((c0, c1.ravel(), c2tril.ravel()))
def cisdvec_to_amplitudes(civec, nmo, nocc):
nvir = nmo - nocc
c0 = civec[0]
c1 = civec[1:nocc*nvir+1].reshape(nocc,nvir)
c2 = ccsd._unpack_4fold(civec[nocc*nvir+1:], nocc, nvir)
return c0, c1, c2
def from_ucisdvec(civec, nocc, orbspin):
'''Convert the (spin-separated) CISD coefficient vector to GCISD
coefficient vector'''
nmoa = numpy.count_nonzero(orbspin == 0)
nmob = numpy.count_nonzero(orbspin == 1)
if isinstance(nocc, int):
nocca = numpy.count_nonzero(orbspin[:nocc] == 0)
noccb = numpy.count_nonzero(orbspin[:nocc] == 1)
else:
nocca, noccb = nocc
nvira, nvirb = nmoa-nocca, nmob-noccb
if civec.size == nocca*nvira + (nocca*nvira)**2 + 1: # RCISD
c0, c1, c2 = cisd.cisdvec_to_amplitudes(civec, nmoa, nocca)
else: # UCISD
c0, c1, c2 = ucisd.cisdvec_to_amplitudes(civec, (nmoa,nmob), (nocca,noccb))
c1 = spatial2spin(c1, orbspin)
c2 = spatial2spin(c2, orbspin)
return amplitudes_to_cisdvec(c0, c1, c2)
from_rcisdvec = from_ucisdvec
def to_ucisdvec(civec, nmo, nocc, orbspin):
'''Convert the GCISD coefficient vector to UCISD coefficient vector'''
c0, c1, c2 = cisdvec_to_amplitudes(civec, nmo, nocc)
c1 = spin2spatial(c1, orbspin)
c2 = spin2spatial(c2, orbspin)
ucisdvec = ucisd.amplitudes_to_cisdvec(c0, c1, c2)
unorm = numpy.linalg.norm(ucisdvec)
if unorm < 1e-2:
raise RuntimeError('GCISD vector corresponds to spin-flip excitation. '
'It cannot be converted to UCISD wfn. '
'norm(UCISD) = %s' % unorm)
elif unorm < 0.99:
warnings.warn('GCISD vector has spin-flip excitation. '
'They are ignored when converting to UCISD wfn. '
'norm(UCISD) = %s' % unorm)
return ucisdvec
def to_fcivec(cisdvec, nelec, orbspin, frozen=None):
assert(numpy.count_nonzero(orbspin == 0) ==
numpy.count_nonzero(orbspin == 1))
norb = len(orbspin)
frozen_mask = numpy.zeros(norb, dtype=bool)
if frozen is None:
pass
elif isinstance(frozen, (int, numpy.integer)):
frozen_mask[:frozen] = True
else:
frozen_mask[frozen] = True
frozen = (numpy.where(frozen_mask[orbspin == 0])[0],
numpy.where(frozen_mask[orbspin == 1])[0])
nelec = (numpy.count_nonzero(orbspin[:nelec] == 0),
numpy.count_nonzero(orbspin[:nelec] == 1))
orbspin = orbspin[~frozen_mask]
nmo = len(orbspin)
nocc = numpy.count_nonzero(~frozen_mask[:sum(nelec)])
ucisdvec = to_ucisdvec(cisdvec, nmo, nocc, orbspin)
return ucisd.to_fcivec(ucisdvec, norb//2, nelec, frozen)
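# Illustrative note (not from the source): for an interleaved spin-orbital
# ordering, orbspin = numpy.array([0, 1, 0, 1, ...]) tags alpha (0) and
# beta (1) orbitals; to_fcivec/from_fcivec assert equal alpha and beta counts.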
def from_fcivec(ci0, nelec, orbspin, frozen=None):
if not (frozen is None or frozen == 0):
raise NotImplementedError
assert(numpy.count_nonzero(orbspin == 0) ==
numpy.count_nonzero(orbspin == 1))
norb = len(orbspin)
frozen_mask = numpy.zeros(norb, dtype=bool)
if frozen is None:
pass
elif isinstance(frozen, (int, numpy.integer)):
frozen_mask[:frozen] = True
else:
frozen_mask[frozen] = True
#frozen = (numpy.where(frozen_mask[orbspin == 0])[0],
# numpy.where(frozen_mask[orbspin == 1])[0])
nelec = (numpy.count_nonzero(orbspin[:nelec] == 0),
numpy.count_nonzero(orbspin[:nelec] == 1))
ucisdvec = ucisd.from_fcivec(ci0, norb//2, nelec, frozen)
nocc = numpy.count_nonzero(~frozen_mask[:sum(nelec)])
return from_ucisdvec(ucisdvec, nocc, orbspin[~frozen_mask])
def make_rdm1(myci, civec=None, nmo=None, nocc=None, ao_repr=False):
r'''
One-particle density matrix in the molecular spin-orbital representation
(the occupied-virtual blocks from the orbital response contribution are
not included).
dm1[p,q] = <q^\dagger p> (p,q are spin-orbitals)
The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
if civec is None: civec = myci.ci
if nmo is None: nmo = myci.nmo
if nocc is None: nocc = myci.nocc
d1 = _gamma1_intermediates(myci, civec, nmo, nocc)
return gccsd_rdm._make_rdm1(myci, d1, with_frozen=True, ao_repr=ao_repr)
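def _e1_from_rdm1(h1, rdm1):
    # Hedged sketch (not part of the original module) of the convention
    # documented above: with dm1[p,q] = <q^dagger p>, the one-particle
    # energy is E1 = einsum('pq,qp', h1, rdm1).
    return numpy.einsum('pq,qp', h1, rdm1)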
def make_rdm2(myci, civec=None, nmo=None, nocc=None, ao_repr=False):
r'''
Two-particle density matrix in the molecular spin-orbital representation
dm2[p,q,r,s] = <p^\dagger r^\dagger s q>
where p,q,r,s are spin-orbitals. p,q correspond to one particle and r,s
correspond to another particle. The contraction between ERIs (in
Chemist's notation) and rdm2 is
E = einsum('pqrs,pqrs', eri, rdm2)
'''
if civec is None: civec = myci.ci
if nmo is None: nmo = myci.nmo
if nocc is None: nocc = myci.nocc
d1 = _gamma1_intermediates(myci, civec, nmo, nocc)
d2 = _gamma2_intermediates(myci, civec, nmo, nocc)
return gccsd_rdm._make_rdm2(myci, d1, d2, with_dm1=True, with_frozen=True,
ao_repr=ao_repr)
def _gamma1_intermediates(myci, civec, nmo, nocc):
c0, c1, c2 = cisdvec_to_amplitudes(civec, nmo, nocc)
dvo = c0.conj() * c1.T
dvo += numpy.einsum('jb,ijab->ai', c1.conj(), c2)
dov = dvo.T.conj()
doo =-numpy.einsum('ia,ka->ik', c1.conj(), c1)
doo -= numpy.einsum('jiab,kiab->jk', c2.conj(), c2) * .5
dvv = numpy.einsum('ia,ic->ac', c1, c1.conj())
dvv += numpy.einsum('ijab,ijac->bc', c2, c2.conj()) * .5
return doo, dov, dvo, dvv
def _gamma2_intermediates(myci, civec, nmo, nocc):
c0, c1, c2 = cisdvec_to_amplitudes(civec, nmo, nocc)
goovv = c0 * c2.conj() * .5
govvv = numpy.einsum('ia,ikcd->kadc', c1, c2.conj()) * .5
gooov = numpy.einsum('ia,klac->klic', c1, c2.conj()) *-.5
goooo = numpy.einsum('ijab,klab->ijkl', c2.conj(), c2) * .25
gvvvv = numpy.einsum('ijab,ijcd->abcd', c2, c2.conj()) * .25
govvo = numpy.einsum('ijab,ikac->jcbk', c2.conj(), c2)
govvo+= numpy.einsum('ia,jb->ibaj', c1.conj(), c1)
dovov = goovv.transpose(0,2,1,3) - goovv.transpose(0,3,1,2)
doooo = goooo.transpose(0,2,1,3) - goooo.transpose(0,3,1,2)
dvvvv = gvvvv.transpose(0,2,1,3) - gvvvv.transpose(0,3,1,2)
dovvo = govvo.transpose(0,2,1,3)
dooov = gooov.transpose(0,2,1,3) - gooov.transpose(1,2,0,3)
dovvv = govvv.transpose(0,2,1,3) - govvv.transpose(0,3,1,2)
doovv = None
dvvov = None
return dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov
def trans_rdm1(myci, cibra, ciket, nmo=None, nocc=None):
r'''
One-particle transition density matrix in the molecular spin-orbital
representation.
dm1[p,q] = <q^\dagger p> (p,q are spin-orbitals)
The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
if nmo is None: nmo = myci.nmo
if nocc is None: nocc = myci.nocc
c0bra, c1bra, c2bra = myci.cisdvec_to_amplitudes(cibra, nmo, nocc)
c0ket, c1ket, c2ket = myci.cisdvec_to_amplitudes(ciket, nmo, nocc)
dvo = c0bra.conj() * c1ket.T
dvo += numpy.einsum('jb,ijab->ai', c1bra.conj(), c2ket)
dov = c0ket * c1bra.conj()
dov += numpy.einsum('jb,ijab->ia', c1ket, c2bra.conj())
doo =-numpy.einsum('ia,ka->ik', c1bra.conj(), c1ket)
doo -= numpy.einsum('jiab,kiab->jk', c2bra.conj(), c2ket) * .5
dvv = numpy.einsum('ia,ic->ac', c1ket, c1bra.conj())
dvv += numpy.einsum('ijab,ijac->bc', c2ket, c2bra.conj()) * .5
dm1 = numpy.empty((nmo,nmo), dtype=doo.dtype)
dm1[:nocc,:nocc] = doo
dm1[:nocc,nocc:] = dov
dm1[nocc:,:nocc] = dvo
dm1[nocc:,nocc:] = dvv
norm = numpy.dot(cibra, ciket)
dm1[numpy.diag_indices(nocc)] += norm
if myci.frozen is not None:
nmo = myci.mo_occ.size
nocc = numpy.count_nonzero(myci.mo_occ > 0)
rdm1 = numpy.zeros((nmo,nmo), dtype=dm1.dtype)
rdm1[numpy.diag_indices(nocc)] = norm
moidx = numpy.where(myci.get_frozen_mask())[0]
rdm1[moidx[:,None],moidx] = dm1
dm1 = rdm1
return dm1
class GCISD(cisd.CISD):
def vector_size(self):
nocc = self.nocc
nvir = self.nmo - nocc
noo = nocc * (nocc-1) // 2
nvv = nvir * (nvir-1) // 2
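# e.g. nocc=2, nvir=3: noo = 1, nvv = 3, so the length below is
# 1 + 2*3 + 1*3 = 10 (reference + singles + packed doubles).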
return 1 + nocc*nvir + noo*nvv
def get_init_guess(self, eris=None, nroots=1, diag=None):
# MP2 initial guess
if eris is None: eris = self.ao2mo(self.mo_coeff)
time0 = logger.process_clock(), logger.perf_counter()
mo_e = eris.mo_energy
nocc = self.nocc
eia = mo_e[:nocc,None] - mo_e[None,nocc:]
eijab = lib.direct_sum('ia,jb->ijab',eia,eia)
ci0 = 1
ci1 = eris.fock[:nocc,nocc:] / eia
eris_oovv = numpy.array(eris.oovv)
ci2 = eris_oovv / eijab
self.emp2 = 0.25*numpy.einsum('ijab,ijab', ci2.conj(), eris_oovv).real
logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2)
logger.timer(self, 'init mp2', *time0)
if abs(self.emp2) < 1e-3 and abs(ci1).sum() < 1e-3:
ci1 = 1. / eia
ci_guess = amplitudes_to_cisdvec(ci0, ci1, ci2)
if nroots > 1:
civec_size = ci_guess.size
dtype = ci_guess.dtype
nroots = min(ci1.size+1, nroots) # Consider Koopmans' theorem only
if diag is None:
idx = range(1, nroots)
else:
idx = diag[:ci1.size+1].argsort()[1:nroots] # exclude HF determinant
ci_guess = [ci_guess]
for i in idx:
g = numpy.zeros(civec_size, dtype)
g[i] = 1.0
ci_guess.append(g)
return self.emp2, ci_guess
def ao2mo(self, mo_coeff=None):
nmo = self.nmo
mem_incore = nmo**4*2 * 8/1e6
mem_now = lib.current_memory()[0]
if (self._scf._eri is not None and
(mem_incore+mem_now < self.max_memory) or self.mol.incore_anyway):
return gccsd._make_eris_incore(self, mo_coeff)
elif getattr(self._scf, 'with_df', None):
raise NotImplementedError
else:
return gccsd._make_eris_outcore(self, mo_coeff)
contract = contract
make_diagonal = make_diagonal
_dot = None
def to_fcivec(self, cisdvec, nelec, orbspin, frozen=None):
return to_fcivec(cisdvec, nelec, orbspin, frozen)
def from_fcivec(self, fcivec, nelec, orbspin, frozen=None):
return from_fcivec(fcivec, nelec, orbspin, frozen)
make_rdm1 = make_rdm1
make_rdm2 = make_rdm2
trans_rdm1 = trans_rdm1
def amplitudes_to_cisdvec(self, c0, c1, c2):
return amplitudes_to_cisdvec(c0, c1, c2)
def cisdvec_to_amplitudes(self, civec, nmo=None, nocc=None):
if nmo is None: nmo = self.nmo
if nocc is None: nocc = self.nocc
return cisdvec_to_amplitudes(civec, nmo, nocc)
@lib.with_doc(from_ucisdvec.__doc__)
def from_ucisdvec(self, civec, nocc=None, orbspin=None):
if nocc is None: nocc = self.nocc
if orbspin is None:
orbspin = getattr(self.mo_coeff, 'orbspin', None)
if orbspin is not None:
orbspin = orbspin[self.get_frozen_mask()]
assert(orbspin is not None)
return from_ucisdvec(civec, nocc, orbspin=orbspin)
from_rcisdvec = from_ucisdvec
@lib.with_doc(to_ucisdvec.__doc__)
def to_ucisdvec(self, civec, orbspin=None):
if orbspin is None:
orbspin = getattr(self.mo_coeff, 'orbspin', None)
if orbspin is not None:
orbspin = orbspin[self.get_frozen_mask()]
return to_ucisdvec(civec, self.nmo, self.nocc, orbspin)
def spatial2spin(self, tx, orbspin=None):
if orbspin is None:
orbspin = getattr(self.mo_coeff, 'orbspin', None)
if orbspin is not None:
orbspin = orbspin[self.get_frozen_mask()]
return spatial2spin(tx, orbspin)
def spin2spatial(self, tx, orbspin=None):
if orbspin is None:
orbspin = getattr(self.mo_coeff, 'orbspin', None)
if orbspin is not None:
orbspin = orbspin[self.get_frozen_mask()]
return spin2spatial(tx, orbspin)
CISD = GCISD
from pyscf import scf
scf.ghf.GHF.CISD = lib.class_as_method(CISD)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf.cc.addons import spatial2spin
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'sto-3g',
'O': 'sto-3g',}
mol.build()
mf = scf.UHF(mol).run(conv_tol=1e-14)
gmf = scf.addons.convert_to_ghf(mf)
myci = GCISD(gmf)
eris = myci.ao2mo()
ecisd, civec = myci.kernel(eris=eris)
print(ecisd - -0.048878084082066106)
nmo = eris.mo_coeff.shape[1]
rdm1 = myci.make_rdm1(civec, nmo, mol.nelectron)
rdm2 = myci.make_rdm2(civec, nmo, mol.nelectron)
mo = eris.mo_coeff[:7] + eris.mo_coeff[7:]
eri = ao2mo.kernel(mf._eri, mo, compact=False).reshape([nmo]*4)
eri[eris.orbspin[:,None]!=eris.orbspin,:,:] = 0
eri[:,:,eris.orbspin[:,None]!=eris.orbspin] = 0
h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
h1e = numpy.zeros((nmo,nmo))
idxa = eris.orbspin == 0
idxb = eris.orbspin == 1
h1e[idxa[:,None]&idxa] = h1a.ravel()
h1e[idxb[:,None]&idxb] = h1b.ravel()
e2 = (numpy.einsum('ij,ji', h1e, rdm1) +
numpy.einsum('ijkl,ijkl', eri, rdm2) * .5)
e2 += mol.energy_nuc()
print(myci.e_tot - e2) # = 0
print(abs(rdm1 - numpy.einsum('ijkk->ji', rdm2)/(mol.nelectron-1)).sum())
|
gkc1000/pyscf
|
pyscf/ci/gcisd.py
|
Python
|
apache-2.0
| 18,007
|
[
"PySCF"
] |
353fe3be47b9749740f338381f651c08517372772bf79729670b1cd630cf0bf5
|
from property import *
import nest
import numpy.random as random
# Neuron parameters
iaf_neuronparams = {'E_L': -70., # Resting membrane potential in mV
'V_th': -50., # Spike threshold in mV
'V_reset': -67., # Reset membrane potential after a spike in mV
'C_m': 2., # Capacity of the membrane in pF
't_ref': 2., # Duration of refractory period (V_m = V_reset) in ms
'V_m': -60., # Membrane potential in mV at start
'tau_syn_ex': 1., # Time constant of postsynaptic excitatory currents in ms
'tau_syn_in': 1.33} # Time constant of postsynaptic inhibitory currents in ms
# Synapse common parameters
STDP_synapseparams = {
'alpha': random.normal(0.5, 5.0), # Asymmetry parameter (scales depressing increments as alpha*lambda)
'lambda': 0.5 # Step size
}
# Glutamate synapse
STDP_synparams_Glu = dict({'delay': random.uniform(low=1.0, high=1.3), # Distribution of delay values for connections
'weight': w_Glu, # Weight (power) of synapse
'Wmax': 20.}, **STDP_synapseparams) # Maximum allowed weight
# GABA synapse
STDP_synparams_GABA = dict({'delay': random.uniform(low=1.0, high=1.3),
'weight': w_GABA,
'Wmax': -20.}, **STDP_synapseparams)
# Acetylcholine synapse
STDP_synparams_ACh = dict({'delay': random.uniform(low=1.0, high=1.3),
'weight': w_ACh,
'Wmax': 20.}, **STDP_synapseparams)
# Dopamine excitatory synapse
DOPA_synparams_ex = dict({'delay': 1.,
'weight': w_DA_ex,
'Wmax': 100.})
# Dopamine inhibitory synapse
DOPA_synparams_in = dict({'delay': 1.,
'weight': w_DA_in,
'Wmax': -100.})
# Dictionary of synapses with keys and their parameters
synapses = {Glu: (glu_synapse, w_Glu ),
GABA: (gaba_synapse, w_GABA ),
ACh: (ach_synapse, w_ACh ),
DA_ex: (dopa_synapse_ex, w_DA_ex),
DA_in: (dopa_synapse_in, w_DA_in)
}
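# Hedged usage sketch (synapse-model names come from the imported property
# module; the NEST 2.x syn_spec keys are an assumption):
# model, weight = synapses[Glu]
# nest.Connect(pre_ids, post_ids, syn_spec={'model': model, 'weight': weight})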
# Parameters for generator
static_syn = {
'weight': w_Glu * 5,
'delay': pg_delay
}
# Device parameters
multimeter_param = {'to_memory': True,
'to_file': False,
'withtime': True,
'interval': 0.1,
'record_from': ['V_m'],
'withgid': True}
detector_param = {'label': 'spikes',
'withtime': True,
'withgid': True,
'to_file': False,
'to_memory': True,
'scientific': True}
|
research-team/robot-dream
|
direct_translation/scripts/step_1 (columns+dopa)/parameters.py
|
Python
|
mit
| 2,916
|
[
"NEURON"
] |
aee377d7a28bfb9879486f5bf5a46b9396b4cf65b66f95495cbce2d91025a2db
|
import os, sys
import yaml
from utils import *
def fetch_genome(reference_name):
"""
Downloads a reference genome and prepares it for alignment
"""
from utils import script_dir
genome_list = yaml.safe_load(open(script_dir + "/utils/genomes.yaml","r"))
makedir("genomes")
if reference_name not in genome_list:
msg("Reference Genome not available", "error")
ftp_loc = genome_list[reference_name]
filename = os.path.split(ftp_loc)[1]
makedir("{script_dir}/genomes/{reference_name}".format(**locals()))
reference_loc = "{script_dir}/genomes/{reference_name}/{filename}".format(**locals())
if not file_exists( reference_loc + ".sa"):
print("Downloading {filename}".format(**locals()))
os.system("curl {ftp_loc} > {script_dir}/genomes/{reference_name}/{filename}".format(**locals()))
# Unzip and rezip with bgzip
if filename.endswith(".gz"):
os.system("gunzip {reference_loc} && bgzip {reference_loc2}".format(reference_loc=reference_loc, reference_loc2=reference_loc.replace(".gz","")))
print("Indexing {script_dir}/genomes/{reference_name}/{filename}".format(**locals()))
os.system("bwa index {script_dir}/genomes/{reference_name}/{filename}".format(**locals()))
else:
msg("Reference Already downloaded and indexed.", "error")
def list_genomes():
"""
Prints a list of available genomes.
"""
genome_list = yaml.safe_load(open(script_dir + "/utils/genomes.yaml","r"))
print("")
print("\033[1m%-30s\t%-30s\033[0m" % ("Reference Name", "Location"))
for k,v in genome_list.items():
print("%-30s\t%-30s" % (k, v))
print("")
|
AndersenLab/pyPipeline
|
utils/genomes.py
|
Python
|
mit
| 1,678
|
[
"BWA"
] |
ae43d1e4789198505c7f3147fd683d8bbe57668f6e6e45486b7469c8f8b5cfd5
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
""" pytest test configures """
import pytest
import os.path as osp
import numpy as np
import warnings
import sisl
pytestmark = [pytest.mark.io, pytest.mark.tbtrans]
_dir = osp.join('sisl', 'io', 'tbtrans')
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore", message="*.*.o2p")
def test_1_graphene_all_content(sisl_files):
""" This tests manifolds itself as:
sisl.geom.graphene(orthogonal=True).tile(3, 0).tile(5, 1)
All output is enabled:
### FDF ###
# Transmission related quantities
TBT.T.All T
TBT.T.Out T
TBT.T.Eig 2
# Density of states
TBT.DOS.Elecs T
TBT.DOS.Gf T
TBT.DOS.A T
TBT.DOS.A.All T
# Orbital currents and Crystal-Orbital investigations.
TBT.Symmetry.TimeReversal F
TBT.Current.Orb T
TBT.COOP.Gf T
TBT.COOP.A T
TBT.COHP.Gf T
TBT.COHP.A T
TBT.k [100 1 1]
### FDF ###
"""
tbt = sisl.get_sile(sisl_files(_dir, '1_graphene_all.TBT.nc'))
assert tbt.E.min() > -2.
assert tbt.E.max() < 2.
# We have 400 energy-points
ne = len(tbt.E)
assert ne == 400
assert tbt.ne == ne
# We have 100 k-points
nk = len(tbt.kpt)
assert nk == 100
assert tbt.nk == nk
assert tbt.wk.sum() == pytest.approx(1.)
for i in range(ne):
assert tbt.Eindex(i) == i
assert tbt.Eindex(tbt.E[i]) == i
# Check raises
with pytest.warns(sisl.SislWarning):
tbt.Eindex(tbt.E.min() - 1.)
with pytest.warns(sisl.SislInfo):
tbt.Eindex(tbt.E.min() - 2e-3)
with pytest.warns(sisl.SislWarning):
tbt.kindex([0, 0, 0.5])
# Can't hit it
#with pytest.warns(sisl.SislInfo):
# tbt.kindex([0.0106, 0, 0])
for i in range(nk):
assert tbt.kindex(i) == i
assert tbt.kindex(tbt.kpt[i]) == i
# Get geometry
geom = tbt.geometry
geom_c1 = tbt.read_geometry(atoms=sisl.Atoms(sisl.Atom[6], geom.na))
geom_c2 = tbt.read_geometry(atoms=sisl.Atoms(sisl.Atom(6, orbs=2), geom.na))
assert geom_c1 == geom_c2
# Check read is the same as the direct query
assert tbt.na == geom.na
assert tbt.no == geom.no
assert tbt.no == geom.na
assert tbt.na == 3 * 5 * 4
assert np.allclose(tbt.cell, geom.cell)
# Check device atoms (1-orbital system)
assert tbt.na_d == tbt.no_d
assert tbt.na_d == 36 # 3 * 5 * 4 (and device is without electrodes, so 3 * 3 * 4)
assert len(tbt.pivot()) == 3 * 3 * 4 # 3 * 5 * 4 (and device is without electrodes, so 3 * 3 * 4)
assert len(tbt.pivot(in_device=True)) == len(tbt.pivot())
assert np.all(tbt.pivot(in_device=True, sort=True) == np.arange(tbt.no_d))
assert np.all(tbt.pivot(sort=True) == np.sort(tbt.pivot()))
# Just check they are there
assert tbt.n_btd() == len(tbt.btd())
# Check electrodes
assert len(tbt.elecs) == 2
elecs = tbt.elecs[:]
assert elecs == ['Left', 'Right']
for i, elec in enumerate(elecs):
assert tbt._elec(i) == elec
# Check the chemical potentials
for elec in elecs:
assert tbt.n_btd(elec) == len(tbt.btd(elec))
assert tbt.chemical_potential(elec) == pytest.approx(0.)
assert tbt.electron_temperature(elec) == pytest.approx(300., abs=1)
assert tbt.eta(elec) == pytest.approx(1e-4, abs=1e-6)
# Check electrode relevant stuff
left = elecs[0]
right = elecs[1]
# Assert we have transmission symmetry
assert np.allclose(tbt.transmission(left, right),
tbt.transmission(right, left))
assert np.allclose(tbt.transmission_eig(left, right),
tbt.transmission_eig(right, left))
# Check that the total transmission is larger than the sum of transmission eigenvalues
assert np.all(tbt.transmission(left, right) + 1e-7 >= tbt.transmission_eig(left, right).sum(-1))
assert np.all(tbt.transmission(right, left) + 1e-7 >= tbt.transmission_eig(right, left).sum(-1))
# Check that we can't retrieve from same to same electrode
with pytest.raises(ValueError):
tbt.transmission(left, left)
with pytest.raises(ValueError):
tbt.transmission_eig(left, left)
assert np.allclose(tbt.transmission(left, right, kavg=False),
tbt.transmission(right, left, kavg=False))
# Both methods should be identical for simple bulk systems
assert np.allclose(tbt.reflection(left), tbt.reflection(left, from_single=True), atol=1e-5)
# Also check for each k
for ik in range(nk):
assert np.allclose(tbt.transmission(left, right, ik),
tbt.transmission(right, left, ik))
assert np.allclose(tbt.transmission_eig(left, right, ik),
tbt.transmission_eig(right, left, ik))
assert np.all(tbt.transmission(left, right, ik) + 1e-7 >= tbt.transmission_eig(left, right, ik).sum(-1))
assert np.all(tbt.transmission(right, left, ik) + 1e-7 >= tbt.transmission_eig(right, left, ik).sum(-1))
assert np.allclose(tbt.DOS(kavg=ik), tbt.ADOS(left, kavg=ik) + tbt.ADOS(right, kavg=ik))
assert np.allclose(tbt.DOS(E=0.195, kavg=ik), tbt.ADOS(left, E=0.195, kavg=ik) + tbt.ADOS(right, E=0.195, kavg=ik))
# Check that norm returns correct values
assert tbt.norm() == 1
assert tbt.norm(norm='all') == tbt.no_d
assert tbt.norm(norm='atom') == tbt.norm(norm='orbital')
# Check atom is equivalent to orbital
for norm in ['atom', 'orbital']:
assert tbt.norm(0, norm=norm) == 0.
assert tbt.norm(3*4, norm=norm) == 1
assert tbt.norm(range(3*4, 3*5), norm=norm) == 3
# Assert sum(ADOS) == DOS
assert np.allclose(tbt.DOS(), tbt.ADOS(left) + tbt.ADOS(right))
assert np.allclose(tbt.DOS(sum=False), tbt.ADOS(left, sum=False) + tbt.ADOS(right, sum=False))
# Now check orbital resolved DOS
assert np.allclose(tbt.DOS(sum=False), tbt.ADOS(left, sum=False) + tbt.ADOS(right, sum=False))
# Current must be 0 when the chemical potentials are equal
assert tbt.current(left, right) == pytest.approx(0.)
assert tbt.current(right, left) == pytest.approx(0.)
high_low = tbt.current_parameter(left, 0.5, 0.0025, right, -0.5, 0.0025)
low_high = tbt.current_parameter(left, -0.5, 0.0025, right, 0.5, 0.0025)
assert high_low > 0.
assert low_high < 0.
assert - high_low == pytest.approx(low_high)
with pytest.warns(sisl.SislWarning):
tbt.current_parameter(left, -10., 0.0025, right, 10., 0.0025)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# Since this is a perfect system there should be *no* QM shot-noise
# Also, the shot-noise is related to the applied bias, so NO shot-noise
assert np.allclose((tbt.shot_noise(left, right, kavg=False) * tbt.wkpt.reshape(-1, 1)).sum(0), 0.)
assert np.allclose(tbt.shot_noise(left, right), 0.)
assert np.allclose(tbt.shot_noise(right, left), 0.)
assert np.allclose(tbt.shot_noise(left, right, kavg=1), 0.)
# Since the data-file does not contain all T-eigs (only the first two)
# we can't correctly calculate the fano factors
# This is a pristine system, hence all fano-factors should be more or less zero
# All transmissions are step-functions, however close to band-edges there is some
# smearing.
# When calculating the Fano factor it is extremely important that the zero_T is *sufficient*
# I don't know which value is *good*
assert np.all((tbt.fano(left, right, kavg=False) * tbt.wkpt.reshape(-1, 1)).sum(0) <= 1)
assert np.all(tbt.fano(left, right) <= 1)
assert np.all(tbt.fano(right, left) <= 1)
assert np.all(tbt.fano(left, right, kavg=0) <= 1)
# Neither should the noise_power exist
assert (tbt.noise_power(right, left, kavg=False) * tbt.wkpt).sum() == pytest.approx(0.)
assert tbt.noise_power(right, left) == pytest.approx(0.)
assert tbt.noise_power(right, left, kavg=0) == pytest.approx(0.)
# Check specific DOS queries
DOS = tbt.DOS
ADOS = tbt.ADOS
assert DOS(2, atoms=True, sum=False).size == geom.names['Device'].size
assert np.allclose(DOS(2, atoms='Device', sum=False), DOS(2, atoms=True, sum=False))
assert DOS(2, orbitals=True, sum=False).size == geom.a2o('Device', all=True).size
assert ADOS(left, 2, atoms=True, sum=False).size == geom.names['Device'].size
assert ADOS(left, 2, orbitals=True, sum=False).size == geom.a2o('Device', all=True).size
assert np.allclose(ADOS(left, 2, atoms='Device', sum=False), ADOS(left, 2, atoms=True, sum=False))
atoms = range(8, 40) # some in device, some not in device
for o in ['atoms', 'orbitals']:
opt = {o: atoms}
for E in [None, 2, 4]:
assert np.allclose(DOS(E), ADOS(left, E) + ADOS(right, E))
assert np.allclose(DOS(E, **opt), ADOS(left, E, **opt) + ADOS(right, E, **opt))
opt['sum'] = False
for E in [None, 2, 4]:
assert np.allclose(DOS(E), ADOS(left, E) + ADOS(right, E))
assert np.allclose(DOS(E, **opt), ADOS(left, E, **opt) + ADOS(right, E, **opt))
opt['sum'] = True
opt['norm'] = o[:-1]
for E in [None, 2, 4]:
assert np.allclose(DOS(E), ADOS(left, E) + ADOS(right, E))
assert np.allclose(DOS(E, **opt), ADOS(left, E, **opt) + ADOS(right, E, **opt))
opt['sum'] = False
for E in [None, 2, 4]:
assert np.allclose(DOS(E), ADOS(left, E) + ADOS(right, E))
assert np.allclose(DOS(E, **opt), ADOS(left, E, **opt) + ADOS(right, E, **opt))
# Check orbital currents
E = 201
# Sum of orbital current should be 0 (in == out)
orb_left = tbt.orbital_current(left, E)
orb_right = tbt.orbital_current(right, E)
assert orb_left.sum() == pytest.approx(0., abs=1e-7)
assert orb_right.sum() == pytest.approx(0., abs=1e-7)
d1 = np.arange(12, 24).reshape(-1, 1)
d2 = np.arange(24, 36).reshape(-1, 1)
assert orb_left[d1, d2.T].sum() == pytest.approx(tbt.transmission(left, right)[E])
assert orb_left[d1, d2.T].sum() == pytest.approx(-orb_left[d2, d1.T].sum())
assert orb_right[d2, d1.T].sum() == pytest.approx(tbt.transmission(right, left)[E])
assert orb_right[d2, d1.T].sum() == pytest.approx(-orb_right[d1, d2.T].sum())
orb_left.sort_indices()
atom_left = tbt.bond_current(left, E, only='all')
atom_left.sort_indices()
assert np.allclose(orb_left.data, atom_left.data)
assert np.allclose(orb_left.data, tbt.bond_current_from_orbital(orb_left, only='all').data)
orb_right.sort_indices()
atom_right = tbt.bond_current(right, E, only='all')
atom_right.sort_indices()
assert np.allclose(orb_right.data, atom_right.data)
assert np.allclose(orb_right.data, tbt.bond_current_from_orbital(orb_right, only='all').data)
# Calculate the atom current
# For 1-orbital systems the activity and non-activity are equivalent
assert np.allclose(tbt.atom_current(left, E), tbt.atom_current(left, E, activity=False))
tbt.vector_current(left, E)
assert np.allclose(tbt.vector_current_from_bond(atom_left) / 2, tbt.vector_current(left, E, only='all'))
# Check COOP curves
coop = tbt.orbital_COOP(E)
coop_l = tbt.orbital_ACOOP(left, E)
coop_r = tbt.orbital_ACOOP(right, E)
coop_lr = coop_l + coop_r
# Ensure alignment of the sparse indices before comparing the raw data
coop.eliminate_zeros()
coop.sort_indices()
coop_lr.eliminate_zeros()
coop_lr.sort_indices()
assert np.allclose(coop.data, coop_lr.data)
coop = tbt.orbital_COOP(E, isc=[0, 0, 0])
coop_l = tbt.orbital_ACOOP(left, E, isc=[0, 0, 0])
coop_r = tbt.orbital_ACOOP(right, E, isc=[0, 0, 0])
coop_lr = coop_l + coop_r
coop.eliminate_zeros()
coop.sort_indices()
coop_lr.eliminate_zeros()
coop_lr.sort_indices()
assert np.allclose(coop.data, coop_lr.data)
coop = tbt.atom_COOP(E)
coop_l = tbt.atom_ACOOP(left, E)
coop_r = tbt.atom_ACOOP(right, E)
coop_lr = coop_l + coop_r
coop.eliminate_zeros()
coop.sort_indices()
coop_lr.eliminate_zeros()
coop_lr.sort_indices()
assert np.allclose(coop.data, coop_lr.data)
coop = tbt.atom_COOP(E, isc=[0, 0, 0])
coop_l = tbt.atom_ACOOP(left, E, isc=[0, 0, 0])
coop_r = tbt.atom_ACOOP(right, E, isc=[0, 0, 0])
coop_lr = coop_l + coop_r
coop.eliminate_zeros()
coop.sort_indices()
coop_lr.eliminate_zeros()
coop_lr.sort_indices()
assert np.allclose(coop.data, coop_lr.data)
# Check COHP curves
coop = tbt.orbital_COHP(E)
coop_l = tbt.orbital_ACOHP(left, E)
coop_r = tbt.orbital_ACOHP(right, E)
coop_lr = coop_l + coop_r
coop.eliminate_zeros()
coop.sort_indices()
coop_lr.eliminate_zeros()
coop_lr.sort_indices()
assert np.allclose(coop.data, coop_lr.data)
coop = tbt.atom_COHP(E)
coop_l = tbt.atom_ACOHP(left, E)
coop_r = tbt.atom_ACOHP(right, E)
coop_lr = coop_l + coop_r
coop.eliminate_zeros()
coop.sort_indices()
coop_lr.eliminate_zeros()
coop_lr.sort_indices()
assert np.allclose(coop.data, coop_lr.data)
# Simply print out information
tbt.info()
for elec in elecs:
tbt.info(elec)
@pytest.mark.slow
def test_1_graphene_all_tbtav(sisl_files, sisl_tmp):
tbt = sisl.get_sile(sisl_files(_dir, '1_graphene_all.TBT.nc'))
f = sisl_tmp('1_graphene_all.TBT.AV.nc', _dir)
tbt.write_tbtav(f)
def test_1_graphene_all_fail_kavg(sisl_files, sisl_tmp):
tbt = sisl.get_sile(sisl_files(_dir, '1_graphene_all.TBT.nc'))
with pytest.raises(ValueError):
tbt.transmission(kavg=[0, 1])
@pytest.mark.filterwarnings("ignore:.*requesting energy")
def test_1_graphene_all_fail_kavg_E(sisl_files, sisl_tmp):
tbt = sisl.get_sile(sisl_files(_dir, '1_graphene_all.TBT.nc'))
with pytest.raises(ValueError):
tbt.orbital_COOP(kavg=[0, 1], E=0.1)
def test_1_graphene_all_ArgumentParser(sisl_files, sisl_tmp):
pytest.importorskip("matplotlib", reason="matplotlib not available")
import matplotlib as mpl
mpl.rcParams["text.usetex"] = False
# Local routine to run the collected actions
def run(ns):
ns._actions_run = True
# Run all so-far collected actions
for A, Aargs, Akwargs in ns._actions:
A(*Aargs, **Akwargs)
ns._actions_run = False
ns._actions = []
tbt = sisl.get_sile(sisl_files(_dir, '1_graphene_all.TBT.nc'))
p, ns = tbt.ArgumentParser()
p.parse_args([], namespace=ns)
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--energy', ' -1.995:1.995'], namespace=ns)
assert not out._actions_run
run(out)
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--kpoint', '1'], namespace=ns)
assert out._krng
run(out)
assert out._krng == 1
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--norm', 'orbital'], namespace=ns)
run(out)
assert out._norm == 'orbital'
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--norm', 'atom'], namespace=ns)
run(out)
assert out._norm == 'atom'
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--kpoint', '1', '--norm', 'orbital'], namespace=ns)
run(out)
assert out._krng == 1
assert out._norm == 'orbital'
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--atom', '10:11,14'], namespace=ns)
run(out)
assert out._Ovalue == '10:11,14'
# Only atom 14 is in the device region
assert np.all(out._Orng + 1 == [14])
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--atom', '10:11,12,14:20'], namespace=ns)
run(out)
assert out._Ovalue == '10:11,12,14:20'
# Only 13-48 is in the device
assert np.all(out._Orng + 1 == [14, 15, 16, 17, 18, 19, 20])
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--transmission', 'Left', 'Right'], namespace=ns)
run(out)
assert len(out._data) == 2
assert out._data_header[0][0] == 'E'
assert out._data_header[1][0] == 'T'
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--transmission', 'Left', 'Right',
'--transmission-bulk', 'Left'], namespace=ns)
run(out)
assert len(out._data) == 3
assert out._data_header[0][0] == 'E'
assert out._data_header[1][0] == 'T'
assert out._data_header[2][:2] == 'BT'
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--dos', '--dos', 'Left', '--ados', 'Right'], namespace=ns)
run(out)
assert len(out._data) == 4
assert out._data_header[0][0] == 'E'
assert out._data_header[1][0] == 'D'
assert out._data_header[2][:2] == 'AD'
assert out._data_header[3][:2] == 'AD'
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--bulk-dos', 'Left', '--ados', 'Right'], namespace=ns)
run(out)
assert len(out._data) == 3
assert out._data_header[0][0] == 'E'
assert out._data_header[1][:2] == 'BD'
assert out._data_header[2][:2] == 'AD'
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--transmission-eig', 'Left', 'Right'], namespace=ns)
run(out)
assert out._data_header[0][0] == 'E'
for i in range(1, len(out._data)):
assert out._data_header[i][:4] == 'Teig'
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--info'], namespace=ns)
# Test output
f = sisl_tmp('1_graphene_all.dat', _dir)
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--transmission-eig', 'Left', 'Right', '--out', f], namespace=ns)
assert len(out._data) == 0
f1 = sisl_tmp('1_graphene_all_1.dat', _dir)
f2 = sisl_tmp('1_graphene_all_2.dat', _dir)
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--transmission', 'Left', 'Right', '--out', f1,
'--dos', '--atom', '12:2:48', '--dos', 'Right', '--ados', 'Left', '--out', f2], namespace=ns)
d = sisl.io.tableSile(f1).read_data()
assert len(d) == 2
d = sisl.io.tableSile(f2).read_data()
assert len(d) == 4
assert np.allclose(d[1, :], (d[2, :] + d[3, :])* 2)
assert np.allclose(d[2, :], d[3, :])
f = sisl_tmp('1_graphene_all_T.png', _dir)
p, ns = tbt.ArgumentParser()
out = p.parse_args(['--transmission', 'Left', 'Right',
'--transmission-bulk', 'Left',
'--plot', f], namespace=ns)
# Requesting an orbital outside of the device region
def test_1_graphene_all_warn_orbital(sisl_files):
tbt = sisl.get_sile(sisl_files(_dir, '1_graphene_all.TBT.nc'))
with pytest.warns(sisl.SislWarning):
tbt.o2p(1)
# Requesting an atom outside of the device region
def test_1_graphene_all_warn_atom(sisl_files):
tbt = sisl.get_sile(sisl_files(_dir, '1_graphene_all.TBT.nc'))
with pytest.warns(sisl.SislWarning):
tbt.a2p(1)
def test_1_graphene_all_sparse_data_isc_request(sisl_files):
tbt = sisl.get_sile(sisl_files(_dir, '1_graphene_all.TBT.nc'))
# get supercell with isc
sc = tbt.read_supercell()
# request the full matrix
for elec in [0, 1]:
J_all = tbt.orbital_current(elec, 204)
J_all.eliminate_zeros()
# Ensure we actually have something
assert J_all.nnz > 0
# partial summed isc
# Test that the full matrix and individual access is the same
J_sum = sum(tbt.orbital_current(elec, 204, isc=isc)
for isc in sc.sc_off)
assert J_sum.nnz == J_all.nnz
assert (J_sum - J_all).nnz == 0
def test_1_graphene_all_sparse_data_orbitals(sisl_files):
tbt = sisl.get_sile(sisl_files(_dir, '1_graphene_all.TBT.nc'))
# request the full matrix
J_all = tbt.orbital_current(0, 204)
J_12 = tbt.orbital_current(0, 204, orbitals=[2, 3])
assert J_12.nnz < J_all.nnz // 2
|
zerothi/sisl
|
sisl/io/tbtrans/tests/test_tbt.py
|
Python
|
mpl-2.0
| 19,984
|
[
"CRYSTAL"
] |
10efb15d45aa459a1dc50c435d74772007e1f122863b76d5ea9d4c26a27abb85
|
import sys
import numpy as np
from numpy import mean, cov, dot, transpose
from numpy import linalg as LA
import math
from scipy.sparse import csr_matrix
from __builtin__ import str
from copy import deepcopy
import matplotlib.pyplot as plt
from numpy.linalg import inv
#np.random.seed(0)
class Neuron(object):
"""Neuron Class
This class represents a neuron by a list of `Node`s. Borrowing from the swc format, each node indicates a point on the neuron. Each node has a parent and children (at most two children), and the set of all nodes with their parents makes a tree structure: a connected graph without loops. The soma is represented by a few nodes, one of which is called the root node; every node in the neuron (including the other soma nodes) is a descendant of the root. Notice that all nodes from index 0 to index `n_soma` in the `nodes_list` are soma.
This class contains the attributes to calculate different features of the neuron. The input can be an swc file or a list of nodes.
Parameters
-----------
file_format : string, optional (default=None)
input_file : string or list
if it is a string, the swc file at that address is read;
if it is a list, the elements of the list should be objects of the Node class,
and the corresponding Tree is created.
all indexing for non-soma nodes (or the nodes that are very close to the soma) comes from self.nodes_list
Attributes
----------
n_soma : int
The number of the nodes that represent the soma.
n_node : int
The number of all the nodes in the neuron.
nodes_list : list of Node
The list of all the nodes in the neuron.
root : Node
The node that represents the root.
location : array of shape = [3, n_node]
Three-dimensional locations of the nodes.
parent_index : array of shape = [n_node]
The index of the parent of each node in the nodes_list.
child_index : array of shape = [2, n_node]
first row: the index of the first child of the node (the ordering of the nodes is arbitrary).
second row: nan if the node is order one, and the index of the second child if it is a branching node.
branch_order : array of shape = [n_node]
The number of children of each node. It can be any integer for the root (first element) and only 0, 1 or 2 for the other nodes.
ext_red_list : array of shape = [3, n_node]
first row: end points and order one nodes (for extension)
second row: end points (for removing)
third row: end points whose parents are order one nodes (for extension)
connection : array of shape = [n_node, n_node]
The connectivity matrix of the nodes. The element (i,j) of the matrix is not np.nan if node i is a descendant of node j; its value is then the distance of j to its parent. It is useful for calculating the neural distance over the Euclidean distance.
frustum : array of shape = [n_node] !!!NOT IMPLEMENTED!!!
The value of the frustum from the node toward its parent.
rall_ratio : array of shape = [n_node] !!!NOT IMPLEMENTED!!!
It is not nan only at branching nodes, where its value is the Rall ratio.
distance_from_root : array of shape = [n_node]
Euclidean distance toward the root.
distance_from_parent : array of shape = [n_node]
Euclidean distance toward the parent of the node.
slope : array of shape = [n_node]
ratio of the Euclidean distance toward the parent of the node over their diameter difference.
branch_angle : array of shape [3, n_nodes]
it shows the angles at the branching nodes: the first row is the angle between the two outward segments at the branching point; the second and third rows are the angles between each outward segment and the previous segment at the branching, in arbitrary order (nan at other nodes).
angle_global : array of shape = [n_node]
The angle between the line linking the node to the root and the line linking it to its parent.
local_angle : array of shape = [n_node]
The angle between the line linking the node to its parent and the line linking it to its child, and nan otherwise.
References
----------
.. [1] R.Farhoodi, K.P.Kording, "Generating Neuron Morphologies using naive Bayes MCMC"
"""
def __init__(self, file_format = None, input_file = None):
"""
Default constructor. There are four ways of representing the neuron.
In 'swc' the swc file is given and the output is a Neuron class with all of its attributes.
In 'swc without attributes' the swc file is given and the output is a Neuron class without its attributes. This is useful when only the nodes matter, e.g. for fast visualization of the neuron.
In 'only list of nodes' the list of all the nodes of the neuron is given.
In 'Matrix of swc' an n*7 swc matrix is given.
"""
if(file_format == 'swc'): # swc is given
self.read_swc(input_file)
self.ratio_red_to_ext = 1.
self.n_node = len(self.nodes_list)
#self.set_n_soma_n_node()
self.set_parent()
self.parent_index = self.parent_index.astype(int)
#self.set_loc_diam()
self.fit()
if(file_format == 'swc without attributes'):
self.read_swc(input_file)
self.set_parent()
self.parent_index = self.parent_index.astype(int)
if(file_format == 'only list of nodes'):
self.nodes_list = input_file
self.root = self.find_root(self.nodes_list[0])
self.set_n_soma_n_node()
self.set_parent()
self.parent_index = self.parent_index.astype(int)
self.set_loc_diam()
self.fit()
if(file_format == 'Matrix of swc'):
# the n*7 array is given.
self.read_swc_matrix(input_file)
self.set_parent()
self.parent_index = self.parent_index.astype(int)
self.set_branch_order()
#self.set_sholl()
def __str__(self):
return "Neuron found with" + str(self.n_node) + " number of nodes and" + str(self.n_soma) + "number of node representing soma."
def fit(self):
"""
dependency:
self.nodes_list
self.n_soma
self.location
self.diameter
self.parent_index
self.child_index
self.root
self.n_nodes
output attributes are:
self.branch_order
self.connection
self.ext_red_list
self.rall_ratio
self.distance_from_root
self.distance_from_parent
self.branch_angle
self.global_angle
self.local_angle
self.frustum
"""
self.set_branch_order()
self.set_distance_from_root()
self.set_distance_from_parent()
self.set_connection2()
#self.set_rall_ratio()
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
#self.sholl_r = np.array([0]) # the position of the jumps for sholl analysis
#self.sholl_n = np.array([0]) # the value at the jumping (the same size as self.sholl_x)
#self.set_sholl()
self.ratio_red_to_ext = 1.
self.set_ext_red_list()
self.set_features()
def set_n_soma_n_node(self):
self.n_soma = 0
for n in self.nodes_list:
if n.type == 'soma':
self.n_soma += 1
if(self.n_soma == 0): # for no soma representation
self.n_soma = 1
self.n_node = len(self.nodes_list)
def set_features(self):
self.features = {}
self.features['Nnodes'] = np.array([self.n_node - self.n_soma])
#self.features['asymetric']
(num_branches,) = np.where(self.branch_order[self.n_soma:] == 2)
self.features['Nbranch'] = np.array([len(num_branches)])
self.features['initial_segments'] = np.array([len(self.root.children)])
self.features['global_angle'] = np.pi - self.global_angle[self.n_soma:]
#self.features['diameter'] = self.diameter[self.n_soma:]
self.features['distance_from_parent'] = self.distance_from_parent[self.n_soma:]
self.features['distance_from_root'] = self.distance_from_root[self.n_soma:]
self.features['ratio_euclidian_neuronal'] = np.nansum(self.connection[self.n_soma:,self.n_soma:],axis = 1)/self.distance_from_root[self.n_soma:]
x = np.abs(self.branch_angle[0,self.n_soma:])
self.features['branch_angle'] = x[~np.isnan(x)]
x = self.local_angle[self.n_soma:]
self.features['local_angle'] = x[~np.isnan(x)]
if(len(self.features['local_angle'])==0):
self.features['local_angle'] = np.array([0])
if(len(self.features['branch_angle']) == 0):
self.features['branch_angle'] = np.array([0])
self.features['discrepancy_space'] = np.array([self.discrepancy(10.,10.,10.)])
#self.features['repellent'] = np.array([self.repellent(10.,10.,10.)])
self.features['curvature'] = self.set_curvature()
important_node = self.important_nodes()
parent_important = self.parent_index_for_node_subset(important_node)
(neural, euclidan) = self.set_neural_euclid_branch(important_node, parent_important)
self.features['neural_important'] = neural
self.features['euclidian_important'] = euclidan
self.features['ratio_neural_euclidian_important'] = neural/euclidan
self.features['branch_angle_segment'] = self.set_branch_angle_segment(important_node, parent_important)
def important_nodes(self):
(branch_index,) = np.where(self.branch_order[self.n_soma:]==2)
(end_nodes,) = np.where(self.branch_order[self.n_soma:]==0)
important_node = np.append(branch_index,end_nodes)
if(len(important_node)!=0):
important_node = self.n_soma + important_node
return important_node
def set_neural_euclid_branch(self, important_node, thier_parents):
neural = np.array([])
euclidan = np.array([])
for i in range(important_node.shape[0]):
neural_length = self.distance(important_node[i],thier_parents[i])
euclidan_length = LA.norm(self.location[:,important_node[i]] - self.location[:,thier_parents[i]],2)
neural = np.append(neural, neural_length)
euclidan = np.append(euclidan, euclidan_length)
return neural, euclidan
def discrepancy(self, x_mesh, y_mesh, z_mesh):
X = self.normlize(self.location[0,:],x_mesh)
Y = self.normlize(self.location[1,:],y_mesh)
Z = self.normlize(self.location[2,:],z_mesh)
L = X + x_mesh*Y + x_mesh*y_mesh*Z
a, N = np.unique(L, return_counts=True)
return len(a) # np.flipud(np.sort(N))
def repellent(self, x_mesh, y_mesh, z_mesh):
X = self.normlize(self.location[0,:],x_mesh)
Y = self.normlize(self.location[1,:],y_mesh)
Z = self.normlize(self.location[2,:],z_mesh)
L = X + x_mesh*Y + x_mesh*y_mesh*Z
a, N = np.unique(L, return_counts=True)
return len(a) # np.flipud(np.sort(N))
def normlize(self, vector, mesh):
m = min(vector)
M = max(vector)
if(M==m):
a = 0
else:
a = np.floor(mesh*((vector - m)/(M-m)))
return a
def set_branch_order(self):
# terminal = 0, passing (neither) = 1, branch = 2
"""
dependency:
nodes_list
"""
self.branch_order = np.array([])
for n in self.nodes_list:
self.branch_order = np.append(self.branch_order, len(n.children))
def set_ratio_red_to_ext(self,c):
self.ratio_red_to_ext = c
def set_ext_red_list(self):
"""
In the extension-reduction perturbation, either one node is removed or one node is added. In the first case, the node can only be
an end point, but in the second case the new node may be attached to any node that has one or zero children.
dependency:
self.nodes_list
self.branch_order
self.n_soma
self.ratio_red_to_ext
ext_red_list:
first row: end points and order-one nodes (for extension)
second row: end points (for removing)
third row: end points whose parents are order-one nodes (for extension)
Remarks:
1) The list is zero for the soma nodes.
2) The values in the first and third rows are binary; the second row holds self.ratio_red_to_ext.
"""
(I,) = np.where(self.branch_order[self.n_soma:] == 0)
I = I + self.n_soma
self.ext_red_list = np.zeros((3, self.n_node))
self.ext_red_list[0,I] = 1
self.ext_red_list[0,np.where(self.branch_order == 1)] = 1
self.ext_red_list[1,I] = self.ratio_red_to_ext
J = np.array([])
for i in I:
if(len((self.nodes_list[i].parent).children) == 1):
J = np.append(J,i)
J = np.array(J, dtype = int)
self.ext_red_list[2,J] = 1
self.ext_red_list.astype(int)
self.ext_red_list[:,0:self.n_soma] = 0
def set_distance_from_root(self):
"""
dependency:
self.location
"""
self.distance_from_root = np.sqrt(sum(self.location ** 2))
def set_distance_from_parent(self):
"""
given:
self.location
self.parent_index
"""
a = (self.location - self.location[:,self.parent_index.astype(int)]) ** 2
self.distance_from_parent = np.sqrt(sum(a))
def set_branch_angle_segment(self, important_node, parent_important):
I = np.array([])
for i in important_node:
(J,) = np.where(parent_important == i)
if(len(J) == 2):
vec0 = np.expand_dims(self.location[:,important_node[J[0]]] - self.location[:,i], axis = 1)
vec1 = np.expand_dims(self.location[:,important_node[J[1]]] - self.location[:,i], axis = 1)
I = np.append(I,self.angle_vec_matrix(vec0,vec1))
return I
def set_branch_angle(self):
"""
An array of size [3, n_node] holding the angles at the branching nodes:
First row: the angle between the two outward segments at the branching point.
Second and third rows: the angles between each outward segment and the previous segment, in arbitrary order (nan at other nodes).
dependency:
self.nodes_list
self.branch_order
self.location
self.parent_index
self.child_index
self.n_soma
"""
self.branch_angle = np.nan*np.zeros([3,self.n_node])
(I,) = np.where(self.branch_order == 2)
I = I[I>self.n_soma]
vec0 = self.location[:,self.child_index[0,I].astype(int)] - self.location[:,I]
vec1 = self.location[:,self.child_index[1,I].astype(int)] - self.location[:,I]
vec2 = self.location[:,self.parent_index[I].astype(int)] - self.location[:,I]
self.branch_angle[0,I] = self.angle_vec_matrix(vec0,vec1)
self.branch_angle[1,I] = self.angle_vec_matrix(vec0,vec2)
self.branch_angle[2,I] = self.angle_vec_matrix(vec2,vec1)
def set_global_angle(self):
"""
dependency:
self.location
self.parent_index
self.n_soma
"""
direction = self.location - self.location[:,self.parent_index.astype(int)]
self.global_angle = self.angle_vec_matrix(self.location, direction)
def set_local_angle(self):
"""
dependency:
self.location
self.n_soma
self.branch_order
self.parent_index
"""
self.local_angle = np.nan*np.ones(self.n_node)
(I,) = np.where(self.branch_order[self.n_soma:] == 1)
I = I + self.n_soma
dir1 = self.location[:,I] - self.location[:,self.parent_index[I].astype(int)]
dir2 = self.location[:,I] - self.location[:,self.child_index[0,I].astype(int)]
self.local_angle[I] = self.angle_vec_matrix(dir1, dir2)
def set_frustum(self):
"""
dependency:
self.distance_from_parent
self.n_soma
self.diameter
self.parent_index
"""
self.frustum = np.array([0])
l = self.distance_from_parent[self.n_soma:]
r = self.diameter[self.n_soma:]
R = self.diameter[self.parent_index][self.n_soma:]
f = (np.pi/3.0)*l*(r ** 2 + R ** 2 + r * R)
self.frustum = np.append(np.zeros(self.n_soma), f)
def set_curvature(self):
par = self.parent_index
papar = par[par]
papapar = par[par[par]]
dir1 = self.location[:,par] - self.location
dir2 = self.location[:,papar] - self.location[:,par]
dir3 = self.location[:,papapar] - self.location[:,papar]
cros1 = np.cross(np.transpose(dir1), np.transpose(dir2))
cros2 = np.cross(np.transpose(dir2), np.transpose(dir3))
I = self.angle_vec_matrix(np.transpose(cros1), np.transpose(cros2))
return I[self.n_soma:]
def set_rall_ratio(self):
"""
dependency:
self.diameter
self.child_index
self.n_soma
self.n_node
"""
self.rall_ratio = np.nan*np.ones(self.n_node)
(I,) = np.where(self.branch_order[self.n_soma:] == 2)
ch1 = np.power(self.diameter[self.child_index[0,I]],2./3.)
ch2 = np.power(self.diameter[self.child_index[1,I]],2./3.)
n = np.power(self.diameter[I],2./3.)
self.rall_ratio[I] = (ch1+ch2)/n
def set_values_ite(self):
"""
set iteratively the following attributes:
parent_index
child_index
location
diameter
rall_ratio
distance_from_root
distance_from_parent
slope
branch_angle
branch_order
"""
self.parent_index = np.zeros(self.n_soma)
self.child_index = np.nan * np.ones([2,self.n_soma])
for n in self.nodes_list[1:]:
self.location = np.append(self.location, n.xyz.reshape([3,1]), axis = 1)
self.diameter = np.append(self.diameter, n.r)
for n in self.nodes_list[1:]:
#self.frustum = np.append(self.frustum, self.calculate_frustum(n))
#self.rall_ratio = np.append(self.rall_ratio, self.calculate_rall(n))
self.distance_from_root = np.append(self.distance_from_root, self.calculate_distance_from_root(n))
self.distance_from_parent = np.append(self.distance_from_parent, self.calculate_distance_from_parent(n))
#self.slope = np.append(self.slope, self.calculate_slope(n))
ang, ang1, ang2 = self.calculate_branch_angle(n)
an = np.zeros([3,1])
an[0,0] = ang
an[1,0] = ang1
an[2,0] = ang2
if(self.branch_angle.shape[1] == 0):
self.branch_angle = an
else:
self.branch_angle = np.append(self.branch_angle, an, axis = 1)
glob_ang, local_ang = self.calculate_node_angles(n)
self.global_angle = np.append(self.global_angle, glob_ang)
self.local_angle = np.append(self.local_angle, local_ang)
#self.neural_distance_from_soma = np.append(self.neural_distance_from_soma, self.calculate_neural_distance_from_soma(n))
for n in self.nodes_list[self.n_soma:]:
self.parent_index = np.append(self.parent_index, self.get_index_for_no_soma_node(n.parent))
if(self.branch_order[self.get_index_for_no_soma_node(n)]==2):
a = np.array([self.get_index_for_no_soma_node(n.children[0]),self.get_index_for_no_soma_node(n.children[1])]).reshape(2,1)
self.child_index = np.append(self.child_index, a, axis = 1)
if(self.branch_order[self.get_index_for_no_soma_node(n)]==1):
a = np.array([self.get_index_for_no_soma_node(n.children[0]),np.nan]).reshape(2,1)
self.child_index = np.append(self.child_index, a, axis = 1)
if(self.branch_order[self.get_index_for_no_soma_node(n)]==0):
a = np.array([np.nan,np.nan]).reshape(2,1)
self.child_index = np.append(self.child_index, a, axis = 1)
def set_parent(self):
self.parent_index = np.zeros(self.n_soma)
self.child_index = np.zeros([2,self.n_node])
for n in self.nodes_list[self.n_soma:]:
par = self.get_index_for_no_soma_node(n.parent)
node = self.get_index_for_no_soma_node(n)
self.parent_index = np.append(self.parent_index, par)
if self.child_index[0,par] != 0:
self.child_index[1,par] = node
else:
self.child_index[0,par] = node
self.child_index[self.child_index == 0] = np.nan
self.child_index[:,0:self.n_soma] = np.nan
#self.parent_index.astype(int)
def set_loc_diam(self):
self.location = np.zeros([3,self.n_node])
self.diameter = np.zeros(self.n_node)
for n in range(self.n_node):
self.location[:,n] = self.nodes_list[n].xyz
self.diameter[n] = self.nodes_list[n].r
def set_connection2(self):
"""
dependency:
self.nodes_list
self.n_soma
self.n_node
self.parent_index
self.distance_from_parent
"""
connection = np.zeros([self.n_node,self.n_node]) # the connectivity matrix
connection[np.arange(self.n_node), self.parent_index.astype(int)] = 1
connection[0,0] = 0
connection = inv(np.eye(self.n_node) - connection)
connection[connection != 1] = np.nan
for i in range(self.n_node):
(J,) = np.where(~np.isnan(connection[:,i]))
connection[J,i] = self.distance_from_parent[i]
connection[:,0] = 1
connection[np.arange(self.n_soma),np.arange(self.n_soma)] = 1
self.connection = connection
#return connection
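# Note on the inversion trick used above: with P[i, parent(i)] = 1 and P
# zero elsewhere, the geometric series (I - P)^{-1} = I + P + P^2 + ...
# sums the powers of the parent relation, so entry (i, j) is nonzero
# exactly when j is an ancestor of i (or j == i). A tiny sketch on the
# three-node chain 0 <- 1 <- 2:
#
#     P = np.array([[0., 0., 0.],
#                   [1., 0., 0.],
#                   [0., 1., 0.]])
#     inv(np.eye(3) - P)
#     # -> [[1., 0., 0.],
#     #     [1., 1., 0.],
#     #     [1., 1., 1.]]   row i marks node i and all of its ancestors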
def set_connection(self):
"""
connection is an array with size [n_node, n_node]. The element (i,j) is not np.nan if
node i is a descendant of node j. The value of that entry is the distance of j to its parent.
dependency:
self.nodes_list
self.n_soma
self.parent_index
self.distance_from_parent
"""
self.parent_index = np.array(self.parent_index, dtype = int)
L = self.n_node - self.n_soma
C = csr_matrix((np.ones(L),(range(self.n_soma,self.n_node), self.parent_index[self.n_soma:])), shape = (self.n_node,self.n_node))
self.connection = np.zeros([self.n_node,self.n_node]) # the connectivity matrix
new = 0
i = 0
old = C.sum()
while(new != old):
self.connection = C.dot(csr_matrix(self.connection)) + C
old = new
new = self.connection.sum()
self.connection = self.connection.toarray()
self.connection[range(1,self.n_node),range(1,self.n_node)] = 1
self.connection[:,:self.n_soma] = 0
# fill the matrix with the distance
for i in range(self.n_node):
self.connection[self.connection[:,i] != 0,i] = self.distance_from_parent[i]
self.connection[self.connection == 0] = np.nan
def set_sholl(self):
self.sholl_r = np.array([])
for n in self.nodes_list:
dis = LA.norm(self.xyz(n) - self.root.xyz,2)
self.sholl_r = np.append(self.sholl_r, dis)
self.sholl_r = np.sort(np.array(self.sholl_r))
self.sholl_n = np.zeros(self.sholl_r.shape)
for n in self.nodes_list:
if(n.parent != None):
par = n.parent
dis_par = LA.norm(self.xyz(par) - self.root.xyz,2)
dis_n = LA.norm(self.xyz(n) - self.root.xyz,2)
M = max(dis_par, dis_n)
m = min(dis_par, dis_n)
I = np.logical_and(self.sholl_r>=m, self.sholl_r<=M)
self.sholl_n[I] = self.sholl_n[I] + 1
def xyz(self, node):
return self.location[:,self.get_index_for_no_soma_node(node)]
def _r(self, node):
return self.diameter[self.get_index_for_no_soma_node(node)]
def parent_index_for_node_subset(self, subset):
"""
inputs
------
indices of a subset of the nodes (typically without the root node; it is prepended if missing)
output
------
Index of the nearest ancestor inside the subset for each member of the subset
"""
if((subset==0).sum() == 0):
subset = np.append(0,subset)
n = subset.shape[0]
self.connection[:,0] = 1.
self.connection[np.arange(self.n_soma),np.arange(self.n_soma)] = 1.
A = self.connection[np.ix_(subset,subset)]
A[np.isnan(A)] = 0
A[A!=0] = 1.
B = np.eye(subset.shape[0]) - inv(A)
return subset[np.where(B==1)[1]]
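# Why B = I - inv(A) works above: restricted to the subset, A is the
# ancestor-indicator matrix (diagonal included), i.e. in effect
# A = (I - C)^{-1} where C marks each node's nearest ancestor inside the
# subset. Hence inv(A) = I - C and B = I - inv(A) = C, so
# np.where(B == 1)[1] reads off the nearest in-subset ancestor of each
# member (the root row contributes nothing).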
def distance(self, index1, index2):
"""
Neural distance between two nodes of the neuron.
inputs
------
index1, index2 : the indices of the nodes.
output
------
the neural distance between the two nodes.
"""
return min(self.distance_two_node_up_down(index1,index2),self.distance_two_node_up_down(index2,index1))
def distance_two_node_up_down(self, Upindex, Downindex):
(up,) = np.where(~np.isnan(self.connection[Downindex,:]))
(down,) = np.where(~np.isnan(self.connection[:,Upindex]))
I = np.intersect1d(up,down)
if(I.shape[0] != 0):
return sum(self.distance_from_parent[I]) - self.distance_from_parent[Upindex]
else:
return np.inf
def calculate_overall_matrix(self, node):
j = self.get_index_for_no_soma_node(node)
k = self.get_index_for_no_soma_node(node.parent)
(J,) = np.where(~ np.isnan(self.connection[:,j]))
dis = LA.norm(self.location[:,k] - self.location[:,j],2)
self.connection[J,j] = dis
def calculate_branch_order(self,node):
"""
terminal = 0, passing (neither) = 1, branch = 2
"""
return len(node.children)
def calculate_frustum(self,node):
"""
The volume of the frustum (the node with its parent) at each location (nan for the soma nodes).
"""
r = self._r(node)
r_par = self._r(node.parent)
dis = LA.norm(self.xyz(node) - self.xyz(node.parent) ,2)
f = dis*(np.pi/3.0)*(r*r + r*r_par + r_par*r_par)
return f
def calculate_rall(self,node):
if(len(node.children) == 2):
n1, n2 = node.children
r1 = self._r(n1)
r2 = self._r(n2)
r = self._r(node)
rall = (np.power(r1,2.0/3.0)+(np.power(r2,2.0/3.0)))/np.power(r,2.0/3.0)
else:
rall = np.nan
return rall
def calculate_distance_from_root(self,node):
return LA.norm(self.xyz(node) - self.root.xyz,2)
def calculate_distance_from_parent(self,node):
return LA.norm(self.xyz(node) - self.xyz(node.parent),2)
def calculate_slope(self,node):
# the ratio of: delta(pos)/delta(radius)
dis = LA.norm(self.xyz(node) - self.xyz(node.parent),2)
rad = node.r - node.parent.r
if(dis == 0):
val = rad
else:
val = rad/dis
return val
def calculate_branch_angle(self,node):
# the angles between the outward segments and the previous segment at a branching node (nan at other nodes)
if(len(node.children) == 2):
n1, n2 = node.children
nodexyz = self.xyz(node)
node_parxyz = self.xyz(node.parent)
node_chixyz1 = self.xyz(n1)
node_chixyz2 = self.xyz(n2)
vec = node_parxyz - nodexyz
vec1 = node_chixyz1 - nodexyz
vec2 = node_chixyz2 - nodexyz
ang = self.angle_vec(vec1,vec2) # the angle of two outward segments at the branching point (nan for non-branchings)
ang1 = self.angle_vec(vec1,vec)
ang2 = self.angle_vec(vec2,vec)
else:
ang = np.nan
ang1 = np.nan
ang2 = np.nan
return ang, ang1, ang2
def calculate_node_angles(self,node):
par = node.parent
nodexyz = self.xyz(node)
node_parxyz = self.xyz(node.parent)
vec1 = node_parxyz - nodexyz
vec2 = self.root.xyz - nodexyz
glob_ang = self.angle_vec(vec1,vec2)
if(node.children != None):
if(len(node.children) ==1):
[child] = node.children
vec3 = self.xyz(child) - nodexyz
local_ang = self.angle_vec(vec1,vec3)
else:
local_ang = np.nan
else:
local_ang = np.nan
return glob_ang, local_ang
# Axulary functions
def angle_vec_matrix(self,matrix1,matrix2):
"""
Takes two 3*n matrices, matrix1 and matrix2, and gives back the angle
between each pair of corresponding column vectors.
Note: if the norm of one of the vectors is zero, the returned angle is 0 (unlike `angle_vec`, which returns np.pi in that case).
"""
ang = np.zeros(matrix1.shape[1])
norm1 = LA.norm(matrix1, axis = 0)
norm2 = LA.norm(matrix2, axis = 0)
domin = norm1*norm2
(J,) = np.where(domin != 0)
ang[J] = np.arccos(np.maximum(np.minimum(sum(matrix1[:,J]*matrix2[:,J])/domin[J],1),-1))
return ang
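# Quick sanity check for angle_vec_matrix (a sketch; orthogonal unit
# vectors should give pi/2):
#
#     m1 = np.array([[1.], [0.], [0.]])
#     m2 = np.array([[0.], [1.], [0.]])
#     neuron.angle_vec_matrix(m1, m2)   # -> array([ 1.57079633])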
def angle_vec(self,vec1,vec2):
val = sum(vec1*vec2)/(LA.norm(vec1,2)*LA.norm(vec2,2))
if(LA.norm(vec1,2)==0 or LA.norm(vec2,2) == 0):
val = -1
return math.acos(max(min(val,1),-1))
def choose_random_node_index(self):
n = np.floor((self.n_node-self.n_soma)*np.random.random_sample()).astype(int)
return n + self.n_soma
def p_ext_red_whole(self):
"""
Gives back the (unnormalized) probability weight of choosing a node to remove or
extend, over the whole neuron.
"""
return self.ext_red_list[0:2,:].sum()+1 # 1 added because the root may extend
def p_ext_red_end_point(self):
"""
Gives back the (unnormalized) probability weight of choosing one of the end points to remove or
extend.
"""
return self.ext_red_list[1:3,:].sum()
def get_index_for_no_soma_node(self,node):
return self.nodes_list.index(node)
def _list_for_local_update(self,node):
"""
Return the index of node, its parent and any children it may have.
The node should be a non-soma node.
"""
update_list = np.array([]) # index of all nodes for update
update_list = np.append(update_list, self.get_index_for_no_soma_node(node))
if(node.parent.type != 'soma'):
update_list = np.append(update_list, self.get_index_for_no_soma_node(node.parent)) # if the node's parent is not in the non-soma list, i.e. its parent is a soma, get_index would return nothing
if(node.children != None):
for n in node.children:
update_list = np.append(update_list, self.get_index_for_no_soma_node(n))
return update_list.astype(int)
def _update_attribute(self,update_list):
for ind in update_list:
#self.frustum[ind] = self.calculate_frustum(self.nodes_list[ind])
#self.rall_ratio[ind] = self.calculate_rall(self.nodes_list[ind])
self.distance_from_root[ind] = self.calculate_distance_from_root(self.nodes_list[ind])
self.distance_from_parent[ind] = self.calculate_distance_from_parent(self.nodes_list[ind])
#self.slope[ind] = self.calculate_slope(self.nodes_list[ind])
self.branch_order[ind] = self.calculate_branch_order(self.nodes_list[ind])
ang, ang1, ang2 = self.calculate_branch_angle(self.nodes_list[ind])
self.branch_angle[0, ind] = ang
self.branch_angle[1, ind] = ang1
self.branch_angle[2, ind] = ang2
ang1, ang2 = self.calculate_node_angles(self.nodes_list[ind])
self.global_angle[ind] = ang1
self.local_angle[ind] = ang2
self.calculate_overall_matrix(self.nodes_list[ind])
#self.sholl_r = np.array([]) # the position of the jumps for sholl analysis
#self.sholl_n = np.array([]) # the value at the jumping (the same size as self.sholl_x)
def change_location(self,index,displace):
"""
Change the location of one node in the neuron and update the attributes accordingly.
Parameters:
___________
index: the index of the node in no_soma_list whose location is changed
displace: the new location of the node is its current xyz plus displace
"""
# First change the location of the node by displace
node = self.nodes_list[index]
self.location[:,index] += displace
self._update_attribute(self._list_for_local_update(node))
self.set_features()
def change_location_toward_end_nodes(self,index,displace):
(I,) = np.where(~np.isnan(self.connection[:,index]))
self.location[0,I] += displace[0]
self.location[1,I] += displace[1]
self.location[2,I] += displace[2]
self.set_distance_from_root()
self.set_distance_from_parent()
self.connection[np.ix_(I,[index])] = self.distance_from_parent[index]
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def change_location_important(self, index, displace):
(branch_index,) = np.where(self.branch_order[self.n_soma:]==2)
(end_nodes,) = np.where(self.branch_order[self.n_soma:]==0)
branch_index += self.n_soma
end_nodes += self.n_soma
I = np.append(branch_index, end_nodes)
parents = self.parent_index_for_node_subset(I)
(ind,) = np.where(I == index)
origin = deepcopy(self.location[:,index])
# correct the segment to the parent
par = parents[ind][0]
(up,) = np.where(~np.isnan(self.connection[index,:]))
(down,) = np.where(~np.isnan(self.connection[:,par]))
J = np.intersect1d(up,down)
A = self.location[:,J]
loc = self.location[:,par]
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
r1 = origin - loc
r2 = r1 + displace
M = self.scalar_rotation_matrix_to_map_two_vector(r1, r2)
A = np.dot(M,A)
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,J] = A
changed_ind = J
# correct the children
(ch,) = np.where(parents == index)
for i in I[ch]:
(up,) = np.where(~np.isnan(self.connection[i,:]))
(down,) = np.where(~np.isnan(self.connection[:,index]))
J = np.intersect1d(up,down)
A = self.location[:,J]
loc = self.location[:,i]
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
r1 = origin - loc
r2 = r1 + displace
M = self.scalar_rotation_matrix_to_map_two_vector( r1, r2)
A = np.dot(M,A)
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,J] = A
changed_ind = np.append(changed_ind, J)
self.location[:,index] = origin + displace
self.set_distance_from_root()
self.set_distance_from_parent()
for i in changed_ind:
(J,) = np.where(~np.isnan(self.connection[:,i]))
self.connection[J,i] = self.distance_from_parent[i]
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def scalar_rotation_matrix_to_map_two_vector(self, v1, v2):
r1 = LA.norm(v1,2)
norm1 = v1/r1
r2 = LA.norm(v2,2)
normal2 = v2/r2
a = sum(normal2*norm1)
theta = -np.arccos(a)
normal2 = normal2 - a*norm1
norm2 = normal2/LA.norm(normal2,2)
cross = np.cross(norm1, norm2)
B = np.zeros([3,3])
B[:,0] = norm1
B[:,1] = norm2
B[:,2] = cross
A = np.eye(3)
A[0,0] = np.cos(theta)
A[1,0] = - np.sin(theta)
A[0,1] = np.sin(theta)
A[1,1] = np.cos(theta)
return (r2/r1) * np.dot(np.dot(B,A),inv(B))
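# The matrix returned above satisfies M.dot(v1) == v2 up to floating
# point (assuming v1 and v2 are not parallel): B maps the standard basis
# onto an orthonormal basis of the v1-v2 plane, A rotates by the angle
# between them inside that plane, and the factor r2/r1 rescales. A sketch:
#
#     v1 = np.array([1., 0., 0.])
#     v2 = np.array([0., 2., 0.])
#     M = neuron.scalar_rotation_matrix_to_map_two_vector(v1, v2)
#     np.allclose(M.dot(v1), v2)   # -> True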
def change_diameter(self,index,ratio):
"""
Change the diameter of one node in the neuron and update the attributes accordingly.
Parameters:
___________
index: the index of node in no_soma_list to change its diameter
ratio: the radius of new node is the radius of current node times ratio
"""
node = self.nodes_list[index]
node.r = ratio*node.r
r = node.r
self.diameter[index] = r
self._update_attribute(self._list_for_local_update(node))
self.set_features()
def rescale_toward_end(self,node, rescale):
"""
Rescale the part of the neuron from the node toward the end nodes.
input
-----
node : `Node` class
the node whose descendants in the neuron will have their locations rescaled.
rescale : positive float
The factor by which to rescale that part of the neuron.
"""
index = self.get_index_for_no_soma_node(node)
(I,) = np.where(~np.isnan(self.connection[:,index]))
A = self.location[:,I]
loc = self.xyz(node)
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
A = rescale*A
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,I] = A
self.set_distance_from_root()
self.set_distance_from_parent()
I = I.tolist()
I.remove(index)
I = np.array(I,dtype = int)
self.connection[:,I] *= rescale
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def rotate(self, node, matrix):
"""
Rotate the neuron around the parent of the node with the given matrix.
The attribute to update:
location
distance_from_root
branch_angle
angle_global
local_angle
"""
# set of nodes under parent of node
par = node.parent
(I,) = np.where(~np.isnan(self.connection[:,self.get_index_for_no_soma_node(par)]))
A = self.location[:,I]
loc = self.xyz(par)
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
A = np.dot(matrix, A)
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,I] = A
self.set_distance_from_root()
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def rotate_from_branch(self, node, matrix):
branch_index = self.get_index_for_no_soma_node(node.parent)
(I,) = np.where(~np.isnan(self.connection[:,self.get_index_for_no_soma_node(node)]))
#I = np.append(I, branch_index)
A = self.location[:,I]
loc = self.xyz(node.parent)
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
A = np.dot(matrix, A)
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,I] = A
self.set_distance_from_root()
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def remove_node(self, index):
"""
Removes a non-soma node from the neuron and updates the features
Parameters
-----------
index : the index of the node in the no_soma_list
the node should be one of the end points, otherwise an error is raised
"""
self.n_node -= 1
node = self.nodes_list[index]
parent_index = self.get_index_for_no_soma_node(node.parent)
# details of the removed node for return
p = node.parent
node.parent.remove_child(node)
l = self.location[:,index] - self.location[:,parent_index]
r = self.diameter[index]/self.diameter[parent_index]
self.location = np.delete(self.location,index, axis = 1)
self.nodes_list.remove(node)
self.branch_order = np.delete(self.branch_order,index)
new_parent_index = self.get_index_for_no_soma_node(p)
self.branch_order[new_parent_index] -= 1
self.diameter = np.delete(self.diameter,index)
#self.frustum = np.delete(self.frustum,index)
#self.rall_ratio = np.delete(self.rall_ratio,index)
self.distance_from_root = np.delete(self.distance_from_root,index)
self.distance_from_parent = np.delete(self.distance_from_parent,index)
#self.slope = np.delete(self.slope,index)
self.branch_angle = np.delete(self.branch_angle,index, axis = 1)
self.global_angle = np.delete(self.global_angle,index)
self.local_angle = np.delete(self.local_angle,index)
self.connection = np.delete(self.connection,index, axis = 0)
self.connection = np.delete(self.connection,index, axis = 1)
self.parent_index = np.delete(self.parent_index,index)
I = np.where(self.parent_index > index)
self.parent_index[I] -= 1
self.child_index = np.delete(self.child_index,index,axis = 1)
I , J = np.where(self.child_index > index)
self.child_index[I,J] -= 1
if p.type != 'soma':
if len(p.children) == 1:
self.branch_angle[0,new_parent_index] = np.nan
self.branch_angle[1,new_parent_index] = np.nan
self.branch_angle[2,new_parent_index] = np.nan
gol, loc = self.calculate_node_angles(self.nodes_list[new_parent_index])
self.child_index[:,new_parent_index] = np.array([self.get_index_for_no_soma_node(p.children[0]), np.nan])
self.local_angle[new_parent_index] = loc
if len(p.children) == 0:
self.local_angle[new_parent_index] = np.nan
self.child_index[:,new_parent_index] = np.array([np.nan, np.nan])
#self.sholl_r = None # the position of the jumps for sholl analysis
#self.sholl_n = None # the value at the jumping (the same size as self.sholl_x)
self.set_ext_red_list()
self.set_features()
return p, l, r
def extend_node(self,parent,location,ratio):
"""
Extend the neuron by adding one end point and update the attributes for the new neuron.
Parameters:
___________
parent: the node that the extended node is attached to
location: the xyz of the new node is the sum of location and the xyz of the parent
ratio: the radius of the new node is the radius of the parent times ratio
"""
self.n_node += 1
if parent == 'soma':
parent = self.root
n = Node()
in_par = self.get_index_for_no_soma_node(parent)
n.type = 'apical'
R = ratio * self.diameter[in_par]
n.parent = parent
parent.add_child(n)
self.location = np.append(self.location, (self.location[:,in_par] + location).reshape([3,1]), axis = 1)
self.diameter = np.append(self.diameter, R)
self.nodes_list.append(n)
#self.frustum = np.append(self.frustum,np.nan)
self.branch_order = np.append(self.branch_order ,0)
self.branch_order[self.get_index_for_no_soma_node(parent)] += 1
#self.rall_ratio = np.append(self.rall_ratio ,np.nan)
self.distance_from_root = np.append(self.distance_from_root,np.nan)
self.distance_from_parent = np.append(self.distance_from_parent ,np.nan)
#self.slope = np.append(self.slope ,np.nan)
if(self.branch_angle.shape[1] == 0):
self.branch_angle = np.nan*np.ones([3,1])
else:
self.branch_angle = np.append(self.branch_angle, np.nan*np.ones([3,1]), axis = 1)
self.global_angle = np.append(self.global_angle ,np.nan)
self.local_angle = np.append(self.local_angle ,np.nan)
l = self.connection.shape[0]
I = np.nan*np.zeros([1,l])
(J,) = np.where(~np.isnan(self.connection[self.get_index_for_no_soma_node(parent),:]))
I[0,J] = self.connection[self.get_index_for_no_soma_node(parent),J]
self.connection = np.append(self.connection, I , axis = 0)
self.connection = np.append(self.connection, np.nan*np.zeros([l+1,1]), axis = 1)
self.connection[l,l] = LA.norm(location,2)
self.parent_index = np.append(self.parent_index, self.get_index_for_no_soma_node(parent))
self.child_index = np.append(self.child_index,np.array([np.nan, np.nan]).reshape(2,1), axis = 1)
if parent.type != 'soma':
if(len(parent.children) == 1):
self.child_index[:,self.get_index_for_no_soma_node(parent)] = np.array([self.get_index_for_no_soma_node(n), np.nan])
if(len(parent.children) == 2):
self.child_index[1,self.get_index_for_no_soma_node(parent)] = self.get_index_for_no_soma_node(n)
update_list = self._list_for_local_update(n)
self._update_attribute(update_list)
self.set_ext_red_list()
self.set_features()
return self.get_index_for_no_soma_node(n)
def add_extra_node(self, node):
print(1)  # stub
def slide(self, moving_node_index, no_branch_node_index):
"""
"""
# adjust nodes
moving_node = self.nodes_list[moving_node_index]
no_branch_node = self.nodes_list[no_branch_node_index]
parent_moving_node = moving_node.parent
parent_moving_node_index = self.get_index_for_no_soma_node(parent_moving_node)
parent_moving_node.remove_child(moving_node)
moving_node.parent = no_branch_node
no_branch_node.add_child(moving_node)
# adjust parent_index and child_index
self.parent_index[moving_node_index] = no_branch_node_index
a = self.child_index[:,parent_moving_node_index]
if(self.branch_order[parent_moving_node_index] == 2):
if(a[0] == moving_node_index):
self.child_index[:,parent_moving_node_index] = np.array([a[1],np.nan])
if(a[1] == moving_node_index):
self.child_index[:,parent_moving_node_index] = np.array([a[0],np.nan])
if(self.branch_order[parent_moving_node_index] == 1):
self.child_index[:,parent_moving_node_index] = np.array([np.nan,np.nan])
self.branch_order[parent_moving_node_index] -= 1
#self.set_parent()
if(self.branch_order[no_branch_node_index] == 1):
a = self.child_index[:,no_branch_node_index]
self.child_index[:,no_branch_node_index] = np.array([a[0],moving_node_index])
if(self.branch_order[no_branch_node_index] == 0):
self.child_index[:,no_branch_node_index] = np.array([moving_node_index,np.nan])
self.branch_order[no_branch_node_index] += 1
# adjust location
(segment,) = np.where(~np.isnan(self.connection[:,moving_node_index]))
self.location[0,segment] += self.location[0,no_branch_node_index] - self.location[0,parent_moving_node_index]
self.location[1,segment] += self.location[1,no_branch_node_index] - self.location[1,parent_moving_node_index]
self.location[2,segment] += self.location[2,no_branch_node_index] - self.location[2,parent_moving_node_index]
# adjust connection
(up_ind,) = np.where(~np.isnan(self.connection[parent_moving_node_index,:]))
self.connection[np.ix_(segment,up_ind)] = np.nan
(down_ind,) = np.where(~np.isnan(self.connection[no_branch_node_index,:]))
a = self.distance_from_parent[down_ind].reshape([1,len(down_ind)])
A = np.repeat(a,len(segment),axis = 0)
self.connection[np.ix_(segment,down_ind)] = A
self.set_ext_red_list()
self.set_distance_from_root()
self.set_distance_from_parent()
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
#self.set_frustum()
self.set_features()
def horizental_stretch(self, node_index, parent_node, scale):
(up,) = np.where(~np.isnan(self.connection[node_index,:]))
(down,) = np.where(~np.isnan(self.connection[:,parent_node]))
I = np.intersect1d(up,down)
A = self.location[:,I]
loc = self.location[:,parent_node]
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
r = self.location[:,node_index] - loc
r = r/LA.norm(r,2)
A = scale*A +(1-scale)*(np.dot(np.expand_dims(r,axis = 1),np.expand_dims(np.dot(r,A),axis = 0)))
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,I] = A
self.set_distance_from_root()
self.set_distance_from_parent()
for i in I:
(J,) = np.where(~np.isnan(self.connection[:,i]))
self.connection[J,i] = self.distance_from_parent[i]
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def vertical_stretch(self, node_index, parent_node, scale):
(up,) = np.where(~np.isnan(self.connection[node_index,:]))
(down,) = np.where(~np.isnan(self.connection[:,parent_node]))
I = np.intersect1d(up,down)
A = self.location[:,I]
loc = self.location[:,parent_node]
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
r = self.location[:,node_index] - loc
new_loc = -(1-scale)*(r)
r = r/LA.norm(r,2)
A = A -(1-scale)*(np.dot(np.expand_dims(r,axis = 1),np.expand_dims(np.dot(r,A),axis = 0)))
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,I] = A
(T,) = np.where(~np.isnan(self.connection[:,node_index]))
T = list(T)
T.remove(node_index)
A = self.location[:,T]
A[0,:] += new_loc[0]
A[1,:] += new_loc[1]
A[2,:] += new_loc[2]
self.location[:,T] = A
self.set_distance_from_root()
self.set_distance_from_parent()
T = np.array(T)
for i in np.append(T,I):
(J,) = np.where(~np.isnan(self.connection[:,i]))
self.connection[J,i] = self.distance_from_parent[i]
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def sinusidal(self, node_index, parent_index, hight, n_vertical, n_horizental):
"""
NOT READY: the body below is an unfinished copy of `vertical_stretch` and still references `parent_node` and `scale`, which are not parameters of this method.
"""
(up,) = np.where(~np.isnan(self.connection[node_index,:]))
(down,) = np.where(~np.isnan(self.connection[:,parent_node]))
I = np.intersect1d(up,down)
A = self.location[:,I]
loc = self.location[:,parent_node]
A[0,:] = A[0,:] - loc[0]
A[1,:] = A[1,:] - loc[1]
A[2,:] = A[2,:] - loc[2]
r = self.location[:,node_index] - loc
new_loc = -(1-scale)*(r)
r = r/LA.norm(r,2)
A = A -(1-scale)*(np.dot(np.expand_dims(r,axis = 1),np.expand_dims(np.dot(r,A),axis = 0)))
A[0,:] = A[0,:] + loc[0]
A[1,:] = A[1,:] + loc[1]
A[2,:] = A[2,:] + loc[2]
self.location[:,I] = A
(T,) = np.where(~np.isnan(self.connection[:,node_index]))
T = list(T)
T.remove(node_index)
A = self.location[:,T]
A[0,:] += new_loc[0]
A[1,:] += new_loc[1]
A[2,:] += new_loc[2]
self.location[:,T] = A
self.set_distance_from_root()
self.set_distance_from_parent()
T = np.array(T)
for i in np.append(T,I):
(J,) = np.where(~np.isnan(self.connection[:,i]))
self.connection[J,i] = self.distance_from_parent[i]
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
self.set_features()
def get_root(self):
"""
Obtain the root Node
Returns
-------
root : :class:`Node`
"""
return self.__root
def is_root(self, node):
"""
Check whether a Node is the root Node
Parameters
-----------
node : :class:`Node`
Node to be checked for being the root
Returns
--------
is_root : boolean
True if the queried Node is the root, False otherwise
"""
if node.parent is None:
return True
else:
return False
def is_leaf(self, node):
"""
Check whether a Node is a leaf Node, i.e., a Node without children
Parameters
-----------
node : :class:`Node`
Node to be checked for being a leaf Node
Returns
--------
is_leaf : boolean
True if the queried Node is a leaf, False otherwise
"""
if len(node.children) == 0:
return True
else:
return False
def is_branch(self, node):
"""
Check whether a Node is a branch Node, i.e., a Node with two children
Parameters
-----------
node : :class:`Node`
Node to be checked for being a branch Node
Returns
--------
is_branch : boolean
True if the queried Node is a branch, False otherwise
"""
if len(node.children) == 2:
return True
else:
return False
def find_root(self, node):
if node.parent is not None:
node = self.find_root(node.parent)
return node
def add_node_with_parent(self, node, parent):
"""
Add a Node to the tree under a specific parent Node
Parameters
-----------
node : :class:`Node`
Node to be added
parent : :class:`Node`
parent Node of the newly added Node
"""
node.parent = parent
if parent is not None:
parent.add_child(node)
self.add_node(node)
def add_node(self,node):
self.nodes_list.append(node)
def read_swc(self, input_file):
"""
Read the swc file and fill the attributes accordingly.
The assigned attributes are:
n_soma
n_node
nodes_list
location
type
diameter
parent_index
child_index
"""
self.n_soma = 0
self.nodes_list = []
self.location = np.array([0, 0, 0] ).reshape(3,1)
self.type = 1
self.parent_index = np.array([0])
child_index = csr_matrix((2,1000000))
f = open(input_file, 'r')
B = True
try:
for line in f:
if not line.startswith('#'):
split = line.split()
index = int(split[0].rstrip())
swc_type = int(split[1].rstrip())
x = float(split[2].rstrip())
y = float(split[3].rstrip())
z = float(split[4].rstrip())
radius = float(split[5].rstrip())
parent_index = int(split[6].rstrip())
if(parent_index == -1):
self.n_soma += 1
x_root = x
y_root = y
z_root = z
self.diameter = radius
else:
if(swc_type == 1):
self.n_soma += 1
self.location = np.append(self.location, np.array([x - x_root, y - y_root, z - z_root]).reshape(3,1), axis = 1)
self.diameter = np.append(self.diameter, radius)
self.type = np.append(self.type, swc_type)
self.parent_index = np.append(self.parent_index, parent_index - 1)
if(parent_index != 1):
if(child_index[0,parent_index-1]==0):
child_index[0,parent_index-1] = index-1
else:
child_index[1,parent_index-1] = index-1
node = Node()
node.xyz = np.array([x,y,z])
node.r = np.array([radius])
node.set_type(swc_type)
if(parent_index == -1):
self.add_node(node)
self.root = node
else:
self.add_node_with_parent(node,self.nodes_list[parent_index-1])
self.n_node = len(self.nodes_list)
a = child_index[:,0:self.n_node]
#a = a -1
a = a.toarray()
a[a==0] = np.nan
self.child_index = a
except Exception:
print('deleted Neuron')
def read_swc_matrix(self, input_file):
"""
Read an swc matrix and fill the attributes accordingly.
The assigned attributes are:
n_soma
n_node
nodes_list
location
type
diameter
parent_index
child_index
"""
self.n_soma = 0
self.nodes_list = []
self.location = np.array([0, 0, 0] ).reshape(3,1)
self.type = 1
self.parent_index = np.array([0])
child_index = csr_matrix((2,1000000))
n_node = input_file.shape[0]
for line in range(n_node):
index = input_file[line,0]
swc_type = input_file[line,1]
x = input_file[line,2]
y = input_file[line,3]
z = input_file[line,4]
radius = input_file[line,5]
parent_index = int(input_file[line,6])
if(parent_index == -1):
self.n_soma += 1
x_root = x
y_root = y
z_root = z
self.diameter = radius
else:
if(swc_type == 1):
self.n_soma += 1
self.location = np.append(self.location, np.array([x - x_root, y - y_root, z - z_root]).reshape(3,1), axis = 1)
self.diameter = np.append(self.diameter, radius)
self.type = np.append(self.type, swc_type)
self.parent_index = np.append(self.parent_index, parent_index - 1)
if(parent_index != 1):
if(child_index[0,parent_index-1]==0):
child_index[0,parent_index-1] = index-1
else:
child_index[1,parent_index-1] = index-1
node = Node()
node.xyz = np.array([x,y,z])
node.r = np.array([radius])
node.set_type(swc_type)
if(parent_index == -1):
self.add_node(node)
self.root = node
else:
self.add_node_with_parent(node,self.nodes_list[parent_index-1])
self.n_node = len(self.nodes_list)
a = child_index[:,0:self.n_node]
a = a.toarray()
a[a==0] = np.nan
self.child_index = a
def get_swc(self):
swc = np.zeros([self.n_node,7])
remain = [self.root]
index = np.array([-1])
for i in range(self.n_node):
n = remain[0]
swc[i,0] = i+1
swc[i,1] = n.set_type_from_name()
ind = self.get_index_for_no_soma_node(n)
if(ind > self.n_soma):
swc[i,2] = self.location[0,ind]
swc[i,3] = self.location[1,ind]
swc[i,4] = self.location[2,ind]
swc[i,5] = self.diameter[ind]
swc[i,6] = index[0]
else:
swc[i,2] = n.xyz[0]
swc[i,3] = n.xyz[1]
swc[i,4] = n.xyz[2]
swc[i,5] = n.r
swc[i,6] = 1
for m in n.children:
remain.append(m)
index = np.append(index,i+1)
remain = remain[1:]
index = index[1:]
swc[0,6] = -1
return swc
def write_swc(self, input_file):
"""
Used to write an SWC file from a morphology stored in this
:class:`Neuron`.
"""
writer = open(input_file, 'w')
swc = self.get_swc()
for i in range(swc.shape[0]):
string = (str(swc[i,0])+' '+str(swc[i,1]) + ' ' + str(swc[i,2]) +
' ' + str(swc[i,3]) + ' ' + str(swc[i,4]) +
' ' + str(swc[i,5]) + ' ' + str(swc[i,6]))
writer.write(string + '\n')
writer.flush()
writer.close()
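# Round-trip sketch (the file names are hypothetical):
#
#     neuron = Neuron(file_format='swc', input_file='cell.swc')
#     neuron.write_swc('cell_copy.swc')   # breadth-first re-indexed copy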
def get_random_branching_or_end_node(self):
(b,) = np.where(self.branch_order[self.n_soma:] == 2)
(e,) = np.where(self.branch_order[self.n_soma:] == 0)
I = np.append(b,e)
if(len(I) == 0):
n = Node()
n.type = 'empty'
else:
I += self.n_soma
i = int(np.floor(len(I)*np.random.rand()))
n = self.nodes_list[I[i]]
return n
def get_random_no_soma_node(self):
l = self.n_node - self.n_soma
return self.nodes_list[(np.floor(l*np.random.rand()) + self.n_soma).astype(int)]
def get_random_branching_node(self):
"""
Return one of the branching point in the neuron.
dependency:
self.branch_order
self.nodes_list
self.n_soma
"""
(I,) = np.where(self.branch_order[self.n_soma:] == 2)
if(len(I) == 0):
n = Node()
n.type = 'empty'
else:
I += self.n_soma
i = int(np.floor(len(I)*np.random.rand()))
n = self.nodes_list[I[i]]
return n
def get_random_order_one_node_not_in_certain_index(self, index):
"""
Return one of the order one point in the neuron.
dependency:
self.branch_order
self.nodes_list
self.n_soma
"""
(I,) = np.where(self.branch_order == 1)
I = I[I>=self.n_soma]
I = np.setdiff1d(I,index)
if(len(I) == 0):
n = Node()
n.type = 'empty'
else:
i = int(np.floor(len(I)*np.random.rand()))
n = self.nodes_list[I[i]]
return n
def get_random_non_branch_node_not_in_certain_index(self, index):
"""
Return one of the order one point in the neuron.
dependency:
self.branch_order
self.nodes_list
self.n_soma
"""
(I,) = np.where(self.branch_order != 2)
I = I[I>=self.n_soma]
I = np.setdiff1d(I,index)
if(len(I) == 0):
n = Node()
n.type = 'empty'
else:
i = int(np.floor(len(I)*np.random.rand()))
n = self.nodes_list[I[i]]
return n
def is_soma(self):
if(self.n_node == self.n_soma):
return True
else:
return False
def set_nodes_values(self):
i = 0
for n in self.nodes_list:
n.xyz = self.location[:,i]
n.r = self.diameter[i]
i += 1
def show_features(self,size_x = 15,size_y = 17 ,bin_size = 20):
n = 6
m = 2
plt.figure(figsize=(size_x,size_y))
plt.subplot(n,m,1)
a = self.global_angle
b = plt.hist(a[~np.isnan(a)],bins = bin_size,color = 'g')
#plt.xlabel('angle (radian)')
plt.ylabel('density')
plt.title('Global angles')
plt.subplot(n,m,2)
a = self.local_angle
b = plt.hist(a[~np.isnan(a)],bins = bin_size,color = 'g')
#plt.xlabel('angle (radian)')
plt.ylabel('density')
plt.title('Local angles')
plt.subplot(n,m,3)
plt.title('Neuronal/Euclidean distance from root')
a = self.features['ratio_euclidian_neuronal']
plt.hist(a[~np.isnan(a)],bins = bin_size ,color = 'g')
#plt.xlabel('ratio')
plt.ylabel('density')
plt.subplot(n,m,4)
plt.hist(self.distance_from_parent,bins = bin_size,color = 'g')
plt.title('Distance from parent')
#plt.xlabel('distance (um)')
plt.ylabel('density')
plt.subplot(n,m,5)
plt.hist(self.distance_from_root,bins = bin_size)
#plt.xlabel('distance (um)')
plt.ylabel('density')
plt.title('Distance from soma')
plt.subplot(n,m,6)
a = self.features['branch_angle']
plt.hist(a[~np.isnan(a)],bins = bin_size)
#plt.xlabel('angle (radian)')
plt.ylabel('density')
plt.title('Angle at the branching points')
plt.subplot(n,m,7)
a = self.features['curvature']
plt.hist(a,bins = bin_size)
#plt.xlabel('angle (radian)')
plt.ylabel('density')
plt.title('curvature')
plt.subplot(n,m,8)
a = self.features['neural_important']
plt.hist(a,bins = bin_size)
#plt.xlabel('angle (radian)')
plt.ylabel('density')
plt.title('length of neural segments')
plt.subplot(n,m,9)
a = self.features['ratio_neural_euclidian_important']
plt.hist(a,bins = bin_size)
#plt.xlabel('angle (radian)')
plt.ylabel('density')
plt.title('ratio of neural to euclidean distance for segments')
#fig, ax = plt.subplots(n,m,6)
plt.subplot(n,m,10)
ind = np.arange(4)
width = 0.35
plt.bar(ind,(self.n_node,self.features['Nbranch'],self.features['initial_segments'],self.features['discrepancy_space']),color='r');
#plt.title('Numberical features')
#plt.set_xticks(ind + width)
plt.xticks(ind,('Nnodes', 'Nbranch', 'Ninitials', 'discrepancy'))
class Node(object):
"""
Node class for each node in the Neuron class.
Each node has a parent (another node), children (zero, one or more nodes), a radius,
Euclidean coordinates and a type.
The children are kept in a list.
"""
def __init__(self):
self.parent = None
self.children = []
self.r = np.array([0.])
self.xyz = np.array([0.,0.,0.])
self.type = None # it can be soma, dendrite, axon, basal, apical
def get_parent(self):
"""
Return the parent Node of this one.
Returns
-------
parent : :class:`Node`
In case of the root, None is returned. Otherwise a :class:`Node` is
returned
"""
return self.__parent
def set_parent(self, parent):
"""
Set the parent Node of a given other Node
Parameters
----------
Node : :class:`Node`
"""
self.__parent = parent
def get_children(self):
"""
Return the children nodes of this one (if any)
Returns
-------
children : list :class:`Node`
In case of a leaf an empty list is returned
"""
return self.__children
def set_children(self, children):
"""
Set the children nodes of this one
Parameters
----------
children: list :class:`Node`
"""
self.__children = children
def get_radius(self):
"""
Returns
-------
radius : float
"""
return self.r
def set_radius(self, radius):
self.r = radius
def getxyz(self):
"""
Returns
-------
xyz : numpy array of three floats
"""
return self.xyz
def setxyz(self, xyz):
self.xyz = xyz
def set_type(self,index):
if(index == 0):
self.type = 'undefined'
elif(index == 1):
self.type = 'soma'
elif(index == 2):
self.type = 'axon'
elif(index == 3):
self.type = 'basal'
elif(index == 4):
self.type = 'apical'
def set_type_from_name(self):
if(self.type == 'undefined'):
return 0
if(self.type == 'soma'):
return 1
if(self.type == 'axon'):
return 2
if(self.type == 'basal'):
return 3
if(self.type == 'apical'):
return 4
def add_child(self, child_node):
"""
add a child to the children list of a given Node
Parameters
-----------
Node : :class:`Node`
"""
self.children.append(child_node)
def remove_child(self, child):
"""
Remove a child Node from the list of children of a specific Node
Parameters
-----------
Node : :class:`Node`
Raises a ValueError if the child is not present.
"""
self.children.remove(child)
|
RoozbehFarhoodi/McNeuron
|
McNeuron/Neuron.py
|
Python
|
mit
| 70,044
|
[
"NEURON"
] |
54febba33464ac1fb6645fcfb15c9da762abffb097c0087d96cf72a86e989bb0
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import linalg
from ..preprocessing import LabelBinarizer
from ..utils import array2d, check_random_state
from ..utils import shuffle as util_shuffle
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined in order to add covariance. The clusters
are then placed on the vertices of the hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=2)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float or None, optional (default=0.0)
Shift all features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float or None, optional (default=1.0)
Multiply all features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=int)
# Build the polytope
C = np.array(list(product([-class_sep, class_sep], repeat=n_informative)))
if not hypercube:
for k in range(n_clusters):
C[k, :] *= generator.rand()
for f in range(n_informative):
C[:, f] *= generator.rand()
generator.shuffle(C)
# Loop over all clusters
pos = 0
pos_end = 0
for k in range(n_clusters):
# Number of samples in cluster k
n_samples_k = n_samples_per_cluster[k]
# Define the range of samples
pos = pos_end
pos_end = pos + n_samples_k
# Assign labels
y[pos:pos_end] = k % n_classes
# Draw features at random
X[pos:pos_end, :n_informative] = generator.randn(n_samples_k,
n_informative)
# Multiply by a random matrix to create co-variance of the features
A = 2 * generator.rand(n_informative, n_informative) - 1
X[pos:pos_end, :n_informative] = np.dot(X[pos:pos_end, :n_informative],
A)
        # Shift the cluster to a vertex
X[pos:pos_end, :n_informative] += np.tile(C[k, :], (n_samples_k, 1))
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(int)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
X[:, n_features - n_useless:] = generator.randn(n_samples, n_useless)
# Randomly flip labels
if flip_y >= 0.0:
for i in range(n_samples):
if generator.rand() < flip_y:
y[i] = generator.randint(n_classes)
# Randomly shift and scale
constant_shift = shift is not None
constant_scale = scale is not None
for f in range(n_features):
if not constant_shift:
shift = (2 * generator.rand() - 1) * class_sep
if not constant_scale:
scale = 1 + 100 * generator.rand()
X[:, f] += shift
X[:, f] *= scale
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
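# Hedged usage sketch (not part of the original module): checks the shape
# contract described in make_classification's docstring above. Assumes numpy
# is importable as `np`, as in this module's header.
def _demo_make_classification():
    X, y = make_classification(n_samples=50, n_features=6, n_informative=3,
                               n_redundant=1, random_state=0)
    # 50 samples, 6 features, binary labels by default
    assert X.shape == (50, 6)
    assert y.shape == (50,)
    assert set(np.unique(y)) <= {0, 1}
    return X, y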
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
return_indicator=False, random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. Number of labels follows
a Poisson distribution that never takes the value 0.
length : int, optional (default=50)
Sum of the features (number of words if documents).
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
return_indicator : bool, optional (default=False),
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
n = n_classes + 1
while (not allow_unlabeled and n == 0) or n > n_classes:
n = generator.poisson(n_labels)
# pick n classes
y = []
while len(y) != n:
# pick a class with probability P(c)
c = generator.multinomial(1, p_c).argmax()
            if c not in y:
y.append(c)
# pick a non-zero document length by rejection sampling
k = 0
while k == 0:
k = generator.poisson(length)
# generate a document of length k words
x = np.zeros(n_features, dtype=int)
for i in range(k):
if len(y) == 0:
# if sample does not belong to any class, generate noise word
w = generator.randint(n_features)
else:
# pick a class and generate an appropriate word
c = y[generator.randint(len(y))]
w = generator.multinomial(1, p_w_c[:, c]).argmax()
x[w] += 1
return x, y
X, Y = zip(*[sample_example() for i in range(n_samples)])
if return_indicator:
lb = LabelBinarizer()
        Y = lb.fit([list(range(n_classes))]).transform(Y)
return np.array(X, dtype=np.float64), Y
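# Hedged usage sketch (not part of the original module): with the default
# return_indicator=False, Y comes back as a tuple of label lists, one per
# sample, matching the generative process described in the docstring above.
def _demo_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=5, n_features=8,
                                          n_classes=4, random_state=0)
    assert X.shape == (5, 8)
    assert len(Y) == 5
    # every label is a valid class index and appears at most once per sample
    assert all(len(set(y)) == len(y) and all(0 <= c < 4 for c in y) for y in Y)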
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
    X = rs.normal(size=shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
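# Hedged usage sketch (not part of the original module): the target follows
# the sign-like rule on the squared norm quoted in the docstring above.
def _demo_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    assert X.shape == (20, 10)
    expected = np.where((X ** 2).sum(axis=1) > 9.34, 1.0, -1.0)
    assert np.array_equal(y, expected)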
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See the `make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
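# Hedged usage sketch (not part of the original module): with noise=0 and
# coef=True the returned targets reproduce the linear model exactly, even
# after shuffling, because the coefficients are permuted consistently.
def _demo_make_regression():
    X, y, w = make_regression(n_samples=40, n_features=5, n_informative=3,
                              bias=2.0, noise=0.0, coef=True, random_state=0)
    assert X.shape == (40, 5) and y.shape == (40,) and w.shape == (5,)
    assert np.allclose(y, np.dot(X, w) + 2.0)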
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
    linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples // 2), np.ones(n_samples // 2)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)
    return X, y.astype(int)
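# Hedged usage sketch (not part of the original module): with shuffle=False
# and no noise, class 0 lies on the unit circle and class 1 on a circle of
# radius `factor`, as the construction above implies.
def _demo_make_circles():
    X, y = make_circles(n_samples=10, shuffle=False, factor=0.5)
    radii = np.hypot(X[:, 0], X[:, 1])
    assert np.allclose(radii[y == 0], 1.0)
    assert np.allclose(radii[y == 1], 0.5)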
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out), np.ones(n_samples_in)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)
    return X, y.astype(int)
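# Hedged usage sketch (not part of the original module): with shuffle=False
# the first half of the rows is the outer moon labelled 0 and the second
# half the inner moon labelled 1.
def _demo_make_moons():
    X, y = make_moons(n_samples=10, shuffle=False)
    assert X.shape == (10, 2)
    assert np.all(y[:5] == 0) and np.all(y[5:] == 1)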
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = array2d(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
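# Hedged usage sketch (not part of the original module): with noise=0 the
# returned targets match the Friedman #1 formula quoted in the docstring.
def _demo_make_friedman1():
    X, y = make_friedman1(n_samples=20, n_features=5, noise=0.0,
                          random_state=0)
    expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4])
    assert np.allclose(y, expected)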
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
from ..utils.fixes import qr_economic
u, _ = qr_economic(generator.randn(n_samples, n))
v, _ = qr_economic(generator.randn(n_features, n))
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
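# Hedged usage sketch (not part of the original module): because u and v are
# orthonormal, the singular values of the returned matrix are exactly the
# bell-shaped profile assembled above.
def _demo_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=30, n_features=20, effective_rank=5,
                             tail_strength=0.5, random_state=0)
    i = np.arange(20, dtype=np.float64)
    profile = 0.5 * np.exp(-1.0 * (i / 5) ** 2) + 0.5 * np.exp(-0.1 * i / 5)
    s = np.linalg.svd(X, compute_uv=False)
    assert np.allclose(s, profile)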
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Parameters
----------
n_samples : int
number of samples to generate
    n_components : int
        number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
    random_state : int or RandomState instance, optional (default=None)
        seed used by the pseudo random number generator
Returns
-------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).
    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).
    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
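# Hedged usage sketch (not part of the original module): Y factors exactly
# as D * X, D has unit-norm columns, and every column of X carries exactly
# n_nonzero_coefs non-zero entries.
def _demo_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
                                       n_features=6, n_nonzero_coefs=3,
                                       random_state=0)
    assert np.allclose(Y, np.dot(D, X))
    assert np.allclose(np.sqrt((D ** 2).sum(axis=0)), 1.0)
    assert all(np.count_nonzero(X[:, i]) == 3 for i in range(5))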
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
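# Hedged usage sketch (not part of the original module): the construction
# above yields a (numerically) symmetric matrix with strictly positive
# eigenvalues.
def _demo_make_spd_matrix():
    M = make_spd_matrix(4, random_state=0)
    assert np.allclose(M, M.T)
    assert np.all(np.linalg.eigvalsh(M) > 0)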
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Parameters
----------
    dim : integer, optional (default=1)
        The size of the random matrix to generate.
    alpha : float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is zero (see notes).
    norm_diag : boolean, optional (default=False)
        Whether to normalize the output matrix so that the diagonal
        elements are all 1.
    smallest_coef : float between 0 and 1, optional (default=0.1)
        The value of the smallest non-zero coefficient.
    largest_coef : float between 0 and 1, optional (default=0.9)
        The value of the largest non-zero coefficient.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
prec: array of shape = [dim, dim]
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
d = np.diag(prec)
d = 1. / np.sqrt(d)
prec *= d
prec *= d[:, np.newaxis]
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
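# Hedged usage sketch (not part of the original module): with the default
# noise=0.0 each point sits exactly on the spiral x = t*cos(t), z = t*sin(t).
def _demo_make_swiss_roll():
    X, t = make_swiss_roll(n_samples=10, random_state=0)
    assert X.shape == (10, 3) and t.shape == (10,)
    assert np.allclose(X[:, 0], t * np.cos(t))
    assert np.allclose(X[:, 2], t * np.sin(t))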
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
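# Hedged usage sketch (not part of the original module): when n_samples is a
# multiple of n_classes, the quantile labelling puts exactly the same number
# of points in every class.
def _demo_make_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=30, n_features=2, n_classes=3,
                                   random_state=0)
    assert X.shape == (30, 2)
    assert np.array_equal(np.bincount(y), [10, 10, 10])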
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
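# Hedged usage sketch (not part of the original module): the row and column
# indicator matrices have one boolean row per bicluster.
def _demo_make_biclusters():
    data, rows, cols = make_biclusters((20, 16), n_clusters=3, shuffle=False,
                                       random_state=0)
    assert data.shape == (20, 16)
    assert rows.shape == (3, 20) and cols.shape == (3, 16)
    # each row/column of the data belongs to exactly one bicluster
    assert np.all(rows.sum(axis=0) == 1) and np.all(cols.sum(axis=0) == 1)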
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
| depet/scikit-learn | sklearn/datasets/samples_generator.py | Python | bsd-3-clause | 50,824 | ["Gaussian"] | 9769d5140a5c9d4344e0a9204734b3a4b5720656fefb7699fa7f60eea7f19911 |
""" ProxyRepository class is a front-end to the proxy repository Database
"""
__RCSID__ = "$Id$"
import os
import glob
import time
import urllib
import random
import hashlib
import commands
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Security import Properties
from DIRAC.Core.Security.VOMS import VOMS
from DIRAC.Core.Security.MyProxy import MyProxy
from DIRAC.Core.Security.X509Request import X509Request # pylint: disable=import-error
from DIRAC.Core.Security.X509Chain import X509Chain, isPUSPdn # pylint: disable=import-error
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.ConfigurationSystem.Client.PathFinder import getDatabaseSection
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.Resources.ProxyProvider.ProxyProviderFactory import ProxyProviderFactory
class ProxyDB(DB):
NOTIFICATION_TIMES = [2592000, 1296000]
def __init__(self,
useMyProxy=False):
DB.__init__(self, 'ProxyDB', 'Framework/ProxyDB')
random.seed()
self.__defaultRequestLifetime = 300 # 5min
self.__defaultTokenLifetime = 86400 * 7 # 1 week
self.__defaultTokenMaxUses = 50
self.__useMyProxy = useMyProxy
self._minSecsToAllowStore = 3600
self.__notifClient = NotificationClient()
retVal = self.__initializeDB()
if not retVal['OK']:
raise Exception("Can't create tables: %s" % retVal['Message'])
self.purgeExpiredProxies(sendNotifications=False)
self.__checkDBVersion()
def getMyProxyServer(self):
""" Get MyProxy server from configuration
:return: basestring
"""
return gConfig.getValue("/DIRAC/VOPolicy/MyProxyServer", "myproxy.cern.ch")
def getMyProxyMaxLifeTime(self):
""" Get a maximum of the proxy lifetime delegated by MyProxy
:return: int -- time in a seconds
"""
return gConfig.getValue("/DIRAC/VOPolicy/MyProxyMaxDelegationTime", 168) * 3600
def getFromAddr(self):
""" Get the From address to use in proxy expiry e-mails.
:return: basestring
"""
cs_path = getDatabaseSection(self.fullname)
opt_path = "/%s/%s" % (cs_path, "FromAddr")
return gConfig.getValue(opt_path, "proxymanager@diracgrid.org")
def __initializeDB(self):
""" Create the tables
:result: S_OK()/S_ERROR()
"""
retVal = self._query("show tables")
if not retVal['OK']:
return retVal
tablesInDB = [t[0] for t in retVal['Value']]
tablesD = {}
if 'ProxyDB_Requests' not in tablesInDB:
tablesD['ProxyDB_Requests'] = {'Fields': {'Id': 'INTEGER AUTO_INCREMENT NOT NULL',
'UserDN': 'VARCHAR(255) NOT NULL',
'Pem': 'BLOB',
'ExpirationTime': 'DATETIME'
},
'PrimaryKey': 'Id'
}
if 'ProxyDB_CleanProxies' not in tablesInDB:
tablesD['ProxyDB_CleanProxies'] = {'Fields': {'UserName': 'VARCHAR(64) NOT NULL',
'UserDN': 'VARCHAR(255) NOT NULL',
'ProxyProvider': 'VARCHAR(64) DEFAULT "Certificate"',
'Pem': 'BLOB',
'ExpirationTime': 'DATETIME',
},
'PrimaryKey': ['UserDN', 'ProxyProvider']
}
  # WARN: new proxies are now uploaded only to ProxyDB_CleanProxies, so this table may be dropped in the future
if 'ProxyDB_Proxies' not in tablesInDB:
tablesD['ProxyDB_Proxies'] = {'Fields': {'UserName': 'VARCHAR(64) NOT NULL',
'UserDN': 'VARCHAR(255) NOT NULL',
'UserGroup': 'VARCHAR(255) NOT NULL',
'Pem': 'BLOB',
'ExpirationTime': 'DATETIME',
'PersistentFlag': 'ENUM ("True","False") NOT NULL DEFAULT "True"',
},
'PrimaryKey': ['UserDN', 'UserGroup']
}
if 'ProxyDB_VOMSProxies' not in tablesInDB:
tablesD['ProxyDB_VOMSProxies'] = {'Fields': {'UserName': 'VARCHAR(64) NOT NULL',
'UserDN': 'VARCHAR(255) NOT NULL',
'UserGroup': 'VARCHAR(255) NOT NULL',
'VOMSAttr': 'VARCHAR(255) NOT NULL',
'Pem': 'BLOB',
'ExpirationTime': 'DATETIME',
},
                                        'PrimaryKey': ['UserDN', 'UserGroup', 'VOMSAttr']
}
if 'ProxyDB_Log' not in tablesInDB:
tablesD['ProxyDB_Log'] = {'Fields': {'ID': 'BIGINT NOT NULL AUTO_INCREMENT',
'IssuerDN': 'VARCHAR(255) NOT NULL',
'IssuerGroup': 'VARCHAR(255) NOT NULL',
'TargetDN': 'VARCHAR(255) NOT NULL',
'TargetGroup': 'VARCHAR(255) NOT NULL',
'Action': 'VARCHAR(128) NOT NULL',
'Timestamp': 'DATETIME',
},
'PrimaryKey': 'ID',
'Indexes': {'Timestamp': ['Timestamp']}
}
if 'ProxyDB_Tokens' not in tablesInDB:
tablesD['ProxyDB_Tokens'] = {'Fields': {'Token': 'VARCHAR(64) NOT NULL',
'RequesterDN': 'VARCHAR(255) NOT NULL',
'RequesterGroup': 'VARCHAR(255) NOT NULL',
'ExpirationTime': 'DATETIME NOT NULL',
'UsesLeft': 'SMALLINT UNSIGNED DEFAULT 1',
},
'PrimaryKey': 'Token'
}
if 'ProxyDB_ExpNotifs' not in tablesInDB:
tablesD['ProxyDB_ExpNotifs'] = {'Fields': {'UserDN': 'VARCHAR(255) NOT NULL',
'UserGroup': 'VARCHAR(255) NOT NULL',
'LifeLimit': 'INTEGER UNSIGNED DEFAULT 0',
'ExpirationTime': 'DATETIME NOT NULL',
},
'PrimaryKey': ['UserDN', 'UserGroup']
}
return self._createTables(tablesD)
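  # Hedged sketch (not part of the original ProxyDB): renders one of the
  # table description dicts built above into a CREATE TABLE statement, to
  # illustrate the schema format that self._createTables consumes. The real
  # DDL generation lives in DIRAC.Core.Base.DB; this is illustrative only.
  @staticmethod
  def _renderCreateTableSketch(tableName, tableDesc):
    columns = ["`%s` %s" % (field, fieldType)
               for field, fieldType in tableDesc['Fields'].items()]
    primaryKey = tableDesc.get('PrimaryKey')
    if primaryKey:
      if not isinstance(primaryKey, list):
        primaryKey = [primaryKey]
      columns.append("PRIMARY KEY ( %s )" % ", ".join("`%s`" % c for c in primaryKey))
    return "CREATE TABLE `%s` ( %s )" % (tableName, ", ".join(columns))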
def __addUserNameToTable(self, tableName):
""" Add user name to the table
:param basestring tableName: table name
:return: S_OK()/S_ERROR()
"""
result = self._update("ALTER TABLE `%s` ADD COLUMN UserName VARCHAR(64) NOT NULL" % tableName)
if not result['OK']:
return result
result = self._query("SELECT DISTINCT UserName, UserDN FROM `%s`" % tableName)
if not result['OK']:
return result
data = result['Value']
for userName, userDN in data:
if not userName:
result = Registry.getUsernameForDN(userDN)
if not result['OK']:
self.log.error("Could not retrieve username for DN", userDN)
continue
userName = result['Value']
try:
userName = self._escapeString(userName)['Value']
userDN = self._escapeString(userDN)['Value']
except KeyError:
self.log.error("Could not escape username or DN", "%s %s" % (userName, userDN))
continue
result = self._update("UPDATE `%s` SET UserName=%s WHERE UserDN=%s" % (tableName, userName, userDN))
if not result['OK']:
self.log.error("Could update username for DN", "%s: %s" % (userDN, result['Message']))
continue
self.log.info("UserDN %s has user %s" % (userDN, userName))
return S_OK()
def __checkDBVersion(self):
""" Check DB tables for empty UserName option
:return: S_OK()/S_ERROR()
"""
for tableName in ("ProxyDB_CleanProxies", "ProxyDB_Proxies", "ProxyDB_VOMSProxies"):
result = self._query("describe `%s`" % tableName)
if not result['OK']:
return result
if 'UserName' not in [row[0] for row in result['Value']]:
self.log.notice("Username missing in table %s schema. Adding it" % tableName)
result = self.__addUserNameToTable(tableName)
        if not result['OK']:
          return result
    return S_OK()
def generateDelegationRequest(self, proxyChain, userDN):
""" Generate a request and store it for a given proxy Chain
:param X509Chain() proxyChain: proxy as chain
:param basestring userDN: user DN
        :return: S_OK(dict)/S_ERROR() -- dict contains the request id and the request as a string
"""
retVal = self._getConnection()
if not retVal['OK']:
return retVal
connObj = retVal['Value']
retVal = proxyChain.generateProxyRequest()
if not retVal['OK']:
return retVal
request = retVal['Value']
retVal = request.dumpRequest()
if not retVal['OK']:
return retVal
reqStr = retVal['Value']
retVal = request.dumpPKey()
if not retVal['OK']:
return retVal
allStr = reqStr + retVal['Value']
try:
sUserDN = self._escapeString(userDN)['Value']
sAllStr = self._escapeString(allStr)['Value']
except KeyError:
return S_ERROR("Cannot escape DN")
cmd = "INSERT INTO `ProxyDB_Requests` ( Id, UserDN, Pem, ExpirationTime )"
cmd += " VALUES ( 0, %s, %s, TIMESTAMPADD( SECOND, %d, UTC_TIMESTAMP() ) )" % (sUserDN,
sAllStr,
int(self.__defaultRequestLifetime))
retVal = self._update(cmd, conn=connObj)
if not retVal['OK']:
return retVal
# 99% of the times we will stop here
if 'lastRowId' in retVal:
return S_OK({'id': retVal['lastRowId'], 'request': reqStr})
    # If the lastRowId hack does not work, get it by hand
retVal = self._query("SELECT Id FROM `ProxyDB_Requests` WHERE Pem='%s'" % reqStr)
if not retVal['OK']:
return retVal
data = retVal['Value']
if len(data) == 0:
return S_ERROR("Insertion of the request in the db didn't work as expected")
userGroup = proxyChain.getDIRACGroup().get('Value') or "unset"
self.logAction("request upload", userDN, userGroup, userDN, "any")
# Here we go!
return S_OK({'id': data[0][0], 'request': reqStr})
def __retrieveDelegationRequest(self, requestId, userDN):
""" Retrieve a request from the DB
:param int requestId: id of the request
:param basestring userDN: user DN
:return: S_OK(basestring)/S_ERROR()
"""
try:
sUserDN = self._escapeString(userDN)['Value']
except KeyError:
return S_ERROR("Cannot escape DN")
cmd = "SELECT Pem FROM `ProxyDB_Requests` WHERE Id = %s AND UserDN = %s" % (requestId, sUserDN)
retVal = self._query(cmd)
if not retVal['OK']:
return retVal
data = retVal['Value']
if len(data) == 0:
return S_ERROR("No requests with id %s" % requestId)
request = X509Request()
retVal = request.loadAllFromString(data[0][0])
if not retVal['OK']:
return retVal
return S_OK(request)
def purgeExpiredRequests(self):
""" Purge expired requests from the db
:return: S_OK()/S_ERROR()
"""
cmd = "DELETE FROM `ProxyDB_Requests` WHERE ExpirationTime < UTC_TIMESTAMP()"
return self._update(cmd)
def deleteRequest(self, requestId):
""" Delete a request from the db
:param int requestId: id of the request
:return: S_OK()/S_ERROR()
"""
cmd = "DELETE FROM `ProxyDB_Requests` WHERE Id=%s" % requestId
return self._update(cmd)
def completeDelegation(self, requestId, userDN, delegatedPem):
""" Complete a delegation and store it in the db
:param int requestId: id of the request
:param basestring userDN: user DN
:param basestring delegatedPem: delegated proxy as string
:return: S_OK()/S_ERROR()
"""
retVal = self.__retrieveDelegationRequest(requestId, userDN)
if not retVal['OK']:
return retVal
request = retVal['Value']
chain = X509Chain(keyObj=request.getPKey())
retVal = chain.loadChainFromString(delegatedPem)
if not retVal['OK']:
return retVal
retVal = chain.isValidProxy()
if not retVal['OK']:
return retVal
result = chain.isVOMS()
if result['OK'] and result.get('Value'):
return S_ERROR("Proxies with VOMS extensions are not allowed to be uploaded")
# This test does not seem to make any sense whatsoever, since
# we just created the Chain using the request pkey....
# of course it will match !
retVal = request.checkChain(chain)
if not retVal['OK']:
return retVal
if not retVal['Value']:
return S_ERROR("Received chain does not match request: %s" % retVal['Message'])
retVal = chain.getDIRACGroup(ignoreDefault=True)
if not retVal['OK']:
return retVal
if retVal['Value']:
return S_ERROR("Proxies with DIRAC group extensions are not allowed to be uploaded: %s" % retVal['Value'])
retVal = self.__storeProxy(userDN, chain)
if not retVal['OK']:
return retVal
retVal = self.deleteRequest(requestId)
if not retVal['OK']:
return retVal
return S_OK()
def __storeProxy(self, userDN, chain, proxyProvider=None):
""" Store user proxy into the Proxy repository for a user specified by his
DN and group or proxy provider.
:param basestring userDN: user DN from proxy
:param X509Chain() chain: proxy chain
:param basestring proxyProvider: proxy provider name
:return: S_OK()/S_ERROR()
"""
retVal = Registry.getUsernameForDN(userDN)
if not retVal['OK']:
return retVal
userName = retVal['Value']
if not proxyProvider:
result = Registry.getProxyProvidersForDN(userDN)
if result['OK'] and result.get('Value'):
proxyProvider = result['Value'][0]
# Get remaining secs
retVal = chain.getRemainingSecs()
if not retVal['OK']:
return retVal
remainingSecs = retVal['Value']
if remainingSecs < self._minSecsToAllowStore:
return S_ERROR(
"Cannot store proxy, remaining secs %s is less than %s" %
(remainingSecs, self._minSecsToAllowStore))
# Compare the DNs
retVal = chain.getIssuerCert()
if not retVal['OK']:
return retVal
proxyIdentityDN = retVal['Value'].getSubjectDN()['Value']
if userDN != proxyIdentityDN:
msg = "Mismatch in the user DN"
vMsg = "Proxy says %s and credentials are %s" % (proxyIdentityDN, userDN)
self.log.error(msg, vMsg)
return S_ERROR("%s. %s" % (msg, vMsg))
# Check if its limited
if chain.isLimitedProxy()['Value']:
return S_ERROR("Limited proxies are not allowed to be stored")
    dLeft = remainingSecs // 86400
    hLeft = remainingSecs // 3600 - dLeft * 24
    mLeft = remainingSecs // 60 - hLeft * 60 - dLeft * 1440
    sLeft = remainingSecs - hLeft * 3600 - mLeft * 60 - dLeft * 86400
self.log.info("Storing proxy for credentials %s (%d:%02d:%02d:%02d left)" %
(proxyIdentityDN, dLeft, hLeft, mLeft, sLeft))
try:
sUserDN = self._escapeString(userDN)['Value']
sTable = 'ProxyDB_CleanProxies'
except KeyError:
return S_ERROR("Cannot escape DN")
# Check what we have already got in the repository
cmd = "SELECT TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ), Pem "
cmd += "FROM `%s` WHERE UserDN=%s " % (sTable, sUserDN)
result = self._query(cmd)
if not result['OK']:
return result
# Check if there is a previous ticket for the DN
data = result['Value']
sqlInsert = True
if len(data) > 0:
sqlInsert = False
pem = data[0][1]
if pem:
remainingSecsInDB = data[0][0]
if remainingSecs <= remainingSecsInDB:
          self.log.info("Stored proxy is valid for longer than the uploaded one, skipping.",
                        "%s secs in uploaded, %s secs in db" % (remainingSecs, remainingSecsInDB))
return S_OK()
pemChain = chain.dumpAllToString()['Value']
dValues = {'UserName': self._escapeString(userName)['Value'],
'UserDN': sUserDN,
'Pem': self._escapeString(pemChain)['Value'],
'ExpirationTime': 'TIMESTAMPADD( SECOND, %d, UTC_TIMESTAMP() )' % int(remainingSecs)}
if proxyProvider:
dValues['ProxyProvider'] = "'%s'" % proxyProvider
if sqlInsert:
sqlFields = []
sqlValues = []
for key in dValues:
sqlFields.append(key)
sqlValues.append(dValues[key])
cmd = "INSERT INTO `%s` ( %s ) VALUES ( %s )" % (sTable, ", ".join(sqlFields), ", ".join(sqlValues))
else:
sqlSet = []
sqlWhere = []
for k in dValues:
if k in ('UserDN', 'ProxyProvider'):
sqlWhere.append("%s = %s" % (k, dValues[k]))
else:
sqlSet.append("%s = %s" % (k, dValues[k]))
cmd = "UPDATE `%s` SET %s WHERE %s" % (sTable, ", ".join(sqlSet), " AND ".join(sqlWhere))
self.logAction("store proxy", userDN, proxyProvider, userDN, proxyProvider)
return self._update(cmd)
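  # Hedged sketch (not part of the original ProxyDB): the days/hours/minutes/
  # seconds breakdown logged by __storeProxy above, rewritten with divmod for
  # clarity. Illustrative only.
  @staticmethod
  def _splitRemainingSecsSketch(remainingSecs):
    minutes, seconds = divmod(int(remainingSecs), 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return days, hours, minutes, seconds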
def purgeExpiredProxies(self, sendNotifications=True):
""" Purge expired requests from the db
:param boolean sendNotifications: if need to send notification
:return: S_OK(int)/S_ERROR() -- int is number of purged expired proxies
"""
purged = 0
for tableName in ("ProxyDB_CleanProxies", "ProxyDB_Proxies", "ProxyDB_VOMSProxies"):
cmd = "DELETE FROM `%s` WHERE ExpirationTime < UTC_TIMESTAMP()" % tableName
result = self._update(cmd)
if not result['OK']:
return result
purged += result['Value']
self.log.info("Purged %s expired proxies from %s" % (result['Value'], tableName))
if sendNotifications:
result = self.sendExpirationNotifications()
if not result['OK']:
return result
return S_OK(purged)
def deleteProxy(self, userDN, userGroup=None, proxyProvider=None):
""" Remove proxy of the given user from the repository
:param basestring userDN: user DN
:param basestring userGroup: DIRAC group
:param basestring proxyProvider: proxy provider name
:return: S_OK()/S_ERROR()
"""
try:
userDN = self._escapeString(userDN)['Value']
if userGroup:
userGroup = self._escapeString(userGroup)['Value']
if proxyProvider:
proxyProvider = self._escapeString(proxyProvider)['Value']
except KeyError:
return S_ERROR("Invalid DN or group or proxy provider")
errMsgs = []
req = "DELETE FROM `%%s` WHERE UserDN=%s" % userDN
if proxyProvider or not userGroup:
result = self._update('%s %s' % (req % 'ProxyDB_CleanProxies',
proxyProvider and 'AND ProxyProvider=%s' % proxyProvider or ''))
if not result['OK']:
errMsgs.append(result['Message'])
for table in ['ProxyDB_Proxies', 'ProxyDB_VOMSProxies']:
result = self._update('%s %s' % (req % table,
userGroup and 'AND UserGroup=%s' % userGroup or ''))
if not result['OK']:
if result['Message'] not in errMsgs:
errMsgs.append(result['Message'])
if errMsgs:
return S_ERROR(', '.join(errMsgs))
return result
def __getPemAndTimeLeft(self, userDN, userGroup=None, vomsAttr=None, proxyProvider=None):
""" Get proxy from database
:param basestring userDN: user DN
:param basestring userGroup: requested DIRAC group
:param basestring vomsAttr: VOMS name
:param basestring proxyProvider: proxy provider name
        :return: S_OK(tuple)/S_ERROR() -- the tuple contains the proxy as a string and the remaining seconds
"""
try:
sUserDN = self._escapeString(userDN)['Value']
if userGroup:
sUserGroup = self._escapeString(userGroup)['Value']
if vomsAttr:
sVomsAttr = self._escapeString(vomsAttr)['Value']
except KeyError:
return S_ERROR("Invalid DN or Group")
if proxyProvider:
sTable = "`ProxyDB_CleanProxies`"
elif not vomsAttr:
sTable = "`ProxyDB_Proxies`"
else:
sTable = "`ProxyDB_VOMSProxies`"
cmd = "SELECT Pem, TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) from %s " % sTable
cmd += "WHERE UserDN=%s AND TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) > 0" % (sUserDN)
if proxyProvider:
cmd += ' AND ProxyProvider="%s"' % proxyProvider
else:
if userGroup:
cmd += " AND UserGroup=%s" % sUserGroup
if vomsAttr:
cmd += " AND VOMSAttr=%s" % sVomsAttr
retVal = self._query(cmd)
if not retVal['OK']:
return retVal
data = retVal['Value']
for record in data:
if record[0]:
if proxyProvider:
chain = X509Chain()
result = chain.loadProxyFromString(record[0])
if not result['OK']:
return result
result = chain.generateProxyToString(record[1], diracGroup=userGroup, rfc=True)
if not result['OK']:
return result
return S_OK((result['Value'], record[1]))
return S_OK((record[0], record[1]))
if userGroup:
userMask = "%s@%s" % (userDN, userGroup)
else:
userMask = userDN
return S_ERROR("%s has no proxy registered" % userMask)
def renewFromMyProxy(self, userDN, userGroup, lifeTime=None, chain=None):
""" Renew proxy from MyProxy
:param basestring userDN: user DN
:param basestring userGroup: user group
        :param int lifeTime: required proxy lifetime in seconds
:param X509Chain chain: proxy as chain
        :return: S_OK(X509Chain)/S_ERROR()
"""
if not lifeTime:
lifeTime = 43200
if not self.__useMyProxy:
return S_ERROR("myproxy is disabled")
# Get the chain
if not chain:
retVal = self.__getPemAndTimeLeft(userDN, userGroup)
if not retVal['OK']:
return retVal
pemData = retVal['Value'][0]
chain = X509Chain()
retVal = chain.loadProxyFromString(pemData)
if not retVal['OK']:
return retVal
originChainLifeTime = chain.getRemainingSecs()['Value']
maxMyProxyLifeTime = self.getMyProxyMaxLifeTime()
    # If the stored chain already has more than 0.8 of the max MyProxy lifetime, don't ask MyProxy
if originChainLifeTime > maxMyProxyLifeTime * 0.8:
self.log.error("Skipping myproxy download",
"user %s %s chain has %s secs and requested %s secs" % (userDN,
userGroup,
originChainLifeTime,
maxMyProxyLifeTime))
return S_OK(chain)
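    # Ask MyProxy for 30% more lifetime than requested, capped at the configured maximum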
lifeTime *= 1.3
if lifeTime > maxMyProxyLifeTime:
lifeTime = maxMyProxyLifeTime
self.log.info("Renewing proxy from myproxy", "user %s %s for %s secs" % (userDN, userGroup, lifeTime))
myProxy = MyProxy(server=self.getMyProxyServer())
retVal = myProxy.getDelegatedProxy(chain, lifeTime)
if not retVal['OK']:
return retVal
mpChain = retVal['Value']
retVal = mpChain.getRemainingSecs()
if not retVal['OK']:
return S_ERROR("Can't retrieve remaining secs from renewed proxy: %s" % retVal['Message'])
mpChainSecsLeft = retVal['Value']
if mpChainSecsLeft < originChainLifeTime:
self.log.info("Chain downloaded from myproxy has less lifetime than the one stored in the db",
"\n Downloaded from myproxy: %s secs\n Stored in DB: %s secs" % (mpChainSecsLeft,
originChainLifeTime))
return S_OK(chain)
retVal = mpChain.getDIRACGroup()
if not retVal['OK']:
return S_ERROR("Can't retrieve DIRAC Group from renewed proxy: %s" % retVal['Message'])
chainGroup = retVal['Value']
if chainGroup != userGroup:
return S_ERROR("Mismatch between renewed proxy group and expected: %s vs %s" % (userGroup, chainGroup))
retVal = self.__storeProxy(userDN, userGroup, mpChain)
if not retVal['OK']:
self.log.error("Cannot store proxy after renewal", retVal['Message'])
retVal = myProxy.getServiceDN()
if not retVal['OK']:
hostDN = userDN
else:
hostDN = retVal['Value']
self.logAction("myproxy renewal", hostDN, "host", userDN, userGroup)
return S_OK(mpChain)
  # WARN: this method will not be needed once the CS section Users/<user>/DNProperties exists
  # for every user; in that case the proxy providers described there will be used
  def __getPUSProxy(self, userDN, userGroup, requiredLifetime, requestedVOMSAttr=False):
    """ Get a Per User SubProxy (PUSP) from the PUSP service

        :param basestring userDN: user DN
        :param basestring userGroup: DIRAC group
        :param int requiredLifetime: required proxy lifetime in seconds
        :param requestedVOMSAttr: requested VOMS attribute
        :return: S_OK(tuple)/S_ERROR() -- tuple with the proxy as a string and the seconds left
    """
result = Registry.getGroupsForDN(userDN)
if not result['OK']:
return result
validGroups = result['Value']
if userGroup not in validGroups:
return S_ERROR('Invalid group %s for user' % userGroup)
voName = Registry.getVOForGroup(userGroup)
if not voName:
return S_ERROR('Can not determine VO for group %s' % userGroup)
retVal = self.__getVOMSAttribute(userGroup, requestedVOMSAttr)
if not retVal['OK']:
return retVal
vomsAttribute = retVal['Value']['attribute']
vomsVO = retVal['Value']['VOMSVO']
puspServiceURL = Registry.getVOOption(voName, 'PUSPServiceURL')
if not puspServiceURL:
return S_ERROR('Can not determine PUSP service URL for VO %s' % voName)
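    # For PUSP DNs the per-user label is the part of the DN after the last ':'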
user = userDN.split(":")[-1]
puspURL = "%s?voms=%s:%s&proxy-renewal=false&disable-voms-proxy=false" \
"&rfc-proxy=true&cn-label=user:%s" % (puspServiceURL, vomsVO, vomsAttribute, user)
try:
proxy = urllib.urlopen(puspURL).read()
    except Exception as e:
      return S_ERROR('Failed to get proxy from the PUSP server: %s' % repr(e))
chain = X509Chain()
chain.loadChainFromString(proxy)
chain.loadKeyFromString(proxy)
result = chain.getCredentials()
if not result['OK']:
return S_ERROR('Failed to get a valid PUSP proxy')
credDict = result['Value']
if credDict['identity'] != userDN:
return S_ERROR('Requested DN does not match the obtained one in the PUSP proxy')
timeLeft = credDict['secondsLeft']
result = chain.generateProxyToString(timeLeft, diracGroup=userGroup)
if not result['OK']:
return result
proxyString = result['Value']
return S_OK((proxyString, timeLeft))
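  # Example of the PUSP URL built above (hypothetical values, for illustration only):
  #   https://pusp.example.org/pusp?voms=myvo:/myvo/Role=user&proxy-renewal=false
  #       &disable-voms-proxy=false&rfc-proxy=true&cn-label=user:jdoe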
def __generateProxyFromProxyProvider(self, userDN, proxyProvider):
""" Get proxy from proxy provider
        :param basestring userDN: user DN for which the proxy must be created
        :param basestring proxyProvider: proxy provider name that will generate the proxy
:return: S_OK(dict)/S_ERROR() -- dict with remaining seconds, proxy as a string and as a chain
"""
gLogger.info('Getting proxy from proxyProvider', '(for "%s" DN by "%s")' % (userDN, proxyProvider))
result = ProxyProviderFactory().getProxyProvider(proxyProvider)
if not result['OK']:
return result
pp = result['Value']
result = pp.getProxy({"DN": userDN})
if not result['OK']:
return result
proxyStr = result['Value']
chain = X509Chain()
result = chain.loadProxyFromString(proxyStr)
if not result['OK']:
return result
result = chain.getRemainingSecs()
if not result['OK']:
return result
remainingSecs = result['Value']
result = self.__storeProxy(userDN, chain, proxyProvider)
if result['OK']:
return S_OK({'proxy': proxyStr, 'chain': chain, 'remainingSecs': remainingSecs})
return result
def __getProxyFromProxyProviders(self, userDN, userGroup, requiredLifeTime):
""" Generate new proxy from exist clean proxy or from proxy provider
for use with userDN in the userGroup
:param basestring userDN: user DN
:param basestring userGroup: required group name
        :param int requiredLifeTime: required proxy lifetime in seconds
        :return: S_OK(tuple)/S_ERROR() -- tuple containing the proxy as a string and the remaining seconds
"""
result = Registry.getGroupsForDN(userDN)
if not result['OK']:
return S_ERROR('Cannot generate proxy: %s' % result['Message'])
if userGroup not in result['Value']:
return S_ERROR('Cannot generate proxy: Invalid group %s for user' % userGroup)
result = Registry.getProxyProvidersForDN(userDN)
if result['OK']:
providers = result['Value']
providers.append('Certificate')
for proxyProvider in providers:
result = self.__getPemAndTimeLeft(userDN, userGroup, proxyProvider=proxyProvider)
if result['OK'] and (not requiredLifeTime or result['Value'][1] > requiredLifeTime):
return result
if len(providers) == 1:
return S_ERROR('Cannot generate proxy: No proxy providers found for "%s"' % userDN)
result = self.__generateProxyFromProxyProvider(userDN, proxyProvider)
if result['OK']:
chain = result['Value']['chain']
remainingSecs = result['Value']['remainingSecs']
result = chain.generateProxyToString(remainingSecs, diracGroup=userGroup, rfc=True)
if result['OK']:
return S_OK((result['Value'], remainingSecs))
return S_ERROR('Cannot generate proxy%s' %
(result.get('Message') and ': ' + result.get('Message') or ''))
def getProxy(self, userDN, userGroup, requiredLifeTime=None):
""" Get proxy string from the Proxy Repository for use with userDN
in the userGroup
:param basestring userDN: user DN
:param basestring userGroup: required DIRAC group
        :param int requiredLifeTime: required proxy lifetime in seconds
        :return: S_OK(tuple)/S_ERROR() -- tuple with the proxy as a chain and the proxy lifetime in seconds
"""
    # Check that the group is allowed to download proxies
    if not Registry.isDownloadableGroup(userGroup):
      return S_ERROR('"%s" group is disabled for download.' % userGroup)
    # WARN: this block will not be needed once the CS section Users/<user>/DNProperties exists
    # for every user; in that case the proxy providers described there will be used
# Get the Per User SubProxy if one is requested
if isPUSPdn(userDN):
result = self.__getPUSProxy(userDN, userGroup, requiredLifeTime)
if not result['OK']:
return result
pemData = result['Value'][0]
timeLeft = result['Value'][1]
chain = X509Chain()
result = chain.loadProxyFromString(pemData)
if not result['OK']:
return result
return S_OK((chain, timeLeft))
# Standard proxy is requested
retVal = self.__getPemAndTimeLeft(userDN, userGroup)
errMsg = "Can't get proxy%s: " % (requiredLifeTime and ' for %s seconds' % requiredLifeTime or '')
if not retVal['OK']:
      errMsg += '%s, trying to generate a new one' % retVal['Message']
retVal = self.__getProxyFromProxyProviders(userDN, userGroup, requiredLifeTime=requiredLifeTime)
elif requiredLifeTime:
if retVal['Value'][1] < requiredLifeTime and not self.__useMyProxy:
        errMsg += 'Stored proxy is not long-lived enough, trying to generate a new one'
retVal = self.__getProxyFromProxyProviders(userDN, userGroup, requiredLifeTime=requiredLifeTime)
if not retVal['OK']:
return S_ERROR("%s; %s" % (errMsg, retVal['Message']))
pemData = retVal['Value'][0]
timeLeft = retVal['Value'][1]
chain = X509Chain()
result = chain.loadProxyFromString(pemData)
    if not result['OK']:
      return S_ERROR("%s; %s" % (errMsg, result['Message']))
if self.__useMyProxy:
if requiredLifeTime:
if timeLeft < requiredLifeTime:
retVal = self.renewFromMyProxy(userDN, userGroup, lifeTime=requiredLifeTime, chain=chain)
if not retVal['OK']:
return S_ERROR("%s; the proxy lifetime from MyProxy is less than required." % errMsg)
chain = retVal['Value']
# Proxy is invalid for some reason, let's delete it
if not chain.isValidProxy()['OK']:
self.deleteProxy(userDN, userGroup)
return S_ERROR("%s@%s has no proxy registered" % (userDN, userGroup))
return S_OK((chain, timeLeft))
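  # Example (hypothetical usage sketch, assuming an initialized ProxyDB instance `db`):
  #   result = db.getProxy("/DC=org/DC=dirac/CN=jdoe", "dirac_user", requiredLifeTime=3600)
  #   if result['OK']:
  #     chain, timeLeft = result['Value']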
def __getVOMSAttribute(self, userGroup, requiredVOMSAttribute=False):
""" Get VOMS attribute for DIRAC group
:param basestring userGroup: DIRAC group
:param boolean requiredVOMSAttribute: VOMS attribute
:return: S_OK(dict)/S_ERROR() -- dict contain attribute and VOMS VO
"""
if requiredVOMSAttribute:
return S_OK({'attribute': requiredVOMSAttribute, 'VOMSVO': Registry.getVOMSVOForGroup(userGroup)})
csVOMSMapping = Registry.getVOMSAttributeForGroup(userGroup)
if not csVOMSMapping:
return S_ERROR("No mapping defined for group %s in the CS" % userGroup)
return S_OK({'attribute': csVOMSMapping, 'VOMSVO': Registry.getVOMSVOForGroup(userGroup)})
def getVOMSProxy(self, userDN, userGroup, requiredLifeTime=None, requestedVOMSAttr=None):
""" Get proxy string from the Proxy Repository for use with userDN
in the userGroup
:param basestring userDN: user DN
:param basestring userGroup: required DIRAC group
        :param int requiredLifeTime: required proxy lifetime in seconds
        :param basestring requestedVOMSAttr: VOMS attribute
        :return: S_OK(tuple)/S_ERROR() -- tuple with the proxy as a chain and the proxy lifetime in seconds
"""
retVal = self.__getVOMSAttribute(userGroup, requestedVOMSAttr)
if not retVal['OK']:
return retVal
vomsAttr = retVal['Value']['attribute']
vomsVO = retVal['Value']['VOMSVO']
# Look in the cache
retVal = self.__getPemAndTimeLeft(userDN, userGroup, vomsAttr)
if retVal['OK']:
pemData = retVal['Value'][0]
vomsTime = retVal['Value'][1]
chain = X509Chain()
retVal = chain.loadProxyFromString(pemData)
if retVal['OK']:
retVal = chain.getRemainingSecs()
if retVal['OK']:
remainingSecs = retVal['Value']
if requiredLifeTime and requiredLifeTime <= vomsTime and requiredLifeTime <= remainingSecs:
return S_OK((chain, min(vomsTime, remainingSecs)))
if isPUSPdn(userDN):
# Get the Per User SubProxy if one is requested
result = self.__getPUSProxy(userDN, userGroup, requiredLifeTime, requestedVOMSAttr)
if not result['OK']:
return result
pemData = result['Value'][0]
chain = X509Chain()
result = chain.loadProxyFromString(pemData)
if not result['OK']:
return result
else:
# Get the stored proxy and dress it with the VOMS extension
retVal = self.getProxy(userDN, userGroup, requiredLifeTime)
if not retVal['OK']:
return retVal
chain, secsLeft = retVal['Value']
vomsMgr = VOMS()
attrs = vomsMgr.getVOMSAttributes(chain).get('Value') or ['']
if attrs[0]:
if vomsAttr != attrs[0]:
return S_ERROR("Stored proxy has already a different VOMS attribute %s than requested %s" %
(attrs[0], vomsAttr))
else:
retVal = vomsMgr.setVOMSAttributes(chain, vomsAttr, vo=vomsVO)
if not retVal['OK']:
return S_ERROR("Cannot append voms extension: %s" % retVal['Message'])
chain = retVal['Value']
# We have got the VOMS proxy, store it into the cache
result = self.__storeVOMSProxy(userDN, userGroup, vomsAttr, chain)
if not result['OK']:
return result
return S_OK((chain, result['Value']))
def __storeVOMSProxy(self, userDN, userGroup, vomsAttr, chain):
""" Store VOMS proxy
:param basestring userDN: user DN
:param basestring userGroup: DIRAC group
:param basestring vomsAttr: VOMS attribute
        :param X509Chain chain: proxy as a chain
:return: S_OK(basestring)/S_ERROR()
"""
retVal = self._getConnection()
if not retVal['OK']:
return retVal
connObj = retVal['Value']
retVal1 = VOMS().getVOMSProxyInfo(chain, 'actimeleft')
retVal2 = VOMS().getVOMSProxyInfo(chain, 'timeleft')
if not retVal1['OK']:
return retVal1
if not retVal2['OK']:
return retVal2
try:
vomsSecsLeft1 = int(retVal1['Value'].strip())
vomsSecsLeft2 = int(retVal2['Value'].strip())
vomsSecsLeft = min(vomsSecsLeft1, vomsSecsLeft2)
except Exception as e:
return S_ERROR("Can't parse VOMS time left: %s" % str(e))
secsLeft = min(vomsSecsLeft, chain.getRemainingSecs()['Value'])
pemData = chain.dumpAllToString()['Value']
result = Registry.getUsernameForDN(userDN)
if not result['OK']:
userName = ""
else:
userName = result['Value']
try:
sUserName = self._escapeString(userName)['Value']
sUserDN = self._escapeString(userDN)['Value']
sUserGroup = self._escapeString(userGroup)['Value']
sVomsAttr = self._escapeString(vomsAttr)['Value']
sPemData = self._escapeString(pemData)['Value']
except KeyError:
return S_ERROR("Could not escape some data")
cmd = "REPLACE INTO `ProxyDB_VOMSProxies` ( UserName, UserDN, UserGroup, VOMSAttr, Pem, ExpirationTime ) VALUES "
cmd += "( %s, %s, %s, %s, %s, TIMESTAMPADD( SECOND, %d, UTC_TIMESTAMP() ) )" % (sUserName, sUserDN, sUserGroup,
sVomsAttr, sPemData, secsLeft)
result = self._update(cmd, conn=connObj)
if not result['OK']:
return result
return S_OK(secsLeft)
def getUsers(self, validSecondsLeft=0, userName=None):
""" Get all the distinct users from the Proxy Repository. Optionally, only users
with valid proxies within the given validity period expressed in seconds
:param int validSecondsLeft: validity period expressed in seconds
        :param basestring userName: user name to add to the search filter
        :return: S_OK(list)/S_ERROR() -- list of dicts with user name, DN, group,
                 expiration time, persistent flag
"""
data = []
sqlCond = []
if validSecondsLeft:
try:
validSecondsLeft = int(validSecondsLeft)
except ValueError:
return S_ERROR("Seconds left has to be an integer")
sqlCond.append("TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) > %d" % validSecondsLeft)
if userName:
try:
sUserName = self._escapeString(userName)['Value']
except KeyError:
return S_ERROR("Can't escape user name")
sqlCond.append('UserName = %s' % sUserName)
for table, fields in [('ProxyDB_CleanProxies', ("UserName", "UserDN", "ExpirationTime")),
('ProxyDB_Proxies', ("UserName", "UserDN", "UserGroup", "ExpirationTime", "PersistentFlag"))]:
cmd = "SELECT %s FROM `%s`" % (", ".join(fields), table)
if sqlCond:
cmd += " WHERE %s" % " AND ".join(sqlCond)
retVal = self._query(cmd)
if not retVal['OK']:
return retVal
for record in retVal['Value']:
record = list(record)
if table == 'ProxyDB_CleanProxies':
record.insert(2, '')
record.insert(4, False)
data.append({'Name': record[0],
'DN': record[1],
'group': record[2],
'expirationtime': record[3],
'persistent': record[4] == 'True'})
return S_OK(data)
def getCredentialsAboutToExpire(self, requiredSecondsLeft, onlyPersistent=True):
""" Get credentials about to expire for MyProxy
:param int requiredSecondsLeft: required seconds left
        :param boolean onlyPersistent: only consider records with the persistent flag set
:return: S_OK()/S_ERROR()
"""
cmd = "SELECT UserDN, UserGroup, ExpirationTime, PersistentFlag FROM `ProxyDB_Proxies`"
cmd += " WHERE TIMESTAMPDIFF( SECOND, ExpirationTime, UTC_TIMESTAMP() ) < %d and " % requiredSecondsLeft
cmd += "TIMESTAMPDIFF( SECOND, ExpirationTime, UTC_TIMESTAMP() ) > 0"
if onlyPersistent:
cmd += " AND PersistentFlag = 'True'"
return self._query(cmd)
def setPersistencyFlag(self, userDN, userGroup, persistent=True):
""" Set the proxy PersistentFlag to the flag value
:param basestring userDN: user DN
:param basestring userGroup: group name
:param boolean persistent: enable persistent flag
:return: S_OK()/S_ERROR()
"""
try:
sUserDN = self._escapeString(userDN)['Value']
sUserGroup = self._escapeString(userGroup)['Value']
except KeyError:
return S_ERROR("Can't escape something")
if persistent:
sqlFlag = "True"
else:
sqlFlag = "False"
retVal = self._query(
"SELECT PersistentFlag FROM `ProxyDB_Proxies` WHERE UserDN=%s AND UserGroup=%s" %
(sUserDN, sUserGroup))
sqlInsert = True
if retVal['OK']:
data = retVal['Value']
if len(data) > 0:
sqlInsert = False
if data[0][0] == sqlFlag:
return S_OK()
if sqlInsert:
# If it's not in the db and we're removing the persistency then do nothing
if not persistent:
return S_OK()
cmd = "INSERT INTO `ProxyDB_Proxies` ( UserDN, UserGroup, Pem, ExpirationTime, PersistentFlag ) VALUES "
cmd += "( %s, %s, '', UTC_TIMESTAMP(), 'True' )" % (sUserDN, sUserGroup)
else:
cmd = "UPDATE `ProxyDB_Proxies` SET PersistentFlag='%s' WHERE UserDN=%s AND UserGroup=%s" % (sqlFlag,
sUserDN,
sUserGroup)
retVal = self._update(cmd)
if not retVal['OK']:
return retVal
return S_OK()
def getProxiesContent(self, selDict, sortList, start=0, limit=0):
""" Get the contents of the db, parameters are a filter to the db
        :param dict selDict: selection dict that contains fields and their possible values
        :param dict sortList: dict with sorting fields
        :param int,long start: search limit start
        :param int,long limit: search limit amount
        :return: S_OK(dict)/S_ERROR() -- dict containing fields, record list, total records
"""
data = []
sqlWhere = ["Pem is not NULL"]
for table, fields in [('ProxyDB_CleanProxies', ("UserName", "UserDN", "ExpirationTime")),
('ProxyDB_Proxies', ("UserName", "UserDN", "UserGroup", "ExpirationTime", "PersistentFlag"))]:
cmd = "SELECT %s FROM `%s`" % (", ".join(fields), table)
for field in selDict:
if field not in fields:
continue
fVal = selDict[field]
if isinstance(fVal, (dict, tuple, list)):
sqlWhere.append("%s in (%s)" %
(field, ", ".join([self._escapeString(str(value))['Value'] for value in fVal])))
else:
sqlWhere.append("%s = %s" % (field, self._escapeString(str(fVal))['Value']))
sqlOrder = []
if sortList:
for sort in sortList:
          if len(sort) == 1:
            sort = (sort[0], "DESC")
elif len(sort) > 2:
return S_ERROR("Invalid sort %s" % sort)
if sort[0] not in fields:
if table == 'ProxyDB_CleanProxies' and sort[0] in ['UserGroup', 'PersistentFlag']:
continue
return S_ERROR("Invalid sorting field %s" % sort[0])
if sort[1].upper() not in ("ASC", "DESC"):
return S_ERROR("Invalid sorting order %s" % sort[1])
sqlOrder.append("%s %s" % (sort[0], sort[1]))
if sqlWhere:
cmd = "%s WHERE %s" % (cmd, " AND ".join(sqlWhere))
if sqlOrder:
cmd = "%s ORDER BY %s" % (cmd, ", ".join(sqlOrder))
if limit:
try:
start = int(start)
limit = int(limit)
except ValueError:
return S_ERROR("start and limit have to be integers")
cmd += " LIMIT %d,%d" % (start, limit)
retVal = self._query(cmd)
if not retVal['OK']:
return retVal
for record in retVal['Value']:
record = list(record)
if table == 'ProxyDB_CleanProxies':
record.insert(2, '')
record.insert(4, False)
record[4] = record[4] == 'True'
data.append(record)
totalRecords = len(data)
# cmd = "SELECT COUNT( UserGroup ) FROM `ProxyDB_Proxies`"
# if sqlWhere:
# cmd = "%s WHERE %s" % (cmd, " AND ".join(sqlWhere))
# retVal = self._query(cmd)
# if retVal['OK']:
# totalRecords = retVal['Value'][0][0]
return S_OK({'ParameterNames': fields, 'Records': data, 'TotalRecords': totalRecords})
def logAction(self, action, issuerDN, issuerGroup, targetDN, targetGroup):
""" Add an action to the log
:param basestring action: proxy action
:param basestring issuerDN: user DN of issuer
:param basestring issuerGroup: DIRAC group of issuer
:param basestring targetDN: user DN of target
:param basestring targetGroup: DIRAC group of target
        :return: S_ERROR() in case of an escaping problem, otherwise None
"""
try:
sAction = self._escapeString(action)['Value']
sIssuerDN = self._escapeString(issuerDN)['Value']
sIssuerGroup = self._escapeString(issuerGroup)['Value']
sTargetDN = self._escapeString(targetDN)['Value']
sTargetGroup = self._escapeString(targetGroup)['Value']
except KeyError:
return S_ERROR("Can't escape from death")
cmd = "INSERT INTO `ProxyDB_Log` ( Action, IssuerDN, IssuerGroup, TargetDN, TargetGroup, Timestamp ) VALUES "
cmd += "( %s, %s, %s, %s, %s, UTC_TIMESTAMP() )" % (sAction, sIssuerDN, sIssuerGroup, sTargetDN, sTargetGroup)
retVal = self._update(cmd)
if not retVal['OK']:
self.log.error("Can't add a proxy action log: ", retVal['Message'])
def purgeLogs(self):
""" Purge expired requests from the db
:return: S_OK()/S_ERROR()
"""
cmd = "DELETE FROM `ProxyDB_Log` WHERE TIMESTAMPDIFF( SECOND, Timestamp, UTC_TIMESTAMP() ) > 15552000"
return self._update(cmd)
def getLogsContent(self, selDict, sortList, start=0, limit=0):
"""
Function to get the contents of the logs table
parameters are a filter to the db
"""
fields = ("Action", "IssuerDN", "IssuerGroup", "TargetDN", "TargetGroup", "Timestamp")
cmd = "SELECT %s FROM `ProxyDB_Log`" % ", ".join(fields)
if selDict:
qr = []
if 'beforeDate' in selDict:
qr.append("Timestamp < %s" % self._escapeString(selDict['beforeDate'])['Value'])
del selDict['beforeDate']
if 'afterDate' in selDict:
qr.append("Timestamp > %s" % self._escapeString(selDict['afterDate'])['Value'])
del selDict['afterDate']
for field in selDict:
qr.append("(%s)" %
" OR ".join(["%s=%s" %
(field, self._escapeString(str(value))['Value']) for value in selDict[field]]))
whereStr = " WHERE %s" % " AND ".join(qr)
cmd += whereStr
else:
whereStr = ""
if sortList:
cmd += " ORDER BY %s" % ", ".join(["%s %s" % (sort[0], sort[1]) for sort in sortList])
if limit:
cmd += " LIMIT %d,%d" % (start, limit)
retVal = self._query(cmd)
if not retVal['OK']:
return retVal
data = retVal['Value']
totalRecords = len(data)
cmd = "SELECT COUNT( Timestamp ) FROM `ProxyDB_Log`"
cmd += whereStr
retVal = self._query(cmd)
if retVal['OK']:
totalRecords = retVal['Value'][0][0]
return S_OK({'ParameterNames': fields, 'Records': data, 'TotalRecords': totalRecords})
def generateToken(self, requesterDN, requesterGroup, numUses=1, lifeTime=0, retries=10):
""" Generate and return a token and the number of uses for the token
:param basestring requesterDN: DN of requester
:param basestring requesterGroup: DIRAC group of requester
:param int numUses: number of uses
        :param int lifeTime: token lifetime in seconds
:param int retries: number of retries
:return: S_OK(tuple)/S_ERROR() -- tuple with token and number of uses
"""
if not lifeTime:
lifeTime = gConfig.getValue("/DIRAC/VOPolicy/TokenLifeTime", self.__defaultTokenLifetime)
maxUses = gConfig.getValue("/DIRAC/VOPolicy/TokenMaxUses", self.__defaultTokenMaxUses)
numUses = max(1, min(numUses, maxUses))
m = hashlib.md5()
rndData = "%s.%s.%s.%s" % (time.time(), random.random(), numUses, lifeTime)
m.update(rndData)
token = m.hexdigest()
fieldsSQL = ", ".join(("Token", "RequesterDN", "RequesterGroup", "ExpirationTime", "UsesLeft"))
valuesSQL = ", ".join((self._escapeString(token)['Value'],
self._escapeString(requesterDN)['Value'],
self._escapeString(requesterGroup)['Value'],
"TIMESTAMPADD( SECOND, %d, UTC_TIMESTAMP() )" % int(lifeTime),
str(numUses)))
insertSQL = "INSERT INTO `ProxyDB_Tokens` ( %s ) VALUES ( %s )" % (fieldsSQL, valuesSQL)
result = self._update(insertSQL)
if result['OK']:
return S_OK((token, numUses))
if result['Message'].find("uplicate entry") > -1:
if retries:
        return self.generateToken(requesterDN, requesterGroup, numUses, lifeTime, retries - 1)
return S_ERROR("Max retries reached for token generation. Aborting")
return result
def purgeExpiredTokens(self):
""" Purge expired tokens from the db
:return: S_OK(boolean)/S_ERROR()
"""
delSQL = "DELETE FROM `ProxyDB_Tokens` WHERE ExpirationTime < UTC_TIMESTAMP() OR UsesLeft < 1"
return self._update(delSQL)
def useToken(self, token, requesterDN, requesterGroup):
""" Uses of token count
:param basestring token: token
:param basestring requesterDN: DN of requester
:param basestring requesterGroup: DIRAC group of requester
:return: S_OK(boolean)/S_ERROR()
"""
sqlCond = " AND ".join(("UsesLeft > 0",
"Token=%s" % self._escapeString(token)['Value'],
"RequesterDN=%s" % self._escapeString(requesterDN)['Value'],
"RequesterGroup=%s" % self._escapeString(requesterGroup)['Value'],
"ExpirationTime >= UTC_TIMESTAMP()"))
updateSQL = "UPDATE `ProxyDB_Tokens` SET UsesLeft = UsesLeft - 1 WHERE %s" % sqlCond
result = self._update(updateSQL)
if not result['OK']:
return result
return S_OK(result['Value'] > 0)
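  # Example (hypothetical usage sketch, assuming an initialized ProxyDB instance `db`):
  #   result = db.generateToken("/DC=org/DC=dirac/CN=jdoe", "dirac_user", numUses=5)
  #   if result['OK']:
  #     token, usesLeft = result['Value']
  #     db.useToken(token, "/DC=org/DC=dirac/CN=jdoe", "dirac_user")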
def __cleanExpNotifs(self):
""" Clean expired notifications from the db
:return: S_OK()/S_ERROR()
"""
cmd = "DELETE FROM `ProxyDB_ExpNotifs` WHERE ExpirationTime < UTC_TIMESTAMP()"
return self._update(cmd)
# FIXME: Add clean proxy
def sendExpirationNotifications(self):
""" Send notification about expiration
        :return: S_OK(list)/S_ERROR() -- list of tuples with user DN, group and proxy time left
"""
result = self.__cleanExpNotifs()
if not result['OK']:
return result
cmd = "SELECT UserDN, UserGroup, LifeLimit FROM `ProxyDB_ExpNotifs`"
result = self._query(cmd)
if not result['OK']:
return result
notifDone = dict([((row[0], row[1]), row[2]) for row in result['Value']])
notifLimits = sorted([int(x) for x in self.getCSOption("NotificationTimes", ProxyDB.NOTIFICATION_TIMES)])
sqlSel = "UserDN, UserGroup, TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime )"
sqlCond = "TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), ExpirationTime ) < %d" % max(notifLimits)
cmd = "SELECT %s FROM `ProxyDB_Proxies` WHERE %s" % (sqlSel, sqlCond)
result = self._query(cmd)
if not result['OK']:
return result
pilotProps = (Properties.GENERIC_PILOT, Properties.PILOT)
data = result['Value']
sent = []
for row in data:
userDN, group, lTime = row
# If it's a pilot proxy, skip it
if Registry.groupHasProperties(group, pilotProps):
continue
      # If it doesn't have the auto upload proxy option enabled, skip it
if not Registry.getGroupOption(group, "AutoUploadProxy", False):
continue
notKey = (userDN, group)
for notifLimit in notifLimits:
if notifLimit < lTime:
# Not yet in this notification limit
continue
if notKey in notifDone and notifDone[notKey] <= notifLimit:
# Already notified for this notification limit
break
if not self._notifyProxyAboutToExpire(userDN, group, lTime):
# Cannot send notification, retry later
break
try:
sUserDN = self._escapeString(userDN)['Value']
sGroup = self._escapeString(group)['Value']
except KeyError:
return S_ERROR("OOPS")
if notKey not in notifDone:
values = "( %s, %s, %d, TIMESTAMPADD( SECOND, %s, UTC_TIMESTAMP() ) )" % (sUserDN, sGroup, notifLimit, lTime)
cmd = "INSERT INTO `ProxyDB_ExpNotifs` ( UserDN, UserGroup, LifeLimit, ExpirationTime ) VALUES %s" % values
result = self._update(cmd)
if not result['OK']:
gLogger.error("Could not mark notification as sent", result['Message'])
else:
values = "LifeLimit = %d, ExpirationTime = TIMESTAMPADD( SECOND, %s, UTC_TIMESTAMP() )" % (notifLimit, lTime)
cmd = "UPDATE `ProxyDB_ExpNotifs` SET %s WHERE UserDN = %s AND UserGroup = %s" % (values, sUserDN, sGroup)
result = self._update(cmd)
if not result['OK']:
gLogger.error("Could not mark notification as sent", result['Message'])
sent.append((userDN, group, lTime))
notifDone[notKey] = notifLimit
return S_OK(sent)
def _notifyProxyAboutToExpire(self, userDN, userGroup, lTime):
""" Send notification mail about to expire
:param basestring userDN: user DN
:param basestring userGroup: DIRAC group
        :param int lTime: proxy time left in seconds
:return: boolean
"""
result = Registry.getUsernameForDN(userDN)
if not result['OK']:
return False
userName = result['Value']
userEMail = Registry.getUserOption(userName, "Email", "")
if not userEMail:
gLogger.error("Could not discover user email", userName)
return False
daysLeft = int(lTime / 86400)
msgSubject = "Your proxy uploaded to DIRAC will expire in %d days" % daysLeft
msgBody = """\
Dear %s,
The proxy you uploaded to DIRAC will expire in approximately %d days. The proxy
information is:
DN: %s
Group: %s
If you plan to keep using these credentials, please upload a newer proxy to
DIRAC by executing:
$ dirac-proxy-init -P -g %s --rfc
If you have been issued a different certificate, please make sure you have a
proxy uploaded with that certificate.
Cheers,
DIRAC's Proxy Manager
""" % (userName, daysLeft, userDN, userGroup, userGroup)
fromAddr = self.getFromAddr()
result = self.__notifClient.sendMail(userEMail, msgSubject, msgBody, fromAddress=fromAddr)
if not result['OK']:
gLogger.error("Could not send email", result['Message'])
return False
return True
|
chaen/DIRAC
|
FrameworkSystem/DB/ProxyDB.py
|
Python
|
gpl-3.0
| 56,234
|
[
"DIRAC"
] |
1af1e2dc6abde435dba0a352c62e8c82d58d54ff91a7540451d9b430ad72db7e
|
'''
Nacker is a tool to circumvent 802.1x Network Access Control (NAC) on
a wired LAN.
Copyright (C) 2013 Carsten Maartmann-Moe
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Created on Aug 29, 2013
@author: Carsten Maartmann-Moe <carsten@carmaa.com> aka ntropy
'''
from scapy.all import *
def ping(ip):
TIMEOUT = 2
conf.verb = 0
print('Pinging {0}'.format(ip))
packet = IP(dst=ip, ttl=20)/ICMP()
reply = sr1(packet, timeout=TIMEOUT)
    if reply is not None:
        print("{0} is online".format(reply.src))
        return True
    else:
        print("Timeout waiting for {0}".format(packet[IP].dst))
        return False
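if __name__ == '__main__':
    # Minimal usage sketch (hypothetical target address; assumes scapy is
    # installed and the script runs with privileges to send raw packets):
    ping('192.168.1.1')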
|
mehulsbhatt/nacker
|
caravan/icmp.py
|
Python
|
gpl-2.0
| 1,202
|
[
"MOE"
] |
0e35f069cd8d57f83fa4aa43a0ec1e27d864dee0283e359e9091cc38bff83bbc
|
"""Version information for MayaVi2.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2006, Enthought, Inc.
# License: BSD Style.
from mayavi.version import version as __version__
|
dmsurti/mayavi
|
mayavi/__version__.py
|
Python
|
bsd-3-clause
| 205
|
[
"Mayavi"
] |
11d9ec5bf2dd24a096852c298724fa6f173e8b9d2a9dd5c84a0c71d2d4f37baa
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and the University
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 - 2009 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
# Copyright (C) 2006 - 2007 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc. and EML Research, gGmbH.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CDataObject(unittest.TestCase):
def setUp(self):
self.datamodel=COPASI.CRootContainer.addDatamodel()
self.model=self.datamodel.getModel()
self.compartment=self.model.createCompartment("Comp1")
self.object=self.model.createMetabolite("Metab1","Comp1")
self.model.compileIfNecessary()
def test_getObjectName(self):
t=self.object.getObjectName()
self.assert_(type(t)==StringType)
def test_setObjectName(self):
NAME="MyObject"
self.object.setObjectName(NAME)
self.assert_(self.object.getObjectName()==NAME)
def test_getObjectDisplayName(self):
t=self.object.getObjectDisplayName()
self.assert_(type(t)==StringType)
def test_getObjectType(self):
t=self.object.getObjectType()
self.assert_(type(t)==StringType)
def test_getObjectParent(self):
parent=self.object.getObjectParent()
self.assert_(parent!=None)
self.assert_(parent.__class__==COPASI.MetabVectorNS)
self.assert_(parent.getKey()==self.compartment.getMetabolites().getKey())
def test_getCN(self):
cn=self.object.getCN()
self.assert_(cn.__class__==COPASI.CCommonName)
def test_isContainer(self):
result=self.object.isContainer()
self.assert_(type(result)==BooleanType)
def test_isVector(self):
result=self.object.isVector()
self.assert_(type(result)==BooleanType)
def test_isMatrix(self):
result=self.object.isMatrix()
self.assert_(type(result)==BooleanType)
def test_isNameVector(self):
result=self.object.isNameVector()
self.assert_(type(result)==BooleanType)
def test_isReference(self):
result=self.object.isReference()
self.assert_(type(result)==BooleanType)
def test_isValueBool(self):
result=self.object.isValueBool()
self.assert_(type(result)==BooleanType)
def test_isValueInt(self):
result=self.object.isValueInt()
self.assert_(type(result)==BooleanType)
def test_isValueDbl(self):
result=self.object.isValueDbl()
self.assert_(type(result)==BooleanType)
def test_isNonUniqueName(self):
result=self.object.isNonUniqueName()
self.assert_(type(result)==BooleanType)
def test_isStaticString(self):
result=self.object.isStaticString()
self.assert_(type(result)==BooleanType)
def test_isValueString(self):
result=self.object.isValueString()
self.assert_(type(result)==BooleanType)
def test_isSeparator(self):
result=self.object.isSeparator()
self.assert_(type(result)==BooleanType)
def test_getKey(self):
key=self.object.getKey()
self.assert_(type(key)==StringType)
def suite():
tests=[
'test_getObjectName'
,'test_setObjectName'
,'test_getObjectDisplayName'
,'test_getObjectType'
,'test_getObjectParent'
,'test_getCN'
,'test_isContainer'
,'test_isVector'
,'test_isMatrix'
,'test_isNameVector'
,'test_isReference'
,'test_isValueBool'
,'test_isValueInt'
,'test_isValueDbl'
,'test_isNonUniqueName'
,'test_isStaticString'
,'test_isValueString'
,'test_isSeparator'
,'test_getKey'
]
return unittest.TestSuite(map(Test_CDataObject,tests))
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
jonasfoe/COPASI
|
copasi/bindings/python/unittests/Test_CCopasiObject.py
|
Python
|
artistic-2.0
| 4,243
|
[
"COPASI"
] |
b16d656e8b0d22d3231f4622dd52165c736b7858794d4b3d7e200fc27040b83e
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import (
assert_,
assert_array_equal,
)
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import (
DLP_CONFIG,
DLP_CONFIG_order,
DLP_CONFIG_minimal,
DLP_HISTORY,
DLP_HISTORY_order,
DLP_HISTORY_minimal,
)
class DLPUniverse(ParserBase):
def test_creates_universe(self):
u = mda.Universe(self.filename, topology_format=self.format)
assert_(isinstance(u, mda.Universe))
class DLPBase2(DLPUniverse):
expected_attrs = ['ids', 'names']
guessed_attrs = ['types', 'masses']
expected_n_atoms = 216
expected_n_residues = 1
expected_n_segments = 1
def test_names(self):
assert_(self.top.names.values[0] == 'K+')
assert_(self.top.names.values[4] == 'Cl-')
class TestDLPHistoryParser(DLPBase2):
parser = mda.topology.DLPolyParser.HistoryParser
filename = DLP_HISTORY
format='HISTORY'
class TestDLPConfigParser(DLPBase2):
parser = mda.topology.DLPolyParser.ConfigParser
filename = DLP_CONFIG
format='CONFIG'
class DLPBase(DLPUniverse):
expected_attrs = ['ids', 'names']
guessed_attrs = ['types', 'masses']
expected_n_atoms = 3
expected_n_residues = 1
expected_n_segments = 1
def test_dlp_names(self):
assert_array_equal(self.top.names.values,
['C', 'B', 'A'])
class TestDLPConfigMinimal(DLPBase):
parser = mda.topology.DLPolyParser.ConfigParser
filename = DLP_CONFIG_minimal
format='CONFIG'
class TestDLPConfigOrder(DLPBase):
parser = mda.topology.DLPolyParser.ConfigParser
filename = DLP_CONFIG_order
format='CONFIG'
class TestDLPHistoryMinimal(DLPBase):
parser = mda.topology.DLPolyParser.HistoryParser
filename = DLP_HISTORY_minimal
format='HISTORY'
class TestDLPHistoryOrder(DLPBase):
parser = mda.topology.DLPolyParser.HistoryParser
filename = DLP_HISTORY_order
format='HISTORY'
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/topology/test_dlpoly.py
|
Python
|
gpl-2.0
| 3,036
|
[
"MDAnalysis"
] |
f085d0c3299e13b498a7dc738f8d18ded62ed60ade71aa1e47d9e8e7edc150d4
|
from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
from h2o.grid.grid_search import H2OGridSearch
# In this test, we check to make sure that a grid search on a GAM works with hyperparameters containing subspaces.
# The test compares the results of the grid search models with the models we created
# by manually searching over the hyperspace.
# If the coefficients do not match or an incorrect number of models is generated, the test throws an assertion error.
class test_gam_gridsearch_specific:
h2o_data = []
myX = []
myY = []
search_criteria = {'strategy': 'Cartesian'}
hyper_parameters = {'alpha': [0.9, 0.01],
'subspaces': [
{'scale': [[1, 1, 1], [2, 2, 2]],
'num_knots': [[5, 5, 5], [5, 6, 7]],
'gam_columns': [["C11", "C12", "C13"]]},
{'scale': [[1, 1], [2, 2]],
'num_knots': [[5, 5], [6, 6]],
'gam_columns': [["C11", "C12"], ["C12", "C13"]]}]}
manual_gam_models = []
h2o_model = []
num_grid_models = 0
num_expected_models = 24
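    # The expected count follows from the Cartesian product of the hyperspace:
    #   subspace 1: 2 scales x 2 num_knots x 1 gam_columns = 4 combinations
    #   subspace 2: 2 scales x 2 num_knots x 2 gam_columns = 8 combinations
    #   (4 + 8) combinations x 2 alphas = 24 models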
def __init__(self):
self.setup_data()
def setup_data(self):
"""
This function performs all initializations necessary:
load the data sets and set the training set indices and response column index
"""
self.h2o_data = h2o.import_file(
path=pyunit_utils.locate("smalldata/glm_test/gaussian_20cols_10000Rows.csv"))
self.h2o_data["C1"] = self.h2o_data["C1"].asfactor()
self.h2o_data["C2"] = self.h2o_data["C2"].asfactor()
self.myX = ["C1", "C2"]
self.myY = "C21"
for alpha in self.hyper_parameters["alpha"]:
for subspace in self.hyper_parameters["subspaces"]:
for scale in subspace['scale']:
for gam_columns in subspace['gam_columns']:
for num_knots in subspace['num_knots']:
self.manual_gam_models.append(H2OGeneralizedAdditiveEstimator(family="gaussian",
gam_columns=gam_columns,
keep_gam_cols=True,
scale=scale,
num_knots=num_knots,
alpha=alpha
))
def train_models(self):
self.h2o_model = H2OGridSearch(H2OGeneralizedAdditiveEstimator(
family="gaussian", keep_gam_cols=True), self.hyper_parameters, search_criteria=self.search_criteria)
self.h2o_model.train(x=self.myX, y=self.myY, training_frame=self.h2o_data)
for model in self.manual_gam_models:
            model.train(x=self.myX, y=self.myY, training_frame=self.h2o_data)
def match_models(self):
for model in self.manual_gam_models:
gam_columns = model.actual_params['gam_columns']
scale = model.actual_params['scale']
num_knots = model.actual_params['num_knots']
alpha = model.actual_params['alpha']
for grid_search_model in self.h2o_model.models:
if grid_search_model.actual_params['gam_columns'] == gam_columns \
and grid_search_model.actual_params['scale'] == scale \
and grid_search_model.actual_params['num_knots'] == num_knots \
and grid_search_model.actual_params['alpha'] == alpha:
self.num_grid_models += 1
assert grid_search_model.coef() == model.coef(), "coefficients should be equal"
break
assert self.num_grid_models == self.num_expected_models, "Grid search model parameters incorrect or incorrect number of models generated"
def test_gridsearch_specific():
test_gam_grid = test_gam_gridsearch_specific()
test_gam_grid.train_models()
test_gam_grid.match_models()
if __name__ == "__main__":
pyunit_utils.standalone_test(test_gridsearch_specific)
else:
test_gridsearch_specific()
|
h2oai/h2o-3
|
h2o-py/tests/testdir_algos/gam/pyunit_PUBDEV_7367_gridsearch_subspaces.py
|
Python
|
apache-2.0
| 4,652
|
[
"Gaussian"
] |
ec57b140239da5be9f1669c800ef9d906c46d74b33c5c5795b462ec8125256dc
|
import time
from nxtools import logging, s2tc
from firefly.qt import (
Qt,
QLineEdit,
QTextEdit,
QFontDatabase,
QFont,
QSpinBox,
QCheckBox,
QPushButton,
QColorDialog,
QColor,
)
class FireflyString(QLineEdit):
def __init__(self, parent, **kwargs):
super(FireflyString, self).__init__(parent)
self.default = self.get_value()
def set_value(self, value):
if value == self.get_value():
return
self.setText(str(value))
self.default = self.get_value()
def get_value(self):
return self.text()
class FireflyText(QTextEdit):
def __init__(self, parent, **kwargs):
super(FireflyText, self).__init__(parent)
fixed_font = QFontDatabase.systemFont(QFontDatabase.FixedFont)
fixed_font.setStyleHint(QFont.Monospace)
self.setCurrentFont(fixed_font)
self.setTabChangesFocus(True)
self.default = self.get_value()
def set_value(self, value):
if value == self.get_value():
return
self.setText(str(value))
self.default = self.get_value()
def get_value(self):
return self.toPlainText()
def insertFromMimeData(self, source):
self.insertPlainText(source.text())
class FireflyInteger(QSpinBox):
def __init__(self, parent, **kwargs):
super(FireflyInteger, self).__init__(parent)
self.setFocusPolicy(Qt.StrongFocus)
self.setMinimum(kwargs.get("min", 0))
self.setMaximum(kwargs.get("max", 99999))
if kwargs.get("hide_null"):
logging.info("HIDE NULL")
self.setMinimum(0)
self.setSpecialValueText(" ")
self.setSingleStep(1)
self.default = self.get_value()
def wheelEvent(self, event):
if self.hasFocus():
super(FireflyInteger, self).wheelEvent(event)
else:
event.ignore()
def set_value(self, value):
if value == self.get_value():
return
self.setValue(int(value))
self.default = self.get_value()
def get_value(self):
return int(self.value())
class FireflyNumeric(QSpinBox):
def __init__(self, parent, **kwargs):
super(FireflyNumeric, self).__init__(parent)
self.setFocusPolicy(Qt.StrongFocus)
self.setMinimum(kwargs.get("min", -99999))
self.setMaximum(kwargs.get("max", 99999))
if kwargs.get("hide_null"):
logging.info("HIDE NULL")
self.setMinimum(0)
self.setSpecialValueText(" ")
# TODO: custom step (default 1, allow floats)
self.default = self.get_value()
def wheelEvent(self, event):
if self.hasFocus():
super(FireflyNumeric, self).wheelEvent(event)
else:
event.ignore()
def set_value(self, value):
if value == self.get_value():
return
self.setValue(int(value))
self.default = self.get_value()
def get_value(self):
return self.value()
class FireflyDatetime(QLineEdit):
def __init__(self, parent, **kwargs):
super(FireflyDatetime, self).__init__(parent)
mode = kwargs.get("mode", "datetime")
if mode == "date":
self.mask = "9999-99-99"
self.format = "%Y-%m-%d"
elif mode == "year":
self.mask = "9999"
self.format = "%Y"
elif mode == "datetime":
self.mask = "9999-99-99 99:99"
self.format = "%Y-%m-%d %H:%M"
if kwargs.get("show_seconds", False):
self.mask += ":99"
self.format += ":%S"
self.setInputMask(self.mask)
self.default = self.get_value()
def set_value(self, timestamp):
self.setInputMask("")
if timestamp:
tt = time.localtime(timestamp)
self.setText(time.strftime(self.format, tt))
else:
self.setText(self.format.replace("9", "-"))
self.setInputMask(self.mask)
self.default = self.get_value()
def get_value(self):
if not self.text().replace("-", "").replace(":", "").strip():
return float(0)
t = time.strptime(self.text(), self.format)
return float(time.mktime(t))
class FireflyTimecode(QLineEdit):
def __init__(self, parent, **kwargs):
super(FireflyTimecode, self).__init__(parent)
self.fps = kwargs.get("fps", 25.0)
self.setInputMask("99:99:99:99")
self.setText("00:00:00:00")
self.default = self.get_value()
fm = self.fontMetrics()
w = fm.boundingRect(self.text()).width() + 16
self.setMinimumWidth(w)
self.setMaximumWidth(w)
def set_value(self, value):
self.setText(s2tc(value, self.fps))
self.setCursorPosition(0)
self.default = self.get_value()
def get_value(self):
hh, mm, ss, ff = [int(i) for i in self.text().split(":")]
return (hh * 3600) + (mm * 60) + ss + (ff / self.fps)
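    # Worked example: "00:01:30:12" at 25 fps -> 0*3600 + 1*60 + 30 + 12/25 = 90.48 seconds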
class FireflyBoolean(QCheckBox):
def __init__(self, parent, **kwargs):
super(FireflyBoolean, self).__init__(parent)
self.default = self.get_value()
def setReadOnly(self, val):
self.setEnabled(not val)
def set_value(self, value):
self.setChecked(bool(value))
def get_value(self):
return self.isChecked()
class FireflyColorPicker(QPushButton):
def __init__(self, parent, **kwargs):
super(FireflyColorPicker, self).__init__(parent)
self.color = 0
self.clicked.connect(self.execute)
def execute(self):
color = int(QColorDialog.getColor(QColor(self.color)).rgb())
self.set_value(color)
def get_value(self):
return self.color
def set_value(self, value):
self.color = value
self.setStyleSheet(f"background-color: #{self.color:06x}")
def setReadOnly(self, stat):
self.setEnabled(not stat)
|
immstudios/firefly
|
firefly/widgets/simple.py
|
Python
|
gpl-3.0
| 5,969
|
[
"Firefly"
] |
244f522980395e5d1c919b33a18ef3f9816662b68d364c8f1b5e703b3338824f
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Base model trainer."""
import json
import os
import random
import shutil
import time
from absl import logging
import numpy as np
from sklearn.mixture import GaussianMixture as GMM
from sklearn.svm import OneClassSVM
import tensorflow as tf
from tqdm import trange
from data.celeba import CelebA
from data.cifar import CIFAROOD
from data.dogvscat import DogVsCatOOD
from data.fmnist import FashionMNISTOOD
from model import resnet as model
import util.metric as util_metric
from util.scheduler import CustomLearningRateSchedule as CustomSchedule
_SUPPORTED_DATASET = frozenset([
'cifar10ood', 'cifar20ood', 'cifar100ood', 'fashion_mnistood', 'fmnistood',
'dogvscatood', 'dvcood', 'celeba'
])
def setup_tf():
logging.set_verbosity(logging.ERROR)
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if not physical_devices:
logging.info('No GPUs are detected')
for dev in physical_devices:
tf.config.experimental.set_memory_growth(dev, True)
return tf.distribute.MirroredStrategy()
class BaseTrain(object):
"""Base model trainer.
Model constructor:
Parameters
Data loader
Model architecture
Optimizer
Model trainer:
Custom train loop
Evaluation loop
"""
def __init__(self, hparams):
self.strategy = setup_tf()
self.hparams = hparams
# data
self.is_validation = hparams.is_validation
self.root = hparams.root
self.dataset = hparams.dataset
self.category = hparams.category
self.aug_list = hparams.aug_list.split(',')
self.aug_list_for_test = hparams.aug_list_for_test.split(
',') if hparams.aug_list_for_test is not None else None
self.input_shape = tuple(
[int(float(s)) for s in hparams.input_shape.split(',')])
try:
self.distaug_type = int(hparams.distaug_type)
except ValueError:
self.distaug_type = hparams.distaug_type
# network architecture
self.net_type = hparams.net_type
self.net_width = hparams.net_width
self.head_dims = tuple([int(d) for d in hparams.head_dims.split(',') if d
]) if hparams.head_dims not in [None, ''] else None
self.latent_dim = hparams.latent_dim
# optimizer
self.seed = hparams.seed
self.force_init = hparams.force_init
self.optim_type = hparams.optim_type
self.sched_type = hparams.sched_type
self.sched_freq = hparams.sched_freq
self.sched_step_size = hparams.sched_step_size
self.sched_gamma = hparams.sched_gamma
self.sched_min_rate = hparams.sched_min_rate
self.sched_level = hparams.sched_level
self.learning_rate = hparams.learning_rate
self.weight_decay = hparams.weight_decay
self.regularize_bn = hparams.regularize_bn
self.weight_decay_constraint = []
if self.regularize_bn:
self.weight_decay_constraint.append('bn')
self.momentum = hparams.momentum
self.nesterov = hparams.nesterov
self.num_epoch = hparams.num_epoch
self.num_batch = hparams.num_batch
self.batch_size = hparams.batch_size
# monitoring and checkpoint
self.ckpt_prefix = os.path.join(hparams.model_dir, hparams.ckpt_prefix)
self.ckpt_epoch = hparams.ckpt_epoch
self.file_path = hparams.file_path
# additional hparams
self.set_hparams(hparams=hparams)
self.set_metrics()
def set_random_seed(self):
seed = self.seed
if seed > 0:
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def set_hparams(self, hparams):
pass
def config(self):
"""Config."""
self.set_random_seed()
# Data loader.
self.get_dataloader()
# Model architecture.
self.model = self.get_model(
arch=self.net_type,
width=self.net_width,
head_dims=self.head_dims,
input_shape=self.input_shape,
num_class=self.latent_dim)
# Scheduler.
self.scheduler, self.sched_name = self.get_scheduler(
sched_type=self.sched_type,
step_per_epoch=1 if self.sched_freq == 'step' else self.num_batch,
max_step=self.num_epoch * self.num_batch,
learning_rate=self.learning_rate,
**{
'step_size': self.sched_step_size,
'gamma': self.sched_gamma,
'min_rate': self.sched_min_rate,
'level': self.sched_level
})
# Optimizer.
self.optimizer, self.optim_name = self.get_optimizer(
scheduler=self.scheduler,
optim_type=self.optim_type,
learning_rate=self.learning_rate,
**{
'momentum': self.momentum,
'nesterov': self.nesterov
})
# Set file path.
self.get_file_path()
def get_dataloader(self):
"""Gets the data loader."""
dl = self.get_dataset(self.root, self.dataset.lower(), self.category,
self.input_shape)
datasets = dl.load_dataset(
is_validation=self.is_validation,
aug_list=self.aug_list,
aug_list_for_test=self.aug_list_for_test,
batch_size=self.batch_size,
num_batch_per_epoch=self.num_batch,
distaug_type=self.distaug_type)
# train_loader: train data for representation learning (augmentation)
# cls_loader: train data for classifier learning (no augmentation)
# test_loader: test data
self.train_loader = datasets[0]
if isinstance(self.train_loader, (list, tuple)):
self.num_batch = self.train_loader[1]
self.train_loader = self.train_loader[0]
self.cls_loader = datasets[1]
self.test_loader = datasets[2]
self.db_name = dl.fname
if self.strategy:
self.train_loader = self.strategy.experimental_distribute_dataset(
self.train_loader)
self.cls_loader[0] = self.strategy.experimental_distribute_dataset(
self.cls_loader[0])
self.test_loader[0] = self.strategy.experimental_distribute_dataset(
self.test_loader[0])
@staticmethod
def get_dataset(root, dataset, category, input_shape):
"""Gets the dataset."""
if dataset not in _SUPPORTED_DATASET:
msg = (f'Unsupported dataset {dataset} is provided. Only '
f'{_SUPPORTED_DATASET} are available.')
raise ValueError(msg)
if dataset in ['cifar10ood', 'cifar20ood', 'cifar100ood']:
dl = CIFAROOD(
root=root,
dataset=dataset,
category=category,
input_shape=input_shape or (32, 32, 3))
elif dataset in ['fashion_mnistood', 'fmnistood']:
dl = FashionMNISTOOD(
root=root,
dataset=dataset,
category=category,
input_shape=input_shape or (32, 32, 3))
elif dataset in ['dogvscatood', 'dvcood']:
dl = DogVsCatOOD(
root=root,
dataset=dataset,
category=category,
input_shape=input_shape or (64, 64, 3))
elif dataset == 'celeba':
dl = CelebA(
root=root,
dataset=dataset,
category=category,
input_shape=input_shape or (64, 64, 3))
return dl
@staticmethod
def get_model(arch='ResNet18',
width=1.0,
head_dims=None,
input_shape=(256, 256, 3),
num_class=2):
"""Gets the ResNet model."""
net = model.__dict__[arch](
width=width,
head_dims=head_dims,
input_shape=input_shape,
num_class=num_class)
net.summary()
return net
@staticmethod
def get_optimizer(scheduler, optim_type='sgd', learning_rate=0.03, **kwargs):
"""Gets the optimizer."""
if optim_type == 'sgd':
momentum = kwargs['momentum'] if 'momentum' in kwargs else 0.9
nesterov = kwargs['nesterov'] if 'nesterov' in kwargs else False
optimizer = tf.keras.optimizers.SGD(
learning_rate=scheduler, momentum=momentum, nesterov=nesterov)
name = 'sgd_lr{:g}_mom{:g}'.format(learning_rate, momentum)
if nesterov:
name += '_nesterov'
elif optim_type == 'adam':
optimizer = tf.keras.optimizers.Adam(
learning_rate=scheduler, amsgrad=True)
name = 'adam_lr{:g}'.format(learning_rate)
else:
raise NotImplementedError
return optimizer, name
@staticmethod
def get_scheduler(sched_type='cosine',
step_per_epoch=1,
max_step=256,
learning_rate=0.1,
**kwargs):
"""Gets the scheduler."""
scheduler = CustomSchedule(
step_per_epoch=step_per_epoch,
base_lr=learning_rate,
max_step=max_step,
mode=sched_type,
**kwargs)
return scheduler, scheduler.name
def get_file_path(self):
"""Gets the file path for saving."""
if self.file_path:
self.file_path = os.path.join(self.ckpt_prefix, self.file_path)
else:
self.file_path = os.path.join(
self.ckpt_prefix, '{}_seed{}'.format(self.db_name, self.seed),
self.model.name, '{}_{}_{}_wd{:g}_{}_epoch{}_nb{}_bs{}'.format(
self.__class__.__name__, self.optim_name, self.sched_name,
self.weight_decay, '_'.join(self.weight_decay_constraint),
self.num_epoch, self.num_batch, self.batch_size))
if self.file_suffix:
self.file_path = '{}_{}'.format(self.file_path, self.file_suffix)
self.file_path = self.file_path.replace('__', '_')
self.json_path = os.path.join(self.file_path, 'stats')
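    # Example of a resulting json_path (hypothetical values, for illustration only):
    #   <ckpt_prefix>/<db_name>_seed1/ResNet18/BaseTrain_sgd_lr0.03_mom0.9_cosine_wd1e-05_epoch256_nb256_bs64/stats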
def get_current_train_epoch(self):
"""Returns current training epoch."""
return tf.math.floordiv(self.optimizer.iterations, self.num_batch).numpy()
def get_current_train_step(self):
"""Returns current training step."""
return self.optimizer.iterations
def get_checkpoint(self):
"""Restores from the checkpoint and returns start epoch."""
self.checkpoint.restore(self.manager.latest_checkpoint)
self.epoch = start_epoch = self.get_current_train_epoch()
self.step = self.get_current_train_step()
return start_epoch
def train(self):
"""Called for model training."""
start_epoch = self.train_begin()
if self.num_epoch == 0:
self.train_epoch_begin()
else:
for _ in range(start_epoch, self.num_epoch):
self.train_epoch_begin()
self.train_epoch()
self.train_epoch_end(
is_eval=False, is_save=(self.epoch % self.ckpt_epoch == 0))
self.train_epoch_end(is_eval=True, is_save=True)
self.train_end()
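  # Lifecycle sketch driven by train():
  #   train_begin -> [train_epoch_begin -> train_epoch -> train_epoch_end(is_eval=False)] x num_epoch
  #   -> train_epoch_end(is_eval=True, is_save=True) -> train_end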
def train_begin(self):
"""Initializes metrics, checkpoint, summary at the beginning of training."""
self.metrics = {}
self.metrics.update({
key: tf.keras.metrics.Mean()
for key in self.list_of_metrics
        if key.startswith('loss')
})
self.metrics.update({
key: tf.keras.metrics.Accuracy()
for key in self.list_of_metrics
if key.startswith('acc')
})
self.monitor = {
'learning_rate': 0,
'step_per_second': 0,
}
self.eval_metrics = {}
self.eval_metrics.update({key: None for key in self.list_of_eval_metrics})
if self.force_init:
shutil.rmtree(self.file_path, ignore_errors=True)
# Generate file paths
if not tf.io.gfile.isdir(self.file_path):
tf.io.gfile.makedirs(self.file_path)
if not tf.io.gfile.isdir(self.json_path):
tf.io.gfile.makedirs(self.json_path)
# Checkpoint
self.checkpoint = tf.train.Checkpoint(
optimizer=self.optimizer, model=self.model)
self.manager = tf.train.CheckpointManager(
checkpoint=self.checkpoint,
directory=os.path.join(self.file_path, 'raw'),
max_to_keep=1)
self.tensorboard_dir = os.path.join(self.file_path, 'tb')
self.summary_writer = tf.summary.create_file_writer(
logdir=self.tensorboard_dir)
# Initiate train iterator once
# Note that creating iterator every epoch slows down
# the training since it clears the data buffer
self.train_iterator = iter(self.train_loader)
self.cls_iterator = (iter(self.cls_loader[0]), self.cls_loader[1])
self.test_iterator = (iter(self.test_loader[0]), self.test_loader[1])
return self.get_checkpoint()
def train_end(self, verbose=False):
"""Saves and prints summary statistics."""
self.manager.save()
self.summary_writer.close()
if verbose:
logdir = self.tensorboard_dir
      event_files = tf.io.gfile.glob(os.path.join(logdir, '*'))
event_files.sort(key=os.path.getmtime)
event_dict = {
key: []
for key in self.metrics.keys()
if not key.startswith('monitor')
}
event_dict.update({key: [] for key in self.eval_metrics.keys()})
for event_file in event_files:
for event in tf.compat.v1.train.summary_iterator(event_file):
for v in event.summary.value:
if v.tag.replace('/', '.') in event_dict:
event_dict[v.tag.replace('/', '.')].append(
tf.make_ndarray(v.tensor).tolist())
# Print stats of last 20 epochs in json format
num_epoch_to_save = 20
event_dict = {
key: event_dict[key][-num_epoch_to_save:] for key in event_dict
}
if not os.path.isdir(self.json_path):
os.makedirs(self.json_path)
summary_dict = {}
for key in event_dict:
dict_to_write = {
'median (last%02d)' % x: np.median(event_dict[key][-x:])
for x in [1, 5, 10, num_epoch_to_save]
}
dict_to_write.update(
{'last%02d' % (num_epoch_to_save): event_dict[key]})
with open(os.path.join(self.json_path, key + '.json'), 'w') as outfile:
json.dump(dict_to_write, outfile, sort_keys=True, indent=4)
if key in self.metric_of_interest:
summary_dict.update({key: dict_to_write})
with open(os.path.join(self.json_path, 'summary.json'),
'w') as outfile:
json.dump(summary_dict, outfile, sort_keys=True, indent=4)
# Print basic information
logging.info('')
logging.info('----------------------------------------------------------')
logging.info('Training is done. Below are the file path and basic eval stats\n')
logging.info('File path:\n')
logging.info(self.file_path)
if not isinstance(self.metric_of_interest, (list, tuple)):
self.metric_of_interest = [self.metric_of_interest]
for moi in self.metric_of_interest:
del summary_dict[moi]['last%02d' % (num_epoch_to_save)]
logging.info('Eval stats:\n')
logging.info(json.dumps(summary_dict, sort_keys=True, indent=4))
logging.info('----------------------------------------------------------')
logging.info('')
else:
with tf.io.gfile.GFile(os.path.join(self.json_path, 'summary.json'),
'w') as outfile:
json.dump(self.eval_metrics, outfile, sort_keys=True, indent=4)
with tf.io.gfile.GFile(os.path.join(self.json_path, 'hparams.json'),
'w') as outfile:
json.dump(self.hparams, outfile, indent=4, sort_keys=True)
def train_epoch(self):
"""Called for model training per epoch."""
time_init = time.time()
for _ in trange(
self.num_batch,
leave=False,
desc='Epoch (train) %d/%d' % (self.epoch + 1, self.num_epoch)):
self.train_step(self.train_iterator)
self.monitor['step_per_second'] = self.num_batch / (time.time() - time_init)
def train_epoch_begin(self):
"""Called at the beginning of epoch.
- Reset metrics
- Adjust learning rate
"""
for _, metric in self.metrics.items():
metric.reset_states()
self.epoch = self.get_current_train_epoch()
self.step = self.get_current_train_step()
self.monitor['learning_rate'] = self.optimizer.learning_rate(
self.optimizer.iterations).numpy()
def train_epoch_end(self, is_eval=False, is_save=False):
"""Evaluates and monitors performance at the end of epoch."""
if is_save:
self.manager.save()
if is_eval:
self.eval_epoch(trainset=self.cls_iterator, testset=self.test_iterator)
self.monitor_progress(verbose=True)
@tf.function
def train_step(self, iterator):
"""Executes each train step."""
def step_fn(data):
replica_context = tf.distribute.get_replica_context()
xo, xc = data[0], data[1]
x = tf.concat((xo, xc), axis=0)
y = tf.concat((tf.zeros(
xo.shape[0], dtype=tf.int32), tf.ones(xc.shape[0], dtype=tf.int32)),
axis=0)
with tf.GradientTape() as tape:
logits = self.model(x, training=True)['logits']
loss_xe = tf.keras.losses.sparse_categorical_crossentropy(
y, logits, from_logits=True)
loss_xe = tf.divide(
tf.reduce_sum(loss_xe),
self.cross_replica_concat(loss_xe,
replica_context=replica_context).shape[0])
loss_l2 = self.loss_l2(self.model.trainable_weights)
loss = loss_xe + self.weight_decay * loss_l2
grad = tape.gradient(loss, self.model.trainable_weights)
self.optimizer.apply_gradients(zip(grad, self.model.trainable_weights))
# monitor
self.metrics['loss.train'].update_state(loss)
self.metrics['loss.xe'].update_state(loss_xe)
self.metrics['loss.L2'].update_state(loss_l2)
self.metrics['acc.train'].update_state(y, tf.argmax(logits, axis=1))
# Call one step
self.strategy.run(step_fn, args=(next(iterator),))
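# Note on the loss normalization in step_fn above: each replica computes
# sum(local per-example losses) / global_batch_size, where the global batch
# size is recovered from the shape of the cross-replica concatenation of
# the per-example losses. Since tf.distribute sums gradients across
# replicas, this recovers the gradient of the global mean loss; loss_l2
# below divides by num_replicas_in_sync for the same reason, as the
# identical L2 term would otherwise be counted once per replica.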
def loss_l2(self, var_list):
for c in self.weight_decay_constraint:
var_list = [v for v in var_list if c not in v.name]
loss_l2 = tf.add_n([tf.nn.l2_loss(v) for v in var_list])
return tf.divide(loss_l2, self.strategy.num_replicas_in_sync)
def squared_difference(self, a, b, do_normalization=True):
"""Computes (a-b) ** 2."""
if do_normalization:
a = tf.nn.l2_normalize(a, axis=1)
b = tf.nn.l2_normalize(b, axis=1)
return -2. * tf.matmul(a, b, transpose_b=True)
return tf.norm(
a, axis=1, keepdims=True)**2 + tf.transpose(
tf.norm(b, axis=1, keepdims=True)**2) - 2. * tf.matmul(
a, b, transpose_b=True)
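# Note on the normalized branch of squared_difference: for l2-normalized
# rows a and b, ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b = 2 - 2 a.b, so
# returning -2 * a @ b^T is the pairwise squared distance up to a constant
# offset of 2. The offset is harmless downstream: eval_embed consumes these
# values only through top_k rankings and AUC, and -0.5 * (-2 * a @ b^T)
# is exactly the cosine similarity matrix.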
def eval_epoch(self, trainset, testset):
self.eval_embed(trainset=trainset, testset=testset)
def eval_embed(self, trainset, testset):
"""Evaluate performance on test set."""
_, _, embeds_tr, pools_tr, _ = self.extract(trainset)
probs, dscores, embeds, pools, labels = self.extract(testset)
sim_embed = -0.5 * self.squared_difference(embeds, embeds_tr, True)
sim_pool = -0.5 * self.squared_difference(pools, pools_tr, True)
dist_embed = tf.reduce_mean(1.0 - tf.nn.top_k(sim_embed, k=1)[0], axis=1)
dist_pool = tf.reduce_mean(1.0 - tf.nn.top_k(sim_pool, k=1)[0], axis=1)
for key in self.eval_metrics:
if key.startswith('logit'):
pred = 1.0 - probs[:, 0]
elif key.startswith('dscore'):
pred = 1.0 - dscores
elif key.startswith('embed'):
pred = dist_embed
feats_tr = embeds_tr.numpy()
feats = embeds.numpy()
sim = sim_embed
elif key.startswith('pool'):
pred = dist_pool
feats_tr = pools_tr.numpy()
feats = pools.numpy()
sim = sim_pool
if 'auc' in key:
self.eval_metrics[key] = util_metric.roc(pr=pred, gt=labels)
elif 'locsvm' in key and key.startswith(('embed', 'pool')):
# Linear kernel OC-SVM.
clf = OneClassSVM(kernel='linear').fit(feats_tr)
scores = -clf.score_samples(feats)
self.eval_metrics[key] = util_metric.roc(pr=scores, gt=labels)
elif 'kocsvm' in key and key.startswith(('embed', 'pool')):
# RBF kernel OC-SVM.
feats_tr = tf.nn.l2_normalize(feats_tr, axis=1)
feats = tf.nn.l2_normalize(feats, axis=1)
# gamma is 10x the sklearn 'scale' default of 1 / (n_features * var).
gamma = 10. / (tf.math.reduce_variance(feats_tr) * feats_tr.shape[1])
clf = OneClassSVM(kernel='rbf', gamma=gamma).fit(feats_tr)
scores = -clf.score_samples(feats)
self.eval_metrics[key] = util_metric.roc(pr=scores, gt=labels)
elif 'kde' in key and key.startswith(('embed', 'pool')):
# RBF kernel density estimation.
feats_tr = tf.nn.l2_normalize(feats_tr, axis=1)
gamma = 10. / (tf.math.reduce_variance(feats_tr) * feats_tr.shape[1])
scores = None
batch_size_for_kde = 100
num_iter = int(np.ceil(sim.shape[0] / batch_size_for_kde))
for i in range(num_iter):
sim_batch = sim[i * batch_size_for_kde:(i + 1) * batch_size_for_kde]
scores_batch = -tf.divide(
tf.reduce_logsumexp(2 * gamma * sim_batch, axis=1), gamma)
scores = scores_batch if scores is None else tf.concat(
(scores, scores_batch), axis=0)
self.eval_metrics[key] = util_metric.roc(pr=scores, gt=labels)
elif 'gde' in key and key.startswith(('embed', 'pool')):
# Gaussian density estimation with full covariance.
feats_tr = tf.nn.l2_normalize(feats_tr, axis=1)
feats = tf.nn.l2_normalize(feats, axis=1)
km = GMM(n_components=1, init_params='kmeans', covariance_type='full')
km.fit(feats_tr)
scores = -km.score_samples(feats)
self.eval_metrics[key] = util_metric.roc(pr=scores, gt=labels)
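# Note on the 'kde' branch above: with sim = -0.5 * ||x - x_i||^2 computed
# on l2-normalized features, 2 * gamma * sim = -gamma * ||x - x_i||^2, so
# reduce_logsumexp(2 * gamma * sim, axis=1) equals (up to an additive
# constant) the log of an RBF kernel density estimate at x; negating and
# dividing by gamma turns it into an anomaly score where larger values
# mean the sample lies further from the training set.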
def extract(self, dataset):
"""Extract logits, embeds, pool, and labels."""
outputs = {
'logits': None,
'dscore': None,
'embeds': None,
'pools': None,
'labels': None
}
inference = self.model
iterator, num_batch = dataset[0], dataset[1]
if self.aug_list_for_test is not None:
num_aug = len(self.aug_list_for_test)
else:
num_aug = 1
for _ in trange(
num_batch,
leave=False,
desc='Extract %d/%d' % (self.epoch + 1, self.num_epoch)):
logits, embeds, pools, y = self.extract_step(iterator, inference)
if num_aug > 1:
probs = tf.nn.softmax(logits, axis=1)
probs = tf.split(probs, num_aug)
dscore = tf.math.exp(
tf.reduce_sum(
tf.math.log(
tf.concat([probs[i][:, i:i + 1] for i in range(len(probs))],
axis=1)),
axis=1))
logits = tf.split(logits, num_aug)[0]
embeds = tf.split(embeds, num_aug)[0]
pools = tf.split(pools, num_aug)[0]
else:
dscore = tf.nn.softmax(logits, axis=1)[:, 0]
outputs['logits'] = self.smart_concat(outputs['logits'], logits)
outputs['dscore'] = self.smart_concat(outputs['dscore'], dscore)
outputs['embeds'] = self.smart_concat(outputs['embeds'], embeds)
outputs['pools'] = self.smart_concat(outputs['pools'], pools)
outputs['labels'] = self.smart_concat(outputs['labels'], y)
return (tf.nn.softmax(outputs['logits'], axis=1), outputs['dscore'],
outputs['embeds'], outputs['pools'], tf.squeeze(outputs['labels']))
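# Note on dscore above: with test-time augmentations, the softmax outputs
# are split per augmentation and dscore = exp(sum_i log p_i(class i)), i.e.
# the product of each augmentation's probability of being classified as
# its own augmentation index; without augmentation it falls back to the
# probability of the un-augmented (index 0) class.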
@tf.function
def extract_step(self, iterator, inference):
"""Feature extract step."""
def step_fn(data):
"""Step."""
x, y = data[0:-2], data[-2]
output = inference(tf.concat(x, axis=0), training=False)
return (output['logits'], output['embeds'], output['pools'], y)
out = self.strategy.run(step_fn, args=(next(iterator),))
return [tf.concat(self.strategy.unwrap(o), axis=0) for o in out]
def monitor_progress(self, verbose=False):
"""Monitor train/eval variables."""
# Tensorboard
with self.summary_writer.as_default():
vis_step = (self.epoch + 1) * self.num_batch
for key, metric in self.metrics.items():
tf.summary.scalar(
key.replace('.', '/', 1), metric.result(), step=vis_step)
tf.summary.scalar(
'monitor/step_per_second',
self.monitor['step_per_second'],
step=vis_step)
tf.summary.scalar(
'monitor/lr', self.monitor['learning_rate'], step=vis_step)
if verbose:
for key, metric in self.eval_metrics.items():
if metric is not None:
tf.summary.scalar(key.replace('.', '/', 1), metric, step=vis_step)
# Command line.
template = ('Epoch {epoch:4d}/{max_epoch:4d}\tstep(sec): '
'{step_per_second:.3f}\tLoss: {loss:.3f}\tAcc: {acc:.3f}')
logging.info(
template.format(
epoch=self.epoch + 1,
max_epoch=self.num_epoch,
step_per_second=self.monitor['step_per_second'],
loss=self.metrics['loss.train'].result(),
acc=self.metrics['acc.train'].result()))
@staticmethod
def smart_concat(var1, var2):
"""Smart concat."""
def _smart_concat(var1, var2):
return var2 if var1 is None else tf.concat((var1, var2), axis=0)
if isinstance(var2, list):
if var1 is not None:
assert isinstance(var1, list)
return [_smart_concat(v1, v2) for v1, v2 in zip(var1, var2)]
else:
return var2
else:
if var1 is not None:
assert not isinstance(var1, list)
return _smart_concat(var1, var2)
@staticmethod
def cross_replica_concat(tensor, replica_context=None):
"""Reduces a concatenation of the `tensor` across TPU cores.
Args:
tensor: tensor to concatenate.
replica_context: A `replica_context`. If not set, CPU execution is
assumed.
Returns:
Tensor of the same rank as `tensor` with first dimension `num_replicas`
times larger.
"""
if replica_context is None or replica_context.num_replicas_in_sync <= 1:
return tensor
num_replicas = replica_context.num_replicas_in_sync
with tf.name_scope('cross_replica_concat'):
# This creates a tensor that is like the input tensor but has an added
# replica dimension as the outermost dimension. On each replica it will
# contain the local values and zeros for all other values that need to be
# fetched from other replicas.
ext_tensor = tf.scatter_nd(
indices=[[replica_context.replica_id_in_sync_group]],
updates=[tensor],
shape=[num_replicas] + tensor.shape.as_list())
# As every value is only present on one replica and 0 in all others,
# adding them all together will result in the full tensor on all replicas.
ext_tensor = replica_context.all_reduce(tf.distribute.ReduceOp.SUM,
ext_tensor)
# Flatten the replica dimension.
# The first dimension size will be: tensor.shape[0] * num_replicas
# Using [-1] trick to support also scalar input.
return tf.reshape(ext_tensor, [-1] + ext_tensor.shape.as_list()[2:])
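# Illustrative sketch (an assumption, not part of the original file): the
# scatter_nd trick above for a hypothetical 2-replica setup with a local
# tensor of shape [3]. On replica 0:
#   local = tf.constant([1., 2., 3.])
#   ext = tf.scatter_nd(indices=[[0]], updates=[local], shape=[2, 3])
#   # ext == [[1., 2., 3.], [0., 0., 0.]]
# Replica 1 places its values in row 1 instead, so an all-reduce SUM fills
# every row, and tf.reshape(ext, [-1] + ext.shape.as_list()[2:]) yields the
# full cross-replica concatenation on every replica.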
|
google-research/deep_representation_one_class
|
util/train.py
|
Python
|
apache-2.0
| 27,277
|
[
"Gaussian"
] |
373b3c56b027a42fc91c497a9387e6964a8f76e23b36c919dace9461f180595c
|
import os
from setuptools import setup
from setuptools import find_packages
version = '0.1'
shortdesc = "Search for bda.plone.shop"
longdesc = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
longdesc += open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')).read()
#longdesc += open(os.path.join(os.path.dirname(__file__), 'LICENSE.rst')).read()
setup(
name='bda.plone.shopsearch',
version=version,
description=shortdesc,
long_description=longdesc,
classifiers=[
'Environment :: Web Environment',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
author='Espen Moe-Nilssen',
author_email='espen@medialog.ni',
license='GNU General Public Licence',
packages=find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['bda', 'bda.plone'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'Plone',
'z3c.jbot',
],
extras_require={
'test': [
'plone.app.testing',
]
},
entry_points="""
[z3c.autoinclude.plugin]
target = plone
""",
)
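# Note (an assumption, not part of the original file): the
# [z3c.autoinclude.plugin] entry point above registers this package with
# Plone's autoinclude machinery, so its ZCML configuration is loaded
# automatically without an explicit <include package="..." /> line in the
# site's configure.zcml.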
|
espenmn/bda.plone.shopsearch
|
setup.py
|
Python
|
bsd-3-clause
| 1,314
|
[
"MOE"
] |
02876f74f27baca7dcf50e894ea3d0fb8d168d84fa7fa4bdf87e2d7040389cac
|
########################################################################
# $HeadURL $
# File: FTSAgent.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/05/31 10:00:13
########################################################################
""" :mod: FTSAgent
==============
.. module: FTSAgent
:synopsis: agent propagating scheduled RMS requests to FTS
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
DIRAC agent propagating scheduled RMS requests to FTS
Request processing phases (each in a separate thread):
1. MONITOR
...active FTSJobs, prepare FTSFiles dictionary with files to submit, fail, register and reschedule
2. CHECK REPLICAS
...just in case if all transfers are done, if yes, end processing
3. FAILED FILES:
...if at least one Failed FTSFile is found, set Request.Operation.File to 'Failed', end processing
4. UPDATE Waiting#TargetSE FTSFiles
...if any found in FTSDB
5. REGISTER REPLICA
...insert RegisterReplica operation to request, if some FTSFiles failed to register, end processing
6. RESCHEDULE FILES
...for FTSFiles failed with missing sources error
7. SUBMIT
...but read 'Waiting' FTSFiles first from FTSDB and merge those with FTSFiles to retry
"""
__RCSID__ = "$Id: $"
# #
# @file FTSAgent.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/05/31 10:00:51
# @brief Definition of FTSAgent class.
# # imports
import time
import datetime
import re
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gMonitor
# # from CS
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUsernameForDN
# # from Core
from DIRAC.Core.Utilities.LockRing import LockRing
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.Time import fromString
from DIRAC.Core.Utilities.List import breakListIntoChunks
# # from DMS
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.private.FTSGraph import FTSGraph
from DIRAC.DataManagementSystem.private.FTSHistoryView import FTSHistoryView
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile
# # from RMS
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
# # from RSS
# #from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
# # from Resources
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
# # from Accounting
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
# # agent base name
AGENT_NAME = "DataManagement/FTSAgent"
class escapeTry( Exception ):
pass
########################################################################
class FTSAgent( AgentModule ):
"""
.. class:: FTSAgent
Agent propagating Scheduled request to Done or Failed state in the FTS system.
Requests and associated FTSJobs (and so FTSFiles) are kept in cache.
"""
# # fts graph refresh in seconds
FTSGRAPH_REFRESH = FTSHistoryView.INTERVAL / 2
# # SE R/W access refresh in seconds
RW_REFRESH = 600
# # placeholder for max job per channel
MAX_ACTIVE_JOBS = 50
# # min threads
MIN_THREADS = 1
# # max threads
MAX_THREADS = 10
# # files per job
MAX_FILES_PER_JOB = 100
# # MAX FTS transfer per FTSFile
MAX_ATTEMPT = 256
# # stage flag
PIN_TIME = 0
# # FTS submission command
SUBMIT_COMMAND = 'glite-transfer-submit'
# # FTS monitoring command
MONITOR_COMMAND = 'glite-transfer-status'
# # placeholder for FTS client
__ftsClient = None
# # placeholder for request client
__requestClient = None
# # placeholder for resources helper
__resources = None
# # placeholder for RSS client
__rssClient = None
# # placeholder for FTSGraph
__ftsGraph = None
# # graph regeneration time delta
__ftsGraphValidStamp = None
# # r/w access valid stamp
__rwAccessValidStamp = None
# # placeholder for threadPool
__threadPool = None
# # update lock
__updateLock = None
# # se cache
__seCache = dict()
# # request cache
__reqCache = dict()
def updateLock( self ):
""" update lock """
if not self.__updateLock:
self.__updateLock = LockRing().getLock( "FTSAgentLock" )
return self.__updateLock
@classmethod
def requestClient( cls ):
""" request client getter """
if not cls.__requestClient:
cls.__requestClient = ReqClient()
return cls.__requestClient
@classmethod
def ftsClient( cls ):
""" FTS client """
if not cls.__ftsClient:
cls.__ftsClient = FTSClient()
return cls.__ftsClient
@classmethod
def rssClient( cls ):
""" RSS client getter """
if not cls.__rssClient:
cls.__rssClient = ResourceStatus()
return cls.__rssClient
@classmethod
def getSE( cls, seName ):
""" keep SEs in cache """
if seName not in cls.__seCache:
cls.__seCache[seName] = StorageElement( seName )
return cls.__seCache[seName]
@classmethod
def getSECache( cls ):
return cls.__seCache
@classmethod
def getRequest( cls, reqName ):
""" get Requests systematically and refresh cache """
getRequest = cls.requestClient().getRequest( reqName )
if not getRequest["OK"]:
cls.__reqCache.pop( reqName, None )
return getRequest
getRequest = getRequest["Value"]
if not getRequest:
cls.__reqCache.pop( reqName, None )
return S_ERROR( "request of name '%s' not found in ReqDB" % reqName )
cls.__reqCache[reqName] = getRequest
return S_OK( cls.__reqCache[reqName] )
@classmethod
def putRequest( cls, request, clearCache = True ):
""" put request back to ReqDB
:param Request request: Request instance
:param bool clearCache: clear the cache?
also finalize request if status == Done
"""
# # put back request
if request.RequestName not in cls.__reqCache:
return S_OK()
put = cls.requestClient().putRequest( request )
if not put["OK"]:
return put
# # finalize first if possible
if request.Status == "Done" and request.JobID:
finalizeRequest = cls.requestClient().finalizeRequest( request.RequestName, request.JobID )
if not finalizeRequest["OK"]:
request.Status = "Scheduled"
# # del request from cache if needed
if clearCache:
cls.__reqCache.pop( request.RequestName, None )
return S_OK()
@classmethod
def putFTSJobs( cls, ftsJobsList ):
""" put back fts jobs to the FTSDB """
for ftsJob in ftsJobsList:
put = cls.ftsClient().putFTSJob( ftsJob )
if not put["OK"]:
return put
return S_OK()
@staticmethod
def updateFTSFileDict( ftsFilesDict, toUpdateDict ):
""" update :ftsFilesDict: with FTSFiles in :toUpdateDict: """
for category, ftsFileList in ftsFilesDict.items():
for ftsFile in toUpdateDict.get( category, [] ):
if ftsFile not in ftsFileList:
ftsFileList.append( ftsFile )
return ftsFilesDict
# def resources( self ):
# """ resource helper getter """
# if not self.__resources:
# self.__resources = Resources()
# return self.__resources
def threadPool( self ):
""" thread pool getter """
if not self.__threadPool:
self.__threadPool = ThreadPool( self.MIN_THREADS, self.MAX_THREADS )
self.__threadPool.daemonize()
return self.__threadPool
def resetFTSGraph( self ):
""" create fts graph """
log = gLogger.getSubLogger( "ftsGraph" )
ftsHistory = self.ftsClient().getFTSHistory()
if not ftsHistory["OK"]:
log.error( "unable to get FTS history: %s" % ftsHistory["Message"] )
return ftsHistory
ftsHistory = ftsHistory["Value"]
try:
self.updateLock().acquire()
self.__ftsGraph = FTSGraph( "FTSGraph", ftsHistory )
finally:
self.updateLock().release()
log.debug( "FTSSites: %s" % len( self.__ftsGraph.nodes() ) )
for i, site in enumerate( self.__ftsGraph.nodes() ):
log.debug( " [%02d] FTSSite: %-25s FTSServer: %s" % ( i, site.name, site.FTSServer ) )
log.debug( "FTSRoutes: %s" % len( self.__ftsGraph.edges() ) )
for i, route in enumerate( self.__ftsGraph.edges() ):
log.debug( " [%02d] FTSRoute: %-25s Active FTSJobs (Max) = %s (%s)" % ( i,
route.routeName,
route.ActiveJobs,
route.toNode.MaxActiveJobs ) )
# # save graph stamp
self.__ftsGraphValidStamp = datetime.datetime.now() + datetime.timedelta( seconds = self.FTSGRAPH_REFRESH )
# # refresh SE R/W access
try:
self.updateLock().acquire()
self.__ftsGraph.updateRWAccess()
finally:
self.updateLock().release()
# # save rw access stamp
self.__rwAccessValidStamp = datetime.datetime.now() + datetime.timedelta( seconds = self.RW_REFRESH )
return S_OK()
def initialize( self ):
""" agent's initialization """
# # data manager
self.dataManager = DataManager()
log = self.log.getSubLogger( "initialize" )
self.FTSGRAPH_REFRESH = self.am_getOption( "FTSGraphValidityPeriod", self.FTSGRAPH_REFRESH )
log.info( "FTSGraph validity period = %s s" % self.FTSGRAPH_REFRESH )
self.RW_REFRESH = self.am_getOption( "RWAccessValidityPeriod", self.RW_REFRESH )
log.info( "SEs R/W access validity period = %s s" % self.RW_REFRESH )
self.SUBMIT_COMMAND = self.am_getOption( "SubmitCommand", self.SUBMIT_COMMAND )
log.info( "FTS submit command = %s" % self.SUBMIT_COMMAND )
self.MONITOR_COMMAND = self.am_getOption( "MonitorCommand", self.MONITOR_COMMAND )
log.info( "FTS commands: submit = %s monitor %s" % ( self.SUBMIT_COMMAND, self.MONITOR_COMMAND ) )
self.PIN_TIME = self.am_getOption( "PinTime", self.PIN_TIME )
log.info( "Stage files before submission = %s" % {True: "yes", False: "no"}[bool( self.PIN_TIME )] )
self.MAX_ACTIVE_JOBS = self.am_getOption( "MaxActiveJobsPerRoute", self.MAX_ACTIVE_JOBS )
log.info( "Max active FTSJobs/route = %s" % self.MAX_ACTIVE_JOBS )
self.MAX_FILES_PER_JOB = self.am_getOption( "MaxFilesPerJob", self.MAX_FILES_PER_JOB )
log.info( "Max FTSFiles/FTSJob = %d" % self.MAX_FILES_PER_JOB )
self.MAX_ATTEMPT = self.am_getOption( "MaxTransferAttempts", self.MAX_ATTEMPT )
log.info( "Max transfer attempts = %s" % self.MAX_ATTEMPT )
# # thread pool
self.MIN_THREADS = self.am_getOption( "MinThreads", self.MIN_THREADS )
self.MAX_THREADS = self.am_getOption( "MaxThreads", self.MAX_THREADS )
minmax = ( abs( self.MIN_THREADS ), abs( self.MAX_THREADS ) )
self.MIN_THREADS, self.MAX_THREADS = min( minmax ), max( minmax )
log.info( "ThreadPool min threads = %s" % self.MIN_THREADS )
log.info( "ThreadPool max threads = %s" % self.MAX_THREADS )
log.info( "initialize: creation of FTSGraph..." )
createGraph = self.resetFTSGraph()
if not createGraph["OK"]:
log.error( "initialize: %s" % createGraph["Message"] )
return createGraph
# This sets the Default Proxy to used as that defined under
# /Operations/Shifter/DataManager
# the shifterProxy option in the Configuration can be used to change this default.
self.am_setOption( 'shifterProxy', 'DataManager' )
log.info( "will use DataManager proxy" )
# # gMonitor stuff here
gMonitor.registerActivity( "RequestsAtt", "Attempted requests executions",
"FTSAgent", "Requests/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RequestsOK", "Successful requests executions",
"FTSAgent", "Requests/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RequestsFail", "Failed requests executions",
"FTSAgent", "Requests/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsSubAtt", "FTSJobs creation attempts",
"FTSAgent", "Created FTSJobs/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsSubOK", "FTSJobs submitted successfully",
"FTSAgent", "Successful FTSJobs submissions/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsSubFail", "FTSJobs submissions failed",
"FTSAgent", "Failed FTSJobs submissions/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsMonAtt", "FTSJobs monitored",
"FTSAgent", "FTSJobs/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsMonOK", "FTSJobs monitored successfully",
"FTSAgent", "FTSJobs/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsMonFail", "FTSJobs attempts failed",
"FTSAgent", "FTSJobs/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSMonitorFail", "Failed FTS monitor executions",
"FTSAgent", "Execution/mins", gMonitor.OP_SUM )
pollingTime = self.am_getOption( "PollingTime", 60 )
for status in list( FTSJob.INITSTATES + FTSJob.TRANSSTATES + FTSJob.FAILEDSTATES + FTSJob.FINALSTATES ):
gMonitor.registerActivity( "FTSJobs%s" % status, "FTSJobs %s" % status ,
"FTSAgent", "FTSJobs/cycle", gMonitor.OP_ACUM, pollingTime )
gMonitor.registerActivity( "FtSJobsPerRequest", "Average FTSJobs per request",
"FTSAgent", "FTSJobs/Request", gMonitor.OP_MEAN )
gMonitor.registerActivity( "FTSFilesPerJob", "FTSFiles per FTSJob",
"FTSAgent", "Number of FTSFiles per FTSJob", gMonitor.OP_MEAN )
gMonitor.registerActivity( "FTSSizePerJob", "Average FTSFiles size per FTSJob",
"FTSAgent", "Average submitted size per FTSJob", gMonitor.OP_MEAN )
return S_OK()
def finalize( self ):
""" finalize processing """
# log = self.log.getSubLogger( "finalize" )
# if self.__reqCache:
# log.info( 'putting back %d requests from cache' % len( self.__reqCache ) )
# else:
# log.info( 'no requests to put back' )
# for request in self.__reqCache.values():
# put = self.requestClient().putRequest( request )
# if not put["OK"]:
# log.error( "unable to put back request '%s': %s" % ( request.RequestName, put["Message"] ) )
return S_OK()
def execute( self ):
""" one cycle execution """
log = gLogger.getSubLogger( "execute" )
# # reset FTSGraph if expired
now = datetime.datetime.now()
if now > self.__ftsGraphValidStamp:
log.info( "resetting expired FTS graph..." )
resetFTSGraph = self.resetFTSGraph()
if not resetFTSGraph["OK"]:
log.error( "FTSGraph recreation error: %s" % resetFTSGraph["Message"] )
return resetFTSGraph
self.__ftsGraphValidStamp = now + datetime.timedelta( seconds = self.FTSGRAPH_REFRESH )
# # update R/W access in FTSGraph if expired
if now > self.__rwAccessValidStamp:
log.info( "updating expired R/W access for SEs..." )
try:
self.updateLock().acquire()
self.__ftsGraph.updateRWAccess()
finally:
self.updateLock().release()
self.__rwAccessValidStamp = now + datetime.timedelta( seconds = self.RW_REFRESH )
requestNames = self.requestClient().getRequestNamesList( [ "Scheduled" ] )
if not requestNames["OK"]:
log.error( "unable to read scheduled request names: %s" % requestNames["Message"] )
return requestNames
if not requestNames["Value"]:
requestNames = self.__reqCache.keys()
else:
requestNames = [ req[0] for req in requestNames["Value"] ]
requestNames = list( set ( requestNames + self.__reqCache.keys() ) )
if not requestNames:
log.info( "no 'Scheduled' requests to process" )
return S_OK()
log.info( "found %s requests to process:" % len( requestNames ) )
log.info( " => from internal cache: %s" % ( len( self.__reqCache ) ) )
log.info( " => new read from RMS: %s" % ( len( requestNames ) - len( self.__reqCache ) ) )
for requestName in requestNames:
request = self.getRequest( requestName )
if not request["OK"]:
log.error( request["Message"] )
continue
request = request["Value"]
sTJId = request.RequestName
while True:
queue = self.threadPool().generateJobAndQueueIt( self.processRequest,
args = ( request, ),
sTJId = sTJId )
if queue["OK"]:
log.info( "request '%s' enqueued for execution" % sTJId )
gMonitor.addMark( "RequestsAtt", 1 )
break
time.sleep( 1 )
# # process all results
self.threadPool().processAllResults()
return S_OK()
def processRequest( self, request ):
""" process one request
:param Request request: ReqDB.Request
"""
log = self.log.getSubLogger( request.RequestName )
operation = request.getWaiting()
if not operation["OK"]:
log.error( "unable to find 'Scheduled' ReplicateAndRegister operation in request" )
return self.putRequest( request )
operation = operation["Value"]
if not isinstance( operation, Operation ):
log.error( "waiting returned operation is not an operation: %s" % type( operation ) )
return self.putRequest( request )
if operation.Type != "ReplicateAndRegister":
log.error( "operation to be executed is not a ReplicateAndRegister but %s" % operation.Type )
return self.putRequest( request )
if operation.Status != "Scheduled":
log.error( "operation in a wrong state, expecting 'Scheduled', got %s" % operation.Status )
return self.putRequest( request )
log.info( 'start processRequest' )
# # select FTSJobs, by default all in TRANS_STATES and INIT_STATES
ftsJobs = self.ftsClient().getFTSJobsForRequest( request.RequestID )
if not ftsJobs["OK"]:
log.error( ftsJobs["Message"] )
return ftsJobs
ftsJobs = [ftsJob for ftsJob in ftsJobs.get( "Value", [] ) if ftsJob.Status not in FTSJob.FINALSTATES]
# # Use a try: finally: for making sure FTS jobs are put back before returning
try:
# # dict keeping info about files to reschedule, submit, fail and register
ftsFilesDict = dict( [ ( k, list() ) for k in ( "toRegister", "toSubmit", "toFail", "toReschedule", "toUpdate" ) ] )
if ftsJobs:
log.info( "==> found %s FTSJobs to monitor" % len( ftsJobs ) )
# # PHASE 0 = monitor active FTSJobs
for ftsJob in ftsJobs:
monitor = self.__monitorJob( request, ftsJob )
if not monitor["OK"]:
log.error( "unable to monitor FTSJob %s: %s" % ( ftsJob.FTSJobID, monitor["Message"] ) )
ftsJob.Status = "Submitted"
else:
ftsFilesDict = self.updateFTSFileDict( ftsFilesDict, monitor["Value"] )
log.info( "monitoring of FTSJobs completed" )
for key, ftsFiles in ftsFilesDict.items():
if ftsFiles:
log.debug( " => %s FTSFiles to %s" % ( len( ftsFiles ), key[2:].lower() ) )
# # PHASE ONE - check ready replicas
missingReplicas = self.__checkReadyReplicas( request, operation )
if not missingReplicas["OK"]:
log.error( missingReplicas["Message"] )
else:
missingReplicas = missingReplicas["Value"]
for opFile in operation:
# Actually the condition below should never happen... Change printout for checking
if opFile.LFN not in missingReplicas and opFile.Status not in ( 'Done', 'Failed' ):
log.warn( "File should be set Done! %s is replicated at all targets" % opFile.LFN )
opFile.Status = "Done"
if missingReplicas:
# Check if these files are in the FTSDB
ftsFiles = self.ftsClient().getAllFTSFilesForRequest( request.RequestID )
if not ftsFiles['OK']:
log.error( ftsFiles['Message'] )
else:
ftsFiles = ftsFiles['Value']
ftsLfns = set( [ftsFile.LFN for ftsFile in ftsFiles] )
# Recover files not in FTSDB
toSchedule = set( missingReplicas ) - ftsLfns
if toSchedule:
log.warn( '%d files in operation are not in FTSDB, reset them Waiting' % len( toSchedule ) )
for opFile in operation:
if opFile.LFN in toSchedule and opFile.Status == 'Scheduled':
opFile.Status = 'Waiting'
# Recover files with target not in FTSDB
toSchedule = set( [missing for missing, missingSEs in missingReplicas.items()
if not [ftsFile for ftsFile in ftsFiles
if ftsFile.LFN == missing and ftsFile.TargetSE in missingSEs]] )
if toSchedule:
log.warn( '%d targets in operation are not in FTSDB, reset files Waiting' % len( toSchedule ) )
for opFile in operation:
if opFile.LFN in toSchedule and opFile.Status == 'Scheduled':
opFile.Status = 'Waiting'
# identify missing LFNs that are waiting for a replication which is finished
for ftsFile in [f for f in ftsFiles if f.LFN in missingReplicas and f.Status.startswith( 'Waiting#' )]:
targetSE = ftsFile.Status.split( '#' )[1]
finishedFiles = [f for f in ftsFiles if
f.LFN == ftsFile.LFN and
f.Status == 'Finished' and
f.TargetSE == targetSE and
f not in ftsFilesDict['toUpdate']]
if finishedFiles:
log.warn( "%s is %s while replication was Finished to %s, update" % ( ftsFile.LFN, ftsFile.Status, targetSE ) )
ftsFilesDict['toUpdate'] += finishedFiles
# identify Finished transfer for which the replica is still missing
for ftsFile in [f for f in ftsFiles if f.Status == 'Finished' and f.TargetSE in missingReplicas.get( f.LFN, [] ) and f not in ftsFilesDict['toRegister'] ]:
# Check if there is a registration operation for that file and that target
regOp = [op for op in request if
op.Type == 'RegisterReplica' and
op.TargetSE == ftsFile.TargetSE and
[f for f in op if f.LFN == ftsFile.LFN]]
if not regOp:
ftsFilesDict['toReschedule'].append( ftsFile )
toFail = ftsFilesDict.get( "toFail", [] )
toReschedule = ftsFilesDict.get( "toReschedule", [] )
toSubmit = ftsFilesDict.get( "toSubmit", [] )
toRegister = ftsFilesDict.get( "toRegister", [] )
toUpdate = ftsFilesDict.get( "toUpdate", [] )
# # PHASE TWO = Failed files? -> make request Failed and return
if toFail:
log.error( "==> found %s 'Failed' FTSFiles, but maybe other files can be processed..." % len( toFail ) )
for opFile in operation:
for ftsFile in toFail:
if opFile.FileID == ftsFile.FileID:
opFile.Error = ftsFile.Error
opFile.Status = "Failed"
operation.Error = "%s files are missing any replicas" % len( toFail )
# # request.Status should be Failed if all files in the operation are "Failed"
if request.Status == "Failed":
request.Error = "ReplicateAndRegister %s failed" % operation.Order
log.error( "request is set to 'Failed'" )
# # putRequest is done by the finally: clause... Not good to do it twice
raise escapeTry
# # PHASE THREE - update Waiting#TargetSE FTSFiles
if toUpdate:
log.info( "==> found %s possible FTSFiles to update..." % ( len( toUpdate ) ) )
byTarget = {}
for ftsFile in toUpdate:
byTarget.setdefault( ftsFile.TargetSE, [] ).append( ftsFile.FileID )
for targetSE, fileIDList in byTarget.items():
update = self.ftsClient().setFTSFilesWaiting( operation.OperationID, targetSE, fileIDList )
if not update["OK"]:
log.error( "update FTSFiles failed: %s" % update["Message"] )
# # PHASE FOUR - add 'RegisterReplica' Operations
if toRegister:
log.info( "==> found %d Files waiting for registration, adding 'RegisterReplica' operations" % len( toRegister ) )
registerFiles = self.__insertRegisterOperation( request, operation, toRegister )
if not registerFiles["OK"]:
log.error( "unable to create 'RegisterReplica' operations: %s" % registerFiles["Message"] )
# if request.Status == "Waiting":
# log.info( "request is in 'Waiting' state, will put it back to RMS" )
# return self.putRequest( request )
# # PHASE FIVE - reschedule operation files
if toReschedule:
log.info( "==> found %s Files to reschedule" % len( toReschedule ) )
rescheduleFiles = self.__reschedule( request, operation, toReschedule )
if not rescheduleFiles["OK"]:
log.error( rescheduleFiles["Message"] )
# # PHASE SIX - read Waiting ftsFiles and submit new FTSJobs. We get also Failed files to recover them if needed
ftsFiles = self.ftsClient().getFTSFilesForRequest( request.RequestID, [ "Waiting", "Failed", 'Submitted', 'Canceled' ] )
if not ftsFiles["OK"]:
log.error( ftsFiles["Message"] )
else:
retryIds = set ( [ ftsFile.FTSFileID for ftsFile in toSubmit ] )
for ftsFile in ftsFiles["Value"]:
if ftsFile.FTSFileID not in retryIds:
if ftsFile.Status in ( 'Failed', 'Canceled' ):
# If the file was not unrecoverable failed and is not yet set toSubmit
_reschedule, submit, _fail = self.__checkFailed( ftsFile )
elif ftsFile.Status == 'Submitted':
if ftsFile.FTSGUID not in [job.FTSGUID for job in ftsJobs]:
log.warn( 'FTS GUID %s not found in FTS jobs, resubmit file transfer' % ftsFile.FTSGUID )
ftsFile.Status = 'Waiting'
submit = True
else:
submit = False
else:
submit = True
if submit:
toSubmit.append( ftsFile )
retryIds.add( ftsFile.FTSFileID )
# # submit new ftsJobs
if toSubmit:
if request.Status != 'Scheduled':
log.info( "Found %d FTSFiles to submit while request is no longer in Scheduled status (%s)" \
% ( len( toSubmit ), request.Status ) )
else:
self.__checkDuplicates( request.RequestName, toSubmit )
log.info( "==> found %s FTSFiles to submit" % len( toSubmit ) )
submit = self.__submit( request, operation, toSubmit )
if not submit["OK"]:
log.error( submit["Message"] )
else:
ftsJobs += submit["Value"]
# # status change? - put back request
if request.Status != "Scheduled":
log.info( "request no longer in 'Scheduled' state (%s), will put it back to RMS" % request.Status )
except escapeTry:
# This clause is raised when one wants to return from within the try: clause
pass
except Exception, exceptMessage:
log.exception( "Exception in processRequest", lException = exceptMessage )
finally:
putRequest = self.putRequest( request, clearCache = ( request.Status != "Scheduled" ) )
if not putRequest["OK"]:
log.error( "unable to put back request:", putRequest["Message"] )
# # put back jobs in all cases
if ftsJobs:
for ftsJob in list( ftsJobs ):
if not len( ftsJob ):
log.warn( 'FTS job empty, removed: %s' % ftsJob.FTSGUID )
self.ftsClient().deleteFTSJob( ftsJob.FTSJobID )
ftsJobs.remove( ftsJob )
putJobs = self.putFTSJobs( ftsJobs )
if not putJobs["OK"]:
log.error( "unable to put back FTSJobs: %s" % putJobs["Message"] )
putRequest = putJobs
# This is where one returns from after execution of the finally: block
return putRequest
def __checkDuplicates( self, name, toSubmit ):
""" Check in a list of FTSFiles whether there are duplicates
"""
tupleList = []
log = self.log.getSubLogger( "%s/checkDuplicates" % name )
for ftsFile in list( toSubmit ):
fTuple = ( ftsFile.LFN, ftsFile.SourceSE, ftsFile.TargetSE )
if fTuple in tupleList:
log.warn( "Duplicate file to submit, removed:", ', '.join( fTuple ) )
toSubmit.remove( ftsFile )
self.ftsClient().deleteFTSFiles( ftsFile.OperationID, [ftsFile.FileID] )
else:
tupleList.append( fTuple )
def __reschedule( self, request, operation, toReschedule ):
""" reschedule list of :toReschedule: files in request for operation :operation:
:param Request request:
:param Operation operation:
:param list toReschedule: list of FTSFiles
"""
log = self.log.getSubLogger( "%s/reschedule" % request.RequestName )
ftsFileIDs = [ftsFile.FileID for ftsFile in toReschedule]
for opFile in operation:
if opFile.FileID in ftsFileIDs:
opFile.Status = "Waiting"
toSchedule = []
# # filter files
for opFile in operation.getWaitingFilesList():
replicas = self.__filterReplicas( opFile )
if not replicas["OK"]:
continue
replicas = replicas["Value"]
validReplicas = replicas["Valid"]
noMetaReplicas = replicas["NoMetadata"]
noReplicas = replicas["NoReplicas"]
badReplicas = replicas['Bad']
if validReplicas:
validTargets = list( set( operation.targetSEList ) - set( validReplicas ) )
if not validTargets:
log.info( "file %s is already present at all targets" % opFile.LFN )
opFile.Status = "Done"
else:
toSchedule.append( ( opFile.toJSON()["Value"], validReplicas, validTargets ) )
elif noMetaReplicas:
log.warn( "unable to schedule '%s', couldn't get metadata at %s" % ( opFile.LFN, ','.join( noMetaReplicas ) ) )
elif noReplicas:
log.warn( "unable to schedule %s, file doesn't exist at %s" % ( opFile.LFN, ','.join( noReplicas ) ) )
opFile.Status = 'Failed'
elif badReplicas:
log.warn( "unable to schedule %s, all replicas have a bad checksum at %s" % ( opFile.LFN, ','.join( badReplicas ) ) )
opFile.Status = 'Failed'
# # do real schedule here
if toSchedule:
log.info( "Rescheduling %d files" % len( toReschedule ) )
ftsSchedule = self.ftsClient().ftsSchedule( request.RequestID,
operation.OperationID,
toSchedule )
if not ftsSchedule["OK"]:
log.error( "Error scheduling files", ftsSchedule["Message"] )
return ftsSchedule
ftsSchedule = ftsSchedule["Value"]
for opFile in operation:
fileID = opFile.FileID
if fileID in ftsSchedule["Successful"]:
opFile.Status = "Scheduled"
elif fileID in ftsSchedule["Failed"]:
opFile.Error = ftsSchedule["Failed"][fileID]
log.error( "Error scheduling file %s" % opFile.LFN, opFile.Error )
return S_OK()
def __submit( self, request, operation, toSubmit ):
""" create and submit new FTSJobs using list of FTSFiles
:param Request request: ReqDB.Request instance
:param list toSubmit: list of FTSFile instances
:return: [ FTSJob, FTSJob, ...]
"""
log = self.log.getSubLogger( "%s/submit" % request.RequestName )
bySourceAndTarget = {}
for ftsFile in toSubmit:
bySourceAndTarget.setdefault( ftsFile.SourceSE, {} ).setdefault( ftsFile.TargetSE, [] ).append( ftsFile )
ftsJobs = []
for source, targetDict in bySourceAndTarget.items():
for target, ftsFileList in targetDict.items():
log.info( "found %s files to submit from %s to %s" % ( len( ftsFileList ), source, target ) )
route = self.__ftsGraph.findRoute( source, target )
if not route["OK"]:
log.error( route["Message"] )
continue
route = route["Value"]
sourceRead = route.fromNode.SEs[source]["read"]
if not sourceRead:
log.error( "SourceSE %s is banned for reading right now" % source )
continue
targetWrite = route.toNode.SEs[target]["write"]
if not targetWrite:
log.error( "TargetSE %s is banned for writing right now" % target )
continue
if route.ActiveJobs > route.toNode.MaxActiveJobs:
log.warn( "unable to submit new FTS job, max active jobs reached" )
continue
sourceSE = self.getSE( source )
sourceToken = sourceSE.getStorageParameters( "SRM2" )
if not sourceToken["OK"]:
log.error( "unable to get sourceSE '%s' parameters: %s" % ( source, sourceToken["Message"] ) )
continue
seStatus = sourceSE.getStatus()['Value']
targetSE = self.getSE( target )
targetToken = targetSE.getStorageParameters( "SRM2" )
if not targetToken["OK"]:
log.error( "unable to get targetSE '%s' parameters: %s" % ( target, targetToken["Message"] ) )
continue
# # create FTSJob
for fileList in breakListIntoChunks( ftsFileList, self.MAX_FILES_PER_JOB ):
ftsJob = FTSJob()
ftsJob.RequestID = request.RequestID
ftsJob.OperationID = operation.OperationID
ftsJob.SourceSE = source
ftsJob.TargetSE = target
ftsJob.SourceToken = sourceToken["Value"].get( "SpaceToken", "" )
ftsJob.TargetToken = targetToken["Value"].get( "SpaceToken", "" )
ftsJob.FTSServer = route.toNode.FTSServer
for ftsFile in fileList:
ftsFile.Attempt += 1
ftsFile.Error = ""
ftsJob.addFile( ftsFile )
submit = ftsJob.submitFTS2( command = self.SUBMIT_COMMAND, pinTime = self.PIN_TIME if seStatus['TapeSE'] else 0 )
if not submit["OK"]:
log.error( "unable to submit FTSJob: %s" % submit["Message"] )
continue
log.info( "FTSJob '%s'@'%s' has been submitted" % ( ftsJob.FTSGUID, ftsJob.FTSServer ) )
# # update statuses for job files
for ftsFile in ftsJob:
ftsFile.FTSGUID = ftsJob.FTSGUID
ftsFile.Status = "Submitted"
ftsFile.Attempt += 1
# # update graph route
try:
self.updateLock().acquire()
route.ActiveJobs += 1
finally:
self.updateLock().release()
ftsJobs.append( ftsJob )
log.info( "%s new FTSJobs have been submitted" % len( ftsJobs ) )
return S_OK( ftsJobs )
def __monitorJob( self, request, ftsJob ):
""" execute FTSJob.monitorFTS2 for a given :ftsJob:
if ftsJob is in a final state, finalize it
:param Request request: ReqDB.Request instance
:param FTSJob ftsJob: FTSDB.FTSJob instance
"""
log = self.log.getSubLogger( "%s/monitor/%s" % ( request.RequestName, ftsJob.FTSGUID ) )
log.info( "FTSJob '%s'@'%s'" % ( ftsJob.FTSGUID, ftsJob.FTSServer ) )
# # this will be returned
ftsFilesDict = dict( [ ( k, list() ) for k in ( "toRegister", "toSubmit", "toFail", "toReschedule", "toUpdate" ) ] )
monitor = ftsJob.monitorFTS2( command = self.MONITOR_COMMAND )
if not monitor["OK"]:
gMonitor.addMark( "FTSMonitorFail", 1 )
log.error( monitor["Message"] )
if "getTransferJobSummary2: Not authorised to query request" in monitor["Message"] or \
'was not found' in monitor['Message'] or\
'Unknown transfer state' in monitor['Message']:
log.error( "FTSJob not known (expired on server?): delete it" )
for ftsFile in ftsJob:
ftsFile.Status = "Waiting"
ftsFilesDict["toSubmit"].append( ftsFile )
# # No way further for that job: delete it
res = self.ftsClient().deleteFTSJob( ftsJob.FTSJobID )
if not res['OK']:
log.error( "Unable to delete FTSJob", res['Message'] )
return S_OK( ftsFilesDict )
return monitor
monitor = monitor["Value"]
log.info( "FTSJob Status = %s Completeness = %s" % ( ftsJob.Status, ftsJob.Completeness ) )
# # monitor status change
gMonitor.addMark( "FTSJobs%s" % ftsJob.Status, 1 )
if ftsJob.Status in FTSJob.FINALSTATES:
finalizeFTSJob = self.__finalizeFTSJob( request, ftsJob )
if not finalizeFTSJob["OK"]:
if 'Unknown transfer state' in monitor['Message']:
for ftsFile in ftsJob:
ftsFile.Status = "Waiting"
ftsFilesDict["toSubmit"].append( ftsFile )
# # No way further for that job: delete it
res = self.ftsClient().deleteFTSJob( ftsJob.FTSJobID )
if not res['OK']:
log.error( "Unable to delete FTSJob", res['Message'] )
else:
log.error( finalizeFTSJob["Message"] )
return finalizeFTSJob
else:
ftsFilesDict = self.updateFTSFileDict( ftsFilesDict, finalizeFTSJob["Value"] )
return S_OK( ftsFilesDict )
def __finalizeFTSJob( self, request, ftsJob ):
""" finalize FTSJob
:param Request request: ReqDB.Request instance
:param FTSJob ftsJob: FTSDB.FTSJob instance
"""
log = self.log.getSubLogger( "%s/monitor/%s/finalize" % ( request.RequestName, ftsJob.FTSJobID ) )
log.info( "finalizing FTSJob %s@%s" % ( ftsJob.FTSGUID, ftsJob.FTSServer ) )
# # this will be returned
ftsFilesDict = dict( [ ( k, list() ) for k in ( "toRegister", "toSubmit", "toFail", "toReschedule", "toUpdate" ) ] )
monitor = ftsJob.monitorFTS2( command = self.MONITOR_COMMAND, full = True )
if not monitor["OK"]:
log.error( monitor["Message"] )
return monitor
# # split FTSFiles to different categories
processFiles = self.__filterFiles( ftsJob )
if not processFiles["OK"]:
log.error( processFiles["Message"] )
return processFiles
ftsFilesDict = self.updateFTSFileDict( ftsFilesDict, processFiles["Value"] )
# # send accounting record for this job
self.__sendAccounting( ftsJob, request.OwnerDN )
# # update graph - remove this job from graph
route = self.__ftsGraph.findRoute( ftsJob.SourceSE, ftsJob.TargetSE )
if route["OK"]:
try:
self.updateLock().acquire()
route["Value"].ActiveJobs -= 1
finally:
self.updateLock().release()
log.info( "FTSJob is finalized" )
return S_OK( ftsFilesDict )
def __checkFailed( self, ftsFile ):
reschedule = False
submit = False
fail = False
if ftsFile.Status in ( "Failed", 'Canceled' ):
if ftsFile.Error == "MissingSource":
reschedule = True
else:
if ftsFile.Attempt < self.MAX_ATTEMPT:
submit = True
else:
fail = True
return reschedule, submit, fail
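# Decision summary for __checkFailed above: a 'MissingSource' error always
# reschedules the file; any other Failed/Canceled file is resubmitted until
# MAX_ATTEMPT transfer attempts are reached, after which it is marked to
# fail permanently.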
def __filterFiles( self, ftsJob ):
""" process ftsFiles from finished ftsJob
:param FTSJob ftsJob: monitored FTSJob instance
"""
# # lists for different categories
toUpdate = []
toReschedule = []
toRegister = []
toSubmit = []
toFail = []
# # loop over files in fts job
for ftsFile in ftsJob:
# # successful files
if ftsFile.Status == "Finished":
if ftsFile.Error == "AddCatalogReplicaFailed":
toRegister.append( ftsFile )
toUpdate.append( ftsFile )
continue
reschedule, submit, fail = self.__checkFailed( ftsFile )
if reschedule:
toReschedule.append( ftsFile )
elif submit:
toSubmit.append( ftsFile )
elif fail:
toFail.append( ftsFile )
return S_OK( { "toUpdate": toUpdate,
"toSubmit": toSubmit,
"toRegister": toRegister,
"toReschedule": toReschedule,
"toFail": toFail } )
def __insertRegisterOperation( self, request, operation, toRegister ):
""" add RegisterReplica operation
:param Request request: request instance
:param Operation operation: 'ReplicateAndRegister' operation for this FTSJob
:param list toRegister: [ FTSDB.FTSFile, ... ] - files that failed to register
"""
log = self.log.getSubLogger( "%s/registerFiles" % request.RequestName )
byTarget = {}
for ftsFile in toRegister:
byTarget.setdefault( ftsFile.TargetSE, [] ).append( ftsFile )
log.info( "will create %s 'RegisterReplica' operations" % len( byTarget ) )
for target, ftsFileList in byTarget.items():
log.info( "creating 'RegisterReplica' operation for targetSE %s with %s files..." % ( target,
len( ftsFileList ) ) )
registerOperation = Operation()
registerOperation.Type = "RegisterReplica"
registerOperation.Status = "Waiting"
registerOperation.TargetSE = target
targetSE = self.getSE( target )
for ftsFile in ftsFileList:
opFile = File()
opFile.LFN = ftsFile.LFN
pfn = returnSingleResult( targetSE.getPfnForProtocol( ftsFile.TargetSURL, protocol = "SRM2", withPort = False ) )
if not pfn["OK"]:
continue
opFile.PFN = pfn["Value"]
registerOperation.addFile( opFile )
request.insertBefore( registerOperation, operation )
return S_OK()
@staticmethod
def __sendAccounting( ftsJob, ownerDN ):
""" prepare and send DataOperation to AccouringDB """
dataOp = DataOperation()
dataOp.setStartTime( fromString( ftsJob.SubmitTime ) )
dataOp.setEndTime( fromString( ftsJob.LastUpdate ) )
accountingDict = dict()
accountingDict["OperationType"] = "ReplicateAndRegister"
username = getUsernameForDN( ownerDN )
if not username["OK"]:
username = ownerDN
else:
username = username["Value"]
accountingDict["User"] = username
accountingDict["Protocol"] = "FTS3" if 'fts3' in ftsJob.FTSServer.lower() else 'FTS'
accountingDict['ExecutionSite'] = ftsJob.FTSServer
accountingDict['RegistrationTime'] = ftsJob._regTime
accountingDict['RegistrationOK'] = ftsJob._regSuccess
accountingDict['RegistrationTotal'] = ftsJob._regTotal
accountingDict["TransferOK"] = len( [ f for f in ftsJob if f.Status in FTSFile.SUCCESS_STATES ] )
accountingDict["TransferTotal"] = len( ftsJob )
accountingDict["TransferSize"] = ftsJob.Size - ftsJob.FailedSize
accountingDict["FinalStatus"] = ftsJob.Status
accountingDict["Source"] = ftsJob.SourceSE
accountingDict["Destination"] = ftsJob.TargetSE
# dt = ftsJob.LastUpdate - ftsJob.SubmitTime
# transferTime = dt.days * 86400 + dt.seconds
# accountingDict["TransferTime"] = transferTime
accountingDict['TransferTime'] = sum( [int( f._duration ) for f in ftsJob if f.Status in FTSFile.SUCCESS_STATES ] )
dataOp.setValuesFromDict( accountingDict )
dataOp.commit()
def __checkReadyReplicas( self, request, operation ):
""" check ready replicas for transferOperation """
log = self.log.getSubLogger( "%s/checkReadyReplicas" % request.RequestName )
targetSESet = set( operation.targetSEList )
# # { LFN: [ targetSE, ... ] }
missingReplicas = {}
scheduledFiles = dict( [ ( opFile.LFN, opFile ) for opFile in operation
if opFile.Status in ( "Scheduled", "Waiting" ) ] )
# # get replicas
replicas = FileCatalog().getReplicas( scheduledFiles )
if not replicas["OK"]:
self.log.error( replicas["Message"] )
return replicas
replicas = replicas["Value"]
fullyReplicated = 0
missingSEs = {}
for successfulLFN in replicas["Successful"]:
reps = set( replicas['Successful'][successfulLFN] )
if targetSESet.issubset( reps ):
log.info( "%s has been replicated to all targets" % successfulLFN )
fullyReplicated += 1
scheduledFiles[successfulLFN].Status = "Done"
else:
missingReplicas[successfulLFN] = sorted( targetSESet - reps )
ses = ",".join( missingReplicas[ successfulLFN ] )
missingSEs[ses] = missingSEs.setdefault( ses, 0 ) + 1
log.verbose( "%s is still missing at %s" % ( successfulLFN, ses ) )
if fullyReplicated:
log.info( "%d new files have been replicated to all targets" % fullyReplicated )
if missingSEs:
for ses in missingSEs:
log.info( "%d replicas still missing at %s" % ( missingSEs[ses], ses ) )
reMissing = re.compile( "no such file or directory" )
for failedLFN, errStr in replicas["Failed"].items():
scheduledFiles[failedLFN].Error = errStr
if reMissing.search( errStr.lower() ):
log.error( "%s is missing, setting its status to 'Failed'" % failedLFN )
scheduledFiles[failedLFN].Status = "Failed"
else:
log.warn( "unable to read replicas for %s: %s" % ( failedLFN, errStr ) )
return S_OK( missingReplicas )
def __filterReplicas( self, opFile ):
""" filter out banned/invalid source SEs """
from DIRAC.DataManagementSystem.Agent.RequestOperations.ReplicateAndRegister import filterReplicas
return filterReplicas( opFile, logger = self.log, dataManager = self.dataManager, seCache = self.getSECache() )
|
rajanandakumar/DIRAC
|
DataManagementSystem/Agent/FTSAgent.py
|
Python
|
gpl-3.0
| 46,311
|
[
"DIRAC"
] |
9a114c49a4f022fd0c116a6a242d9b6c22214578e6d82e7859c931d06fa6efb9
|
import logging
from cStringIO import StringIO
from math import exp
from lxml import etree
from path import path # NOTE (THK): Only used for detecting presence of syllabus
import requests
from datetime import datetime
import dateutil.parser
from xmodule.modulestore import Location
from xmodule.seq_module import SequenceDescriptor, SequenceModule
from xmodule.util.decorators import lazyproperty
from xmodule.graders import grader_from_conf
import json
from xblock.fields import Scope, List, String, Dict, Boolean
from .fields import Date
from xmodule.modulestore.locator import CourseLocator
from django.utils.timezone import UTC
from xmodule.util import date_utils
log = logging.getLogger(__name__)
class StringOrDate(Date):
def from_json(self, value):
"""
Parse an optional metadata key containing a time or a string:
if present, assume it's a string if it doesn't parse.
"""
try:
result = super(StringOrDate, self).from_json(value)
except ValueError:
return value
if result is None:
return value
else:
return result
def to_json(self, value):
"""
Convert a time struct or string to a string.
"""
try:
result = super(StringOrDate, self).to_json(value)
except Exception:
return value
if result is None:
return value
else:
return result
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True)
_cached_toc = {}
class Textbook(object):
def __init__(self, title, book_url):
self.title = title
self.book_url = book_url
self.start_page = int(self.table_of_contents[0].attrib['page'])
# The last page should be the last element in the table of contents,
# but it may be nested. So recurse all the way down the last element
last_el = self.table_of_contents[-1]
while last_el.getchildren():
last_el = last_el[-1]
self.end_page = int(last_el.attrib['page'])
@lazyproperty
def table_of_contents(self):
"""
Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url
Returns XML tree representation of the table of contents
"""
toc_url = self.book_url + 'toc.xml'
# cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores)
# course modules have a very short lifespan and are constantly being created and torn down.
# Since this module in the __init__() method does a synchronous call to AWS to get the TOC
# this is causing a big performance problem. So let's be a bit smarter about this and cache
# each fetch and store in-mem for 10 minutes.
# NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and
# rewrite to use the traditional Django in-memory cache.
try:
# see if we already fetched this
if toc_url in _cached_toc:
(table_of_contents, timestamp) = _cached_toc[toc_url]
age = datetime.now(UTC) - timestamp
# expire every 10 minutes
if age.seconds < 600:
return table_of_contents
except Exception as err:
pass
# Get the table of contents from S3
log.info("Retrieving textbook table of contents from %s" % toc_url)
try:
r = requests.get(toc_url)
except Exception as err:
msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
# TOC is XML. Parse it
try:
table_of_contents = etree.fromstring(r.text)
except Exception as err:
msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
return table_of_contents
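# Sketch (an assumption, not part of the original file) of the Django
# in-memory cache rewrite mentioned in the note above; the cache key and
# the 600 s timeout are illustrative:
#   from django.core.cache import cache
#   cached = cache.get('textbook_toc.' + toc_url)
#   if cached is not None:
#       return etree.fromstring(cached)
#   # ...fetch and parse as above, then:
#   cache.set('textbook_toc.' + toc_url, r.text, timeout=600)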
def __eq__(self, other):
return (self.title == other.title and
self.book_url == other.book_url)
def __ne__(self, other):
return not self == other
class TextbookList(List):
def from_json(self, values):
textbooks = []
for title, book_url in values:
try:
textbooks.append(Textbook(title, book_url))
except Exception:
# If we can't get to S3 (e.g. on a train with no internet), don't break
# the rest of the courseware.
log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url))
continue
return textbooks
def to_json(self, values):
json_data = []
for val in values:
if isinstance(val, Textbook):
json_data.append((val.title, val.book_url))
elif isinstance(val, tuple):
json_data.append(val)
else:
continue
return json_data
class CourseFields(object):
lti_passports = List(help="LTI tools passports as id:client_key:client_secret", scope=Scope.settings)
textbooks = TextbookList(help="List of pairs of (title, url) for textbooks used in this course",
default=[], scope=Scope.content)
wiki_slug = String(help="Slug that points to the wiki for this course", scope=Scope.content)
enrollment_start = Date(help="Date that enrollment for this class is opened", scope=Scope.settings)
enrollment_end = Date(help="Date that enrollment for this class is closed", scope=Scope.settings)
start = Date(help="Start time when this module is visible",
                 # Using now(UTC()) as the default resulted in fractional seconds, which
                 # broke comparisons, and would in any case be the time this statement was
                 # first evaluated on the server.
default=datetime.fromtimestamp(0, UTC()),
scope=Scope.settings)
end = Date(help="Date that this class ends", scope=Scope.settings)
advertised_start = String(help="Date that this course is advertised to start", scope=Scope.settings)
grading_policy = Dict(help="Grading policy definition for this class",
default={"GRADER": [
{
"type": "Homework",
"min_count": 12,
"drop_count": 2,
"short_label": "HW",
"weight": 0.15
},
{
"type": "Lab",
"min_count": 12,
"drop_count": 2,
"weight": 0.15
},
{
"type": "Midterm Exam",
"short_label": "Midterm",
"min_count": 1,
"drop_count": 0,
"weight": 0.3
},
{
"type": "Final Exam",
"short_label": "Final",
"min_count": 1,
"drop_count": 0,
"weight": 0.4
}
],
"GRADE_CUTOFFS": {
"Pass": 0.5
}},
scope=Scope.content)
show_calculator = Boolean(help="Whether to show the calculator in this course", default=False, scope=Scope.settings)
display_name = String(help="Display name for this module", default="Empty", display_name="Display Name", scope=Scope.settings)
show_chat = Boolean(help="Whether to show the chat widget in this course", default=False, scope=Scope.settings)
tabs = List(help="List of tabs to enable in this course", scope=Scope.settings)
end_of_course_survey_url = String(help="Url for the end-of-course survey", scope=Scope.settings)
discussion_blackouts = List(help="List of pairs of start/end dates for discussion blackouts", scope=Scope.settings)
discussion_topics = Dict(help="Map of topics names to ids", scope=Scope.settings)
discussion_sort_alpha = Boolean(scope=Scope.settings, default=False, help="Sort forum categories and subcategories alphabetically.")
testcenter_info = Dict(help="Dictionary of Test Center info", scope=Scope.settings)
announcement = Date(help="Date this course is announced", scope=Scope.settings)
cohort_config = Dict(help="Dictionary defining cohort configuration", scope=Scope.settings)
is_new = Boolean(help="Whether this course should be flagged as new", scope=Scope.settings)
no_grade = Boolean(help="True if this course isn't graded", default=False, scope=Scope.settings)
disable_progress_graph = Boolean(help="True if this course shouldn't display the progress graph", default=False, scope=Scope.settings)
pdf_textbooks = List(help="List of dictionaries containing pdf_textbook configuration", scope=Scope.settings)
html_textbooks = List(help="List of dictionaries containing html_textbook configuration", scope=Scope.settings)
remote_gradebook = Dict(scope=Scope.settings)
allow_anonymous = Boolean(scope=Scope.settings, default=True)
allow_anonymous_to_peers = Boolean(scope=Scope.settings, default=False)
advanced_modules = List(help="Beta modules used in your course", scope=Scope.settings)
has_children = True
checklists = List(scope=Scope.settings,
default=[
{"short_description": "Getting Started With Studio",
"items": [{"short_description": "Add Course Team Members",
"long_description": "Grant your collaborators permission to edit your course so you can work together.",
"is_checked": False,
"action_url": "ManageUsers",
"action_text": "Edit Course Team",
"action_external": False},
{"short_description": "Set Important Dates for Your Course",
"long_description": "Establish your course's student enrollment and launch dates on the Schedule and Details page.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Details & Schedule",
"action_external": False},
{"short_description": "Draft Your Course's Grading Policy",
"long_description": "Set up your assignment types and grading policy even if you haven't created all your assignments.",
"is_checked": False,
"action_url": "SettingsGrading",
"action_text": "Edit Grading Settings",
"action_external": False},
{"short_description": "Explore the Other Studio Checklists",
"long_description": "Discover other available course authoring tools, and find help when you need it.",
"is_checked": False,
"action_url": "",
"action_text": "",
"action_external": False}]},
{"short_description": "Draft a Rough Course Outline",
"items": [{"short_description": "Create Your First Section and Subsection",
"long_description": "Use your course outline to build your first Section and Subsection.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Set Section Release Dates",
"long_description": "Specify the release dates for each Section in your course. Sections become visible to students on their release dates.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Designate a Subsection as Graded",
"long_description": "Set a Subsection to be graded as a specific assignment type. Assignments within graded Subsections count toward a student's final grade.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Reordering Course Content",
"long_description": "Use drag and drop to reorder the content in your course.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Renaming Sections",
"long_description": "Rename Sections by clicking the Section name from the Course Outline.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Deleting Course Content",
"long_description": "Delete Sections, Subsections, or Units you don't need anymore. Be careful, as there is no Undo function.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False},
{"short_description": "Add an Instructor-Only Section to Your Outline",
"long_description": "Some course authors find using a section for unsorted, in-progress work useful. To do this, create a section and set the release date to the distant future.",
"is_checked": False,
"action_url": "CourseOutline",
"action_text": "Edit Course Outline",
"action_external": False}]},
{"short_description": "Explore edX's Support Tools",
"items": [{"short_description": "Explore the Studio Help Forum",
"long_description": "Access the Studio Help forum from the menu that appears when you click your user name in the top right corner of Studio.",
"is_checked": False,
"action_url": "http://help.edge.edx.org/",
"action_text": "Visit Studio Help",
"action_external": True},
{"short_description": "Enroll in edX 101",
"long_description": "Register for edX 101, edX's primer for course creation.",
"is_checked": False,
"action_url": "https://edge.edx.org/courses/edX/edX101/How_to_Create_an_edX_Course/about",
"action_text": "Register for edX 101",
"action_external": True},
{"short_description": "Download the Studio Documentation",
"long_description": "Download the searchable Studio reference documentation in PDF form.",
"is_checked": False,
"action_url": "http://files.edx.org/Getting_Started_with_Studio.pdf",
"action_text": "Download Documentation",
"action_external": True}]},
{"short_description": "Draft Your Course About Page",
"items": [{"short_description": "Draft a Course Description",
"long_description": "Courses on edX have an About page that includes a course video, description, and more. Draft the text students will read before deciding to enroll in your course.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Schedule & Details",
"action_external": False},
{"short_description": "Add Staff Bios",
"long_description": "Showing prospective students who their instructor will be is helpful. Include staff bios on the course About page.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Schedule & Details",
"action_external": False},
{"short_description": "Add Course FAQs",
"long_description": "Include a short list of frequently asked questions about your course.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Schedule & Details",
"action_external": False},
{"short_description": "Add Course Prerequisites",
"long_description": "Let students know what knowledge and/or skills they should have before they enroll in your course.",
"is_checked": False,
"action_url": "SettingsDetails",
"action_text": "Edit Course Schedule & Details",
"action_external": False}]}
])
info_sidebar_name = String(scope=Scope.settings, default='Course Handouts')
show_timezone = Boolean(help="True if timezones should be shown on dates in the courseware", scope=Scope.settings, default=True)
enrollment_domain = String(help="External login method associated with user accounts allowed to register in course",
scope=Scope.settings)
course_image = String(
help="Filename of the course image",
scope=Scope.settings,
# Ensure that courses imported from XML keep their image
default="images_course_image.jpg"
)
# An extra property is used rather than the wiki_slug/number because
# there are courses that change the number for different runs. This allows
# courses to share the same css_class across runs even if they have
# different numbers.
#
# TODO get rid of this as soon as possible or potentially build in a robust
# way to add in course-specific styling. There needs to be a discussion
# about the right way to do this, but arjun will address this ASAP. Also
# note that the courseware template needs to change when this is removed.
css_class = String(help="DO NOT USE THIS", scope=Scope.settings)
# TODO: This is a quick kludge to allow CS50 (and other courses) to
# specify their own discussion forums as external links by specifying a
# "discussion_link" in their policy JSON file. This should later get
# folded in with Syllabus, Course Info, and additional Custom tabs in a
# more sensible framework later.
discussion_link = String(help="DO NOT USE THIS", scope=Scope.settings)
# TODO: same as above, intended to let internal CS50 hide the progress tab
# until we get grade integration set up.
# Explicit comparison to True because we always want to return a bool.
hide_progress_tab = Boolean(help="DO NOT USE THIS", scope=Scope.settings)
display_organization = String(help="An optional display string for the course organization that will get rendered in the LMS",
scope=Scope.settings)
display_coursenumber = String(help="An optional display string for the course number that will get rendered in the LMS",
scope=Scope.settings)
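# Note on the default grading_policy above: the GRADER weights sum to 1.0
# (0.15 + 0.15 + 0.3 + 0.4), so the final grade is a weighted average of the
# per-type averages. For example (hypothetical scores), homework 0.9, lab 0.8,
# midterm 0.7, and final 0.6 give
# 0.15*0.9 + 0.15*0.8 + 0.3*0.7 + 0.4*0.6 = 0.705, a "Pass" under the default
# 0.5 cutoff.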
class CourseDescriptor(CourseFields, SequenceDescriptor):
module_class = SequenceModule
def __init__(self, *args, **kwargs):
"""
Expects the same arguments as XModuleDescriptor.__init__
"""
super(CourseDescriptor, self).__init__(*args, **kwargs)
if self.wiki_slug is None:
if isinstance(self.location, Location):
self.wiki_slug = self.location.course
elif isinstance(self.location, CourseLocator):
self.wiki_slug = self.location.course_id or self.display_name
msg = None
# NOTE: relies on the modulestore to call set_grading_policy() right after
# init. (Modulestore is in charge of figuring out where to load the policy from)
# NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically
# disable the syllabus content for courses that do not provide a syllabus
if self.system.resources_fs is None:
self.syllabus_present = False
else:
self.syllabus_present = self.system.resources_fs.exists(path('syllabus'))
self._grading_policy = {}
self.set_grading_policy(self.grading_policy)
if self.discussion_topics == {}:
self.discussion_topics = {'General': {'id': self.location.html_id()}}
self.test_center_exams = []
test_center_info = self.testcenter_info
if test_center_info is not None:
for exam_name in test_center_info:
try:
exam_info = test_center_info[exam_name]
self.test_center_exams.append(self.TestCenterExam(self.id, exam_name, exam_info))
except Exception as err:
# If we can't parse the test center exam info, don't break
# the rest of the courseware.
msg = 'Error %s: Unable to load test-center exam info for exam "%s" of course "%s"' % (err, exam_name, self.id)
log.error(msg)
continue
        # TODO: check whether this is still needed here, or whether it could be
        # handled by field defaults.
if not self.tabs:
# When calling the various _tab methods, can omit the 'type':'blah' from the
# first arg, since that's only used for dispatch
tabs = []
tabs.append({'type': 'courseware'})
tabs.append({'type': 'course_info', 'name': 'Course Info'})
if self.syllabus_present:
tabs.append({'type': 'syllabus'})
tabs.append({'type': 'textbooks'})
# # If they have a discussion link specified, use that even if we feature
# # flag discussions off. Disabling that is mostly a server safety feature
# # at this point, and we don't need to worry about external sites.
if self.discussion_link:
tabs.append({'type': 'external_discussion', 'link': self.discussion_link})
else:
tabs.append({'type': 'discussion', 'name': 'Discussion'})
tabs.append({'type': 'wiki', 'name': 'Wiki'})
if not self.hide_progress_tab:
tabs.append({'type': 'progress', 'name': 'Progress'})
self.tabs = tabs
def set_grading_policy(self, course_policy):
"""
The JSON object can have the keys GRADER and GRADE_CUTOFFS. If either is
missing, it reverts to the default.
"""
if course_policy is None:
course_policy = {}
# Load the global settings as a dictionary
grading_policy = self.grading_policy
# BOY DO I HATE THIS grading_policy CODE ACROBATICS YET HERE I ADD MORE (dhm)--this fixes things persisted w/
# defective grading policy values (but not None)
if 'GRADER' not in grading_policy:
grading_policy['GRADER'] = CourseFields.grading_policy.default['GRADER']
if 'GRADE_CUTOFFS' not in grading_policy:
grading_policy['GRADE_CUTOFFS'] = CourseFields.grading_policy.default['GRADE_CUTOFFS']
# Override any global settings with the course settings
grading_policy.update(course_policy)
# Here is where we should parse any configurations, so that we can fail early
# Use setters so that side effecting to .definitions works
self.raw_grader = grading_policy['GRADER'] # used for cms access
self.grade_cutoffs = grading_policy['GRADE_CUTOFFS']
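    # Illustrative usage sketch (hypothetical `course` instance). A partial
    # policy overrides only the keys it provides; missing keys keep the
    # defaults declared on CourseFields:
    #
    #     course.set_grading_policy({"GRADE_CUTOFFS": {"Pass": 0.6}})
    #     course.grade_cutoffs   # -> {"Pass": 0.6}
    #     course.raw_grader      # -> the default GRADER list, unchanged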
@classmethod
def read_grading_policy(cls, paths, system):
"""Load a grading policy from the specified paths, in order, if it exists."""
# Default to a blank policy dict
policy_str = '{}'
for policy_path in paths:
if not system.resources_fs.exists(policy_path):
continue
log.debug("Loading grading policy from {0}".format(policy_path))
try:
with system.resources_fs.open(policy_path) as grading_policy_file:
policy_str = grading_policy_file.read()
# if we successfully read the file, stop looking at backups
break
            except IOError:
msg = "Unable to load course settings file from '{0}'".format(policy_path)
log.warning(msg)
return policy_str
@classmethod
def from_xml(cls, xml_data, system, org=None, course=None):
instance = super(CourseDescriptor, cls).from_xml(xml_data, system, org, course)
# bleh, have to parse the XML here to just pull out the url_name attribute
# I don't think it's stored anywhere in the instance.
course_file = StringIO(xml_data.encode('ascii', 'ignore'))
xml_obj = etree.parse(course_file, parser=edx_xml_parser).getroot()
policy_dir = None
url_name = xml_obj.get('url_name', xml_obj.get('slug'))
if url_name:
policy_dir = 'policies/' + url_name
# Try to load grading policy
paths = ['grading_policy.json']
if policy_dir:
paths = [policy_dir + '/grading_policy.json'] + paths
try:
policy = json.loads(cls.read_grading_policy(paths, system))
except ValueError:
system.error_tracker("Unable to decode grading policy as json")
policy = {}
# now set the current instance. set_grading_policy() will apply some inheritance rules
instance.set_grading_policy(policy)
return instance
@classmethod
def definition_from_xml(cls, xml_object, system):
textbooks = []
for textbook in xml_object.findall("textbook"):
textbooks.append((textbook.get('title'), textbook.get('book_url')))
xml_object.remove(textbook)
# Load the wiki tag if it exists
wiki_slug = None
wiki_tag = xml_object.find("wiki")
if wiki_tag is not None:
wiki_slug = wiki_tag.attrib.get("slug", default=None)
xml_object.remove(wiki_tag)
definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system)
definition['textbooks'] = textbooks
definition['wiki_slug'] = wiki_slug
return definition, children
    def definition_to_xml(self, resource_fs):
        xml_object = super(CourseDescriptor, self).definition_to_xml(resource_fs)
        if len(self.textbooks) > 0:
            # Create one <textbook> element per textbook; a single shared
            # element would keep only the last textbook's attributes.
            for textbook in self.textbooks:
                textbook_xml_object = etree.Element('textbook')
                textbook_xml_object.set('title', textbook.title)
                textbook_xml_object.set('book_url', textbook.book_url)
                xml_object.append(textbook_xml_object)
        return xml_object
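    # Illustrative result (hypothetical titles and URLs): two textbooks
    # serialize as two sibling children of the course element:
    #
    #     <textbook title="Book One" book_url="https://example.com/one/"/>
    #     <textbook title="Book Two" book_url="https://example.com/two/"/>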
def has_ended(self):
"""
Returns True if the current time is after the specified course end date.
Returns False if there is no end date specified.
"""
if self.end is None:
return False
return datetime.now(UTC()) > self.end
def has_started(self):
return datetime.now(UTC()) > self.start
@property
def grader(self):
return grader_from_conf(self.raw_grader)
@property
def raw_grader(self):
return self._grading_policy['RAW_GRADER']
@raw_grader.setter
def raw_grader(self, value):
# NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf
self._grading_policy['RAW_GRADER'] = value
self.grading_policy['GRADER'] = value
@property
def grade_cutoffs(self):
return self._grading_policy['GRADE_CUTOFFS']
@grade_cutoffs.setter
def grade_cutoffs(self, value):
self._grading_policy['GRADE_CUTOFFS'] = value
# XBlock fields don't update after mutation
policy = self.grading_policy
policy['GRADE_CUTOFFS'] = value
self.grading_policy = policy
@property
def lowest_passing_grade(self):
return min(self._grading_policy['GRADE_CUTOFFS'].values())
@property
def is_cohorted(self):
"""
Return whether the course is cohorted.
"""
config = self.cohort_config
if config is None:
return False
return bool(config.get("cohorted"))
@property
def auto_cohort(self):
"""
Return whether the course is auto-cohorted.
"""
if not self.is_cohorted:
return False
return bool(self.cohort_config.get(
"auto_cohort", False))
@property
def auto_cohort_groups(self):
"""
Return the list of groups to put students into. Returns [] if not
specified. Returns specified list even if is_cohorted and/or auto_cohort are
false.
"""
if self.cohort_config is None:
return []
else:
return self.cohort_config.get("auto_cohort_groups", [])
@property
def top_level_discussion_topic_ids(self):
"""
Return list of topic ids defined in course policy.
"""
topics = self.discussion_topics
return [d["id"] for d in topics.values()]
@property
def cohorted_discussions(self):
"""
Return the set of discussions that is explicitly cohorted. It may be
the empty set. Note that all inline discussions are automatically
cohorted based on the course's is_cohorted setting.
"""
config = self.cohort_config
if config is None:
return set()
return set(config.get("cohorted_discussions", []))
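    # Illustrative shape of the cohort_config dict these properties read
    # (group and topic names are hypothetical):
    #
    #     course.cohort_config = {
    #         "cohorted": True,
    #         "auto_cohort": True,
    #         "auto_cohort_groups": ["Group A", "Group B"],
    #         "cohorted_discussions": ["topic-id-1"],
    #     }
    #     course.is_cohorted             # -> True
    #     course.auto_cohort             # -> True
    #     course.auto_cohort_groups      # -> ["Group A", "Group B"]
    #     course.cohorted_discussions    # -> set(["topic-id-1"])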
@property
def is_newish(self):
"""
Returns if the course has been flagged as new. If
there is no flag, return a heuristic value considering the
announcement and the start dates.
"""
flag = self.is_new
if flag is None:
# Use a heuristic if the course has not been flagged
announcement, start, now = self._sorting_dates()
            if announcement and (now - announcement).days < 30:
                # The course was announced less than a month ago
return True
elif (now - start).days < 1:
# The course has not started yet
return True
else:
return False
elif isinstance(flag, basestring):
return flag.lower() in ['true', 'yes', 'y']
else:
return bool(flag)
@property
    def sorting_score(self):
        """
        Returns a number that can be used to sort the courses according
        to how "new" they are. The "newness" score is computed using a
        heuristic that takes into account the announcement and
        (advertised) start dates of the course if available.
        The lower the number, the "newer" the course.
        """
        # Courses that have an announcement date get a lower score than
        # courses that don't; older courses should have a higher score.
announcement, start, now = self._sorting_dates()
scale = 300.0 # about a year
if announcement:
days = (now - announcement).days
score = -exp(-days / scale)
else:
days = (now - start).days
score = exp(days / scale)
return score
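    # Worked example of the heuristic above: with scale = 300.0, a course
    # announced 30 days ago scores -exp(-30/300) ~= -0.905, while an
    # unannounced course that started 30 days ago scores exp(30/300) ~= 1.105.
    # Announced courses therefore always sort ahead of unannounced ones.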
def _sorting_dates(self):
# utility function to get datetime objects for dates used to
# compute the is_new flag and the sorting_score
        announcement = self.announcement
try:
start = dateutil.parser.parse(self.advertised_start)
if start.tzinfo is None:
start = start.replace(tzinfo=UTC())
except (ValueError, AttributeError):
start = self.start
now = datetime.now(UTC())
return announcement, start, now
@lazyproperty
def grading_context(self):
"""
This returns a dictionary with keys necessary for quickly grading
a student. They are used by grades.grade()
The grading context has two keys:
graded_sections - This contains the sections that are graded, as
well as all possible children modules that can affect the
grading. This allows some sections to be skipped if the student
hasn't seen any part of it.
The format is a dictionary keyed by section-type. The values are
arrays of dictionaries containing
"section_descriptor" : The section descriptor
"xmoduledescriptors" : An array of xmoduledescriptors that
could possibly be in the section, for any student
all_descriptors - This contains a list of all xmodules that can
                affect grading a student. This is used to efficiently fetch
all the xmodule state for a FieldDataCache without walking
the descriptor tree again.
"""
all_descriptors = []
graded_sections = {}
def yield_descriptor_descendents(module_descriptor):
for child in module_descriptor.get_children():
yield child
for module_descriptor in yield_descriptor_descendents(child):
yield module_descriptor
for c in self.get_children():
for s in c.get_children():
if s.graded:
xmoduledescriptors = list(yield_descriptor_descendents(s))
xmoduledescriptors.append(s)
# The xmoduledescriptors included here are only the ones that have scores.
section_description = {'section_descriptor': s, 'xmoduledescriptors': filter(lambda child: child.has_score, xmoduledescriptors)}
section_format = s.format if s.format is not None else ''
graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description]
all_descriptors.extend(xmoduledescriptors)
all_descriptors.append(s)
return {'graded_sections': graded_sections,
'all_descriptors': all_descriptors, }
@staticmethod
def make_id(org, course, url_name):
return '/'.join([org, course, url_name])
@staticmethod
def id_to_location(course_id):
'''Convert the given course_id (org/course/name) to a location object.
Throws ValueError if course_id is of the wrong format.
'''
org, course, name = course_id.split('/')
return Location('i4x', org, course, 'course', name)
@staticmethod
def location_to_id(location):
'''Convert a location of a course to a course_id. If location category
is not "course", raise a ValueError.
location: something that can be passed to Location
'''
loc = Location(location)
if loc.category != "course":
raise ValueError("{0} is not a course location".format(loc))
return "/".join([loc.org, loc.course, loc.name])
@property
def id(self):
"""Return the course_id for this course"""
return self.location_to_id(self.location)
@property
def start_date_text(self):
def try_parse_iso_8601(text):
try:
result = Date().from_json(text)
if result is None:
result = text.title()
else:
result = result.strftime("%b %d, %Y")
except ValueError:
result = text.title()
return result
if isinstance(self.advertised_start, basestring):
return try_parse_iso_8601(self.advertised_start)
elif self.advertised_start is None and self.start is None:
# TODO this is an impossible state since the init function forces start to have a value
return 'TBD'
else:
return (self.advertised_start or self.start).strftime("%b %d, %Y")
@property
def end_date_text(self):
"""
Returns the end date for the course formatted as a string.
If the course does not have an end date set (course.end is None), an empty string will be returned.
"""
return '' if self.end is None else self.end.strftime("%b %d, %Y")
@property
def forum_posts_allowed(self):
date_proxy = Date()
try:
blackout_periods = [(date_proxy.from_json(start),
date_proxy.from_json(end))
for start, end
in self.discussion_blackouts]
now = datetime.now(UTC())
for start, end in blackout_periods:
if start <= now <= end:
return False
        except Exception:
log.exception("Error parsing discussion_blackouts for course {0}".format(self.id))
return True
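    # Illustrative sketch (hypothetical dates): discussion_blackouts holds
    # (start, end) pairs in any form Date.from_json accepts, e.g.
    #
    #     course.discussion_blackouts = [
    #         ["2013-01-01T00:00", "2013-01-07T00:00"],
    #     ]
    #
    # forum_posts_allowed is False whenever `now` falls inside any such pair.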
class TestCenterExam(object):
def __init__(self, course_id, exam_name, exam_info):
self.course_id = course_id
self.exam_name = exam_name
self.exam_info = exam_info
self.exam_series_code = exam_info.get('Exam_Series_Code') or exam_name
self.display_name = exam_info.get('Exam_Display_Name') or self.exam_series_code
self.first_eligible_appointment_date = self._try_parse_time('First_Eligible_Appointment_Date')
if self.first_eligible_appointment_date is None:
raise ValueError("First appointment date must be specified")
# TODO: If defaulting the last appointment date, it should be the
# *end* of the same day, not the same time. It's going to be used as the
# end of the exam overall, so we don't want the exam to disappear too soon.
# It's also used optionally as the registration end date, so time matters there too.
self.last_eligible_appointment_date = self._try_parse_time('Last_Eligible_Appointment_Date') # or self.first_eligible_appointment_date
if self.last_eligible_appointment_date is None:
raise ValueError("Last appointment date must be specified")
self.registration_start_date = (self._try_parse_time('Registration_Start_Date') or
datetime.fromtimestamp(0, UTC()))
self.registration_end_date = self._try_parse_time('Registration_End_Date') or self.last_eligible_appointment_date
# do validation within the exam info:
if self.registration_start_date > self.registration_end_date:
raise ValueError("Registration start date must be before registration end date")
if self.first_eligible_appointment_date > self.last_eligible_appointment_date:
raise ValueError("First appointment date must be before last appointment date")
if self.registration_end_date > self.last_eligible_appointment_date:
raise ValueError("Registration end date must be before last appointment date")
self.exam_url = exam_info.get('Exam_URL')
def _try_parse_time(self, key):
"""
Parse an optional metadata key containing a time: if present, complain
if it doesn't parse.
Return None if not present or invalid.
"""
if key in self.exam_info:
try:
return Date().from_json(self.exam_info[key])
except ValueError as e:
msg = "Exam {0} in course {1} loaded with a bad exam_info key '{2}': '{3}'".format(self.exam_name, self.course_id, self.exam_info[key], e)
log.warning(msg)
return None
def has_started(self):
return datetime.now(UTC()) > self.first_eligible_appointment_date
def has_ended(self):
return datetime.now(UTC()) > self.last_eligible_appointment_date
def has_started_registration(self):
return datetime.now(UTC()) > self.registration_start_date
def has_ended_registration(self):
return datetime.now(UTC()) > self.registration_end_date
def is_registering(self):
            now = datetime.now(UTC())
            return self.registration_start_date <= now <= self.registration_end_date
@property
def first_eligible_appointment_date_text(self):
return self.first_eligible_appointment_date.strftime("%b %d, %Y")
@property
def last_eligible_appointment_date_text(self):
return self.last_eligible_appointment_date.strftime("%b %d, %Y")
@property
def registration_end_date_text(self):
return date_utils.get_default_time_display(self.registration_end_date)
@property
def current_test_center_exam(self):
exams = [exam for exam in self.test_center_exams if exam.has_started_registration() and not exam.has_ended()]
if len(exams) > 1:
# TODO: output some kind of warning. This should already be
# caught if we decide to do validation at load time.
return exams[0]
elif len(exams) == 1:
return exams[0]
else:
return None
def get_test_center_exam(self, exam_series_code):
exams = [exam for exam in self.test_center_exams if exam.exam_series_code == exam_series_code]
return exams[0] if len(exams) == 1 else None
@property
def number(self):
return self.location.course
@property
def display_number_with_default(self):
"""
Return a display course number if it has been specified, otherwise return the 'course' that is in the location
"""
if self.display_coursenumber:
return self.display_coursenumber
return self.number
@property
def org(self):
return self.location.org
@property
def display_org_with_default(self):
"""
Return a display organization if it has been specified, otherwise return the 'org' that is in the location
"""
if self.display_organization:
return self.display_organization
return self.org
|
praveen-pal/edx-platform
|
common/lib/xmodule/xmodule/course_module.py
|
Python
|
agpl-3.0
| 45,434
|
[
"VisIt"
] |
179f86716af7a219ee4eec2bc3e1516592279e0a83b442d8ee961aca373ad7a2
|
# Author: Travis Oliphant 2001
# Author: Nathan Woods 2013 (nquad &c)
import sys
import warnings
from functools import partial
from . import _quadpack
import numpy
from numpy import Inf
__all__ = ['quad', 'dblquad', 'tplquad', 'nquad', 'quad_explain',
'IntegrationWarning']
error = _quadpack.error
class IntegrationWarning(UserWarning):
"""
Warning on issues during integration.
"""
pass
def quad_explain(output=sys.stdout):
"""
Print extra information about integrate.quad() parameters and returns.
Parameters
----------
output : instance with "write" method, optional
Information about `quad` is passed to ``output.write()``.
Default is ``sys.stdout``.
Returns
-------
None
Examples
--------
We can show detailed information of the `integrate.quad` function in stdout:
>>> from scipy.integrate import quad_explain
>>> quad_explain()
"""
output.write(quad.__doc__)
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
limlst=50):
"""
Compute a definite integral.
Integrate func from `a` to `b` (possibly infinite interval) using a
technique from the Fortran library QUADPACK.
Parameters
----------
func : {function, scipy.LowLevelCallable}
A Python function or method to integrate. If `func` takes many
arguments, it is integrated along the axis corresponding to the
first argument.
If the user desires improved integration performance, then `f` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(double x)
double func(double x, void *user_data)
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
In the call forms with ``xx``, ``n`` is the length of the ``xx``
array which contains ``xx[0] == x`` and the rest of the items are
numbers contained in the ``args`` argument of quad.
In addition, certain ctypes call signatures are supported for
backward compatibility, but those should not be used in new code.
a : float
Lower limit of integration (use -numpy.inf for -infinity).
b : float
Upper limit of integration (use numpy.inf for +infinity).
args : tuple, optional
Extra arguments to pass to `func`.
full_output : int, optional
Non-zero to return a dictionary of integration information.
If non-zero, warning messages are also suppressed and the
message is appended to the output tuple.
Returns
-------
y : float
The integral of func from `a` to `b`.
abserr : float
An estimate of the absolute error in the result.
infodict : dict
A dictionary containing additional information.
Run scipy.integrate.quad_explain() for more information.
message
A convergence message.
explain
Appended only with 'cos' or 'sin' weighting and infinite
integration limits, it contains an explanation of the codes in
infodict['ierlst']
Other Parameters
----------------
epsabs : float or int, optional
Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
numerical approximation. See `epsrel` below.
epsrel : float or int, optional
Relative error tolerance. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
limit : float or int, optional
An upper bound on the number of subintervals used in the adaptive
algorithm.
points : (sequence of floats,ints), optional
A sequence of break points in the bounded integration interval
where local difficulties of the integrand may occur (e.g.,
singularities, discontinuities). The sequence does not have
to be sorted. Note that this option cannot be used in conjunction
with ``weight``.
weight : float or int, optional
String indicating weighting function. Full explanation for this
and the remaining arguments can be found below.
wvar : optional
Variables for use with weighting functions.
wopts : optional
Optional input for reusing Chebyshev moments.
maxp1 : float or int, optional
An upper bound on the number of Chebyshev moments.
limlst : int, optional
Upper bound on the number of cycles (>=3) for use with a sinusoidal
weighting and an infinite end-point.
See Also
--------
dblquad : double integral
tplquad : triple integral
nquad : n-dimensional integrals (uses `quad` recursively)
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
**Extra information for quad() inputs and outputs**
If full_output is non-zero, then the third output argument
(infodict) is a dictionary with entries as tabulated below. For
infinite limits, the range is transformed to (0,1) and the
optional outputs are given with respect to this transformed range.
Let M be the input argument limit and let K be infodict['last'].
The entries are:
'neval'
The number of function evaluations.
'last'
The number, K, of subintervals produced in the subdivision process.
'alist'
A rank-1 array of length M, the first K elements of which are the
left end points of the subintervals in the partition of the
integration range.
'blist'
A rank-1 array of length M, the first K elements of which are the
right end points of the subintervals.
'rlist'
A rank-1 array of length M, the first K elements of which are the
integral approximations on the subintervals.
'elist'
A rank-1 array of length M, the first K elements of which are the
moduli of the absolute error estimates on the subintervals.
'iord'
A rank-1 integer array of length M, the first L elements of
which are pointers to the error estimates over the subintervals
with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
sequence ``infodict['iord']`` and let E be the sequence
``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
decreasing sequence.
If the input argument points is provided (i.e., it is not None),
the following additional outputs are placed in the output
dictionary. Assume the points sequence is of length P.
'pts'
A rank-1 array of length P+2 containing the integration limits
and the break points of the intervals in ascending order.
This is an array giving the subintervals over which integration
will occur.
'level'
A rank-1 integer array of length M (=limit), containing the
subdivision levels of the subintervals, i.e., if (aa,bb) is a
subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
'ndin'
A rank-1 integer array of length P+2. After the first integration
over the intervals (pts[1], pts[2]), the error estimates over some
of the intervals may have been increased artificially in order to
put their subdivision forward. This array has ones in slots
corresponding to the subintervals for which this happens.
**Weighting the integrand**
The input variables, *weight* and *wvar*, are used to weight the
integrand by a select list of functions. Different integration
methods are used to compute the integral with these weighting
functions, and these do not support specifying break points. The
possible values of weight and the corresponding weighting functions are.
========== =================================== =====================
``weight`` Weight function used ``wvar``
========== =================================== =====================
'cos' cos(w*x) wvar = w
'sin' sin(w*x) wvar = w
'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
'cauchy' 1/(x-c) wvar = c
========== =================================== =====================
wvar holds the parameter w, (alpha, beta), or c depending on the weight
selected. In these expressions, a and b are the integration limits.
For the 'cos' and 'sin' weighting, additional inputs and outputs are
available.
For finite integration limits, the integration is performed using a
Clenshaw-Curtis method which uses Chebyshev moments. For repeated
calculations, these moments are saved in the output dictionary:
'momcom'
The maximum level of Chebyshev moments that have been computed,
i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
computed for intervals of length ``|b-a| * 2**(-l)``,
``l=0,1,...,M_c``.
'nnlog'
A rank-1 integer array of length M(=limit), containing the
subdivision levels of the subintervals, i.e., an element of this
array is equal to l if the corresponding subinterval is
``|b-a|* 2**(-l)``.
'chebmo'
A rank-2 array of shape (25, maxp1) containing the computed
Chebyshev moments. These can be passed on to an integration
over the same interval by passing this array as the second
element of the sequence wopts and passing infodict['momcom'] as
the first element.
If one of the integration limits is infinite, then a Fourier integral is
    computed (assuming ``w != 0``). If full_output is 1 and a numerical error
is encountered, besides the error message attached to the output tuple,
a dictionary is also appended to the output tuple which translates the
error codes in the array ``info['ierlst']`` to English messages. The
output information dictionary contains the following entries instead of
'last', 'alist', 'blist', 'rlist', and 'elist':
'lst'
The number of subintervals needed for the integration (call it ``K_f``).
'rslst'
A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
contain the integral contribution over the interval
``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
and ``k=1,2,...,K_f``.
'erlst'
A rank-1 array of length ``M_f`` containing the error estimate
corresponding to the interval in the same position in
        ``infodict['rslst']``.
'ierlst'
A rank-1 integer array of length ``M_f`` containing an error flag
corresponding to the interval in the same position in
        ``infodict['rslst']``. See the explanation dictionary (last entry
in the output tuple) for the meaning of the codes.
Examples
--------
Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
>>> from scipy import integrate
>>> x2 = lambda x: x**2
>>> integrate.quad(x2, 0, 4)
(21.333333333333332, 2.3684757858670003e-13)
>>> print(4**3 / 3.) # analytical result
21.3333333333
Calculate :math:`\\int^\\infty_0 e^{-x} dx`
>>> invexp = lambda x: np.exp(-x)
>>> integrate.quad(invexp, 0, np.inf)
(1.0, 5.842605999138044e-11)
>>> f = lambda x,a : a*x
>>> y, err = integrate.quad(f, 0, 1, args=(1,))
>>> y
0.5
>>> y, err = integrate.quad(f, 0, 1, args=(3,))
>>> y
1.5
Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
y parameter as 1::
testlib.c =>
double func(int n, double args[n]){
return args[0]*args[0] + args[1]*args[1];}
compile to library testlib.*
::
from scipy import integrate
import ctypes
lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
lib.func.restype = ctypes.c_double
lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
       integrate.quad(lib.func,0,1,(1,))
#(1.3333333333333333, 1.4802973661668752e-14)
print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
# 1.3333333333333333
    Be aware that pulse shapes and other sharp features that are narrow
    compared to the size of the integration interval may not be integrated
    correctly using this method. A simplified example of this limitation is
    integrating a y-axis reflected step function with many zero values within
    the integral's bounds.
>>> y = lambda x: 1 if x<=0 else 0
>>> integrate.quad(y, -1, 1)
(1.0, 1.1102230246251565e-14)
>>> integrate.quad(y, -1, 100)
(1.0000000002199108, 1.0189464580163188e-08)
>>> integrate.quad(y, -1, 10000)
(0.0, 0.0)
"""
if not isinstance(args, tuple):
args = (args,)
# check the limits of integration: \int_a^b, expect a < b
flip, a, b = b < a, min(a, b), max(a, b)
if weight is None:
retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
points)
else:
if points is not None:
msg = ("Break points cannot be specified when using weighted integrand.\n"
"Continuing, ignoring specified points.")
warnings.warn(msg, IntegrationWarning, stacklevel=2)
retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
limlst, limit, maxp1, weight, wvar, wopts)
if flip:
retval = (-retval[0],) + retval[1:]
ier = retval[-1]
if ier == 0:
return retval[:-1]
msgs = {80: "A Python error occurred possibly while calling the function.",
1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
5: "The integral is probably divergent, or slowly convergent.",
6: "The input is invalid.",
7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
'unknown': "Unknown error."}
if weight in ['cos','sin'] and (b == Inf or a == -Inf):
msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
5: "The integral over this cycle is probably divergent or slowly convergent."}
try:
msg = msgs[ier]
except KeyError:
msg = msgs['unknown']
if ier in [1,2,3,4,5,7]:
if full_output:
if weight in ['cos', 'sin'] and (b == Inf or a == -Inf):
return retval[:-1] + (msg, explain)
else:
return retval[:-1] + (msg,)
else:
warnings.warn(msg, IntegrationWarning, stacklevel=2)
return retval[:-1]
elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6
if epsabs <= 0: # Small error tolerance - applies to all methods
if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
" 5e-29 and 50*(machine epsilon).")
elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == Inf):
msg = ("Sine or cosine weighted intergals with infinite domain"
" must have 'epsabs'>0.")
elif weight is None:
if points is None: # QAGSE/QAGIE
msg = ("Invalid 'limit' argument. There must be"
" at least one subinterval")
else: # QAGPE
if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
msg = ("All break points in 'points' must lie within the"
" integration limits.")
elif len(points) >= limit:
msg = ("Number of break points ({:d})"
" must be less than subinterval"
" limit ({:d})").format(len(points), limit)
else:
if maxp1 < 1:
msg = "Chebyshev moment limit maxp1 must be >=1."
elif weight in ('cos', 'sin') and abs(a+b) == Inf: # QAWFE
msg = "Cycle limit limlst must be >=3."
elif weight.startswith('alg'): # QAWSE
if min(wvar) < -1:
msg = "wvar parameters (alpha, beta) must both be >= -1."
if b < a:
msg = "Integration limits a, b must satistfy a<b."
elif weight == 'cauchy' and wvar in (a, b):
msg = ("Parameter 'wvar' must not equal"
" integration limits 'a' or 'b'.")
raise ValueError(msg)
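# Illustrative sketch (hypothetical helper, not part of the public API):
# supplying a known discontinuity through `points` lets the QAGPE routine
# place a subinterval boundary exactly at the difficult spot.
def _example_quad_with_breakpoint():
    def step(x):
        return 1.0 if x <= 0 else 0.0
    # A wide interval can miss the nonzero region entirely (compare the
    # (0.0, 0.0) doctest result above); a break point at 0 forces
    # subdivision there, recovering the correct value of ~1.0.
    return quad(step, -1, 10000, points=[0.0])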
def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
infbounds = 0
if (b != Inf and a != -Inf):
pass # standard integration
elif (b == Inf and a != -Inf):
infbounds = 1
bound = a
elif (b == Inf and a == -Inf):
infbounds = 2
bound = 0 # ignored
elif (b != Inf and a == -Inf):
infbounds = -1
bound = b
else:
raise RuntimeError("Infinity comparisons don't work for you.")
if points is None:
if infbounds == 0:
return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
else:
return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
else:
if infbounds != 0:
raise ValueError("Infinity inputs cannot be used with break points.")
else:
#Duplicates force function evaluation at singular points
the_points = numpy.unique(points)
the_points = the_points[a < the_points]
the_points = the_points[the_points < b]
the_points = numpy.concatenate((the_points, (0., 0.)))
return _quadpack._qagpe(func,a,b,the_points,args,full_output,epsabs,epsrel,limit)
def _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts):
if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
raise ValueError("%s not a recognized weighting function." % weight)
strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}
if weight in ['cos','sin']:
integr = strdict[weight]
if (b != Inf and a != -Inf): # finite limits
if wopts is None: # no precomputed Chebyshev moments
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1,1)
else: # precomputed Chebyshev moments
momcom = wopts[0]
chebcom = wopts[1]
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1, 2, momcom, chebcom)
elif (b == Inf and a != -Inf):
return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
epsabs,limlst,limit,maxp1)
elif (b != Inf and a == -Inf): # remap function and interval
if weight == 'cos':
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return func(*myargs)
else:
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return -func(*myargs)
args = (func,) + args
return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
full_output, epsabs, limlst, limit, maxp1)
else:
raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
else:
if a in [-Inf,Inf] or b in [-Inf,Inf]:
raise ValueError("Cannot integrate with this weight over an infinite interval.")
if weight.startswith('alg'):
integr = strdict[weight]
return _quadpack._qawse(func, a, b, wvar, integr, args,
full_output, epsabs, epsrel, limit)
else: # weight == 'cauchy'
return _quadpack._qawce(func, a, b, wvar, args, full_output,
epsabs, epsrel, limit)
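# Illustrative sketch (hypothetical helper, not part of the public API): with
# weight='sin' the integrand handed to quad is multiplied by sin(wvar*x), so
# integrating the constant 1 over [0, pi] with wvar=1 evaluates
# int_0^pi sin(x) dx = 2.
def _example_weighted_quad():
    result, abserr = quad(lambda x: 1.0, 0, numpy.pi, weight='sin', wvar=1)
    return result  # ~2.0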
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
"""
Compute a double integral.
Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
and ``y = gfun(x)..hfun(x)``.
Parameters
----------
func : callable
A Python function or method of at least two variables: y must be the
first argument and x the second argument.
a, b : float
The limits of integration in x: `a` < `b`
gfun : callable or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : callable or float
The upper boundary curve in y (same requirements as `gfun`).
args : sequence, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the inner 1-D quadrature
        integration. Default is 1.49e-8. ``dblquad`` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)``
to ``hfun(x)``, and ``result`` is the numerical approximation.
See `epsrel` below.
epsrel : float, optional
Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See also
--------
quad : single integral
tplquad : triple integral
nquad : N-dimensional integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Examples
--------
Compute the double integral of ``x * y**2`` over the box
``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.
>>> from scipy import integrate
>>> f = lambda y, x: x*y**2
>>> integrate.dblquad(f, 0, 2, lambda x: 0, lambda x: 1)
(0.6666666666666667, 7.401486830834377e-15)
"""
def temp_ranges(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
return nquad(func, [temp_ranges, [a, b]], args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
epsrel=1.49e-8):
"""
Compute a triple (definite) integral.
Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.
Parameters
----------
func : function
A Python function or method of at least three variables in the
order (z, y, x).
a, b : float
The limits of integration in x: `a` < `b`
gfun : function or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : function or float
The upper boundary curve in y (same requirements as `gfun`).
qfun : function or float
The lower boundary surface in z. It must be a function that takes
two floats in the order (x, y) and returns a float or a float
indicating a constant boundary surface.
rfun : function or float
The upper boundary surface in z. (Same requirements as `qfun`.)
args : tuple, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the innermost 1-D quadrature
integration. Default is 1.49e-8.
epsrel : float, optional
Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See Also
--------
quad: Adaptive quadrature using QUADPACK
quadrature: Adaptive Gaussian quadrature
fixed_quad: Fixed-order Gaussian quadrature
dblquad: Double integrals
nquad : N-dimensional integrals
romb: Integrators for sampled data
simpson: Integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
scipy.special: For coefficients and roots of orthogonal polynomials
Examples
--------
Compute the triple integral of ``x * y * z``, over ``x`` ranging
from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1.
>>> from scipy import integrate
>>> f = lambda z, y, x: x*y*z
>>> integrate.tplquad(f, 1, 2, lambda x: 2, lambda x: 3,
... lambda x, y: 0, lambda x, y: 1)
(1.8750000000000002, 3.324644794257407e-14)
"""
# f(z, y, x)
# qfun/rfun (x, y)
# gfun/hfun(x)
# nquad will hand (y, x, t0, ...) to ranges0
# nquad will hand (x, t0, ...) to ranges1
# Stupid different API...
def ranges0(*args):
return [qfun(args[1], args[0]) if callable(qfun) else qfun,
rfun(args[1], args[0]) if callable(rfun) else rfun]
def ranges1(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
ranges = [ranges0, ranges1, [a, b]]
return nquad(func, ranges, args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
def nquad(func, ranges, args=None, opts=None, full_output=False):
"""
Integration over multiple variables.
Wraps `quad` to enable integration over multiple variables.
Various options allow improved integration of discontinuous functions, as
well as the use of weighted integration, and generally finer control of the
integration process.
Parameters
----------
func : {callable, scipy.LowLevelCallable}
The function to be integrated. Has arguments of ``x0, ... xn``,
``t0, ... tm``, where integration is carried out over ``x0, ... xn``,
        which must be floats. ``t0, ... tm`` are extra arguments
        passed in ``args``.
Function signature should be ``func(x0, x1, ..., xn, t0, t1, ..., tm)``.
Integration is carried out in order. That is, integration over ``x0``
is the innermost integral, and ``xn`` is the outermost.
If the user desires improved integration performance, then `f` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
where ``n`` is the number of variables and args. The ``xx`` array
contains the coordinates and extra arguments. ``user_data`` is the data
contained in the `scipy.LowLevelCallable`.
ranges : iterable object
Each element of ranges may be either a sequence of 2 numbers, or else
a callable that returns such a sequence. ``ranges[0]`` corresponds to
integration over x0, and so on. If an element of ranges is a callable,
then it will be called with all of the integration arguments available,
as well as any parametric arguments. e.g., if
``func = f(x0, x1, x2, t0, t1)``, then ``ranges[0]`` may be defined as
either ``(a, b)`` or else as ``(a, b) = range0(x1, x2, t0, t1)``.
args : iterable object, optional
Additional arguments ``t0, ..., tn``, required by `func`, `ranges`, and
``opts``.
opts : iterable object or dict, optional
Options to be passed to `quad`. May be empty, a dict, or
a sequence of dicts or functions that return a dict. If empty, the
default options from scipy.integrate.quad are used. If a dict, the same
        options are used for all levels of integration. If a sequence, then each
element of the sequence corresponds to a particular integration. e.g.,
opts[0] corresponds to integration over x0, and so on. If a callable,
the signature must be the same as for ``ranges``. The available
options together with their default values are:
- epsabs = 1.49e-08
- epsrel = 1.49e-08
- limit = 50
- points = None
- weight = None
- wvar = None
- wopts = None
For more information on these options, see `quad` and `quad_explain`.
full_output : bool, optional
Partial implementation of ``full_output`` from scipy.integrate.quad.
The number of integrand function evaluations ``neval`` can be obtained
by setting ``full_output=True`` when calling nquad.
Returns
-------
result : float
The result of the integration.
abserr : float
The maximum of the estimates of the absolute error in the various
integration results.
out_dict : dict, optional
A dict containing additional information on the integration.
See Also
--------
quad : 1-D numerical integration
dblquad, tplquad : double and triple integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
Examples
--------
    >>> from scipy import integrate
    >>> import numpy as np
>>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
... 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
>>> def opts0(*args, **kwargs):
... return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
>>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
... opts=[opts0,{},{},{}], full_output=True)
(1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})
>>> scale = .1
>>> def func2(x0, x1, x2, x3, t0, t1):
... return x0*x1*x3**2 + np.sin(x2) + 1 + (1 if x0+t1*x1-t0>0 else 0)
>>> def lim0(x1, x2, x3, t0, t1):
... return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
... scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
>>> def lim1(x2, x3, t0, t1):
... return [scale * (t0*x2 + t1*x3) - 1,
... scale * (t0*x2 + t1*x3) + 1]
>>> def lim2(x3, t0, t1):
... return [scale * (x3 + t0**2*t1**3) - 1,
... scale * (x3 + t0**2*t1**3) + 1]
>>> def lim3(t0, t1):
... return [scale * (t0+t1) - 1, scale * (t0+t1) + 1]
>>> def opts0(x1, x2, x3, t0, t1):
... return {'points' : [t0 - t1*x1]}
>>> def opts1(x2, x3, t0, t1):
... return {}
>>> def opts2(x3, t0, t1):
... return {}
>>> def opts3(t0, t1):
... return {}
>>> integrate.nquad(func2, [lim0, lim1, lim2, lim3], args=(0,0),
... opts=[opts0, opts1, opts2, opts3])
(25.066666666666666, 2.7829590483937256e-13)
"""
depth = len(ranges)
ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
if args is None:
args = ()
if opts is None:
opts = [dict([])] * depth
if isinstance(opts, dict):
opts = [_OptFunc(opts)] * depth
else:
opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
return _NQuad(func, ranges, opts, full_output).integrate(*args)
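# A minimal usage sketch (assuming SciPy is available): integrate
# f(x, y) = x * y over the triangle 0 <= x <= y <= 1. The inner range is a
# callable of the outer variable, as described in the docstring above.
#
#     result, abserr = nquad(lambda x, y: x * y, [lambda y: [0, y], [0, 1]])
#     # result == 0.125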
class _RangeFunc(object):
def __init__(self, range_):
self.range_ = range_
def __call__(self, *args):
"""Return stored value.
*args needed because range_ can be float or func, and is called with
variable number of parameters.
"""
return self.range_
class _OptFunc(object):
def __init__(self, opt):
self.opt = opt
def __call__(self, *args):
"""Return stored dict."""
return self.opt
class _NQuad(object):
def __init__(self, func, ranges, opts, full_output):
self.abserr = 0
self.func = func
self.ranges = ranges
self.opts = opts
self.maxdepth = len(ranges)
self.full_output = full_output
if self.full_output:
self.out_dict = {'neval': 0}
def integrate(self, *args, **kwargs):
depth = kwargs.pop('depth', 0)
if kwargs:
raise ValueError('unexpected kwargs')
# Get the integration range and options for this depth.
ind = -(depth + 1)
fn_range = self.ranges[ind]
low, high = fn_range(*args)
fn_opt = self.opts[ind]
opt = dict(fn_opt(*args))
if 'points' in opt:
opt['points'] = [x for x in opt['points'] if low <= x <= high]
if depth + 1 == self.maxdepth:
f = self.func
else:
f = partial(self.integrate, depth=depth+1)
quad_r = quad(f, low, high, args=args, full_output=self.full_output,
**opt)
value = quad_r[0]
abserr = quad_r[1]
if self.full_output:
infodict = quad_r[2]
# The 'neval' parameter in full_output returns the total
# number of times the integrand function was evaluated.
# Therefore, only the innermost integration loop counts.
if depth + 1 == self.maxdepth:
self.out_dict['neval'] += infodict['neval']
self.abserr = max(self.abserr, abserr)
if depth > 0:
return value
else:
# Final result of N-D integration with error
if self.full_output:
return value, self.abserr, self.out_dict
else:
return value, self.abserr
|
nmayorov/scipy
|
scipy/integrate/quadpack.py
|
Python
|
bsd-3-clause
| 37,367
|
[
"Gaussian"
] |
795ec4de7f225c1c5a16d8d418d626e60f44c9abc054d2d02e4b7edab050c315
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import collections
import xmltodict
import dateutil.parser
from . import base
from .. import models
from ..utils import force_str
class Parser(models.Visitor):
def visit_string(self, shape, value):
return value or ''
def visit_blob(self, shape, value):
return value or ''
def visit_integer(self, shape, value):
return int(value)
visit_long = visit_integer
def visit_float(self, shape, value):
return float(value)
def visit_double(self, shape, value):
return float(value)
def visit_boolean(self, shape, value):
if value == "true":
return True
return False
def visit_timestamp(self, shape, value):
return dateutil.parser.parse(value)
def visit_list(self, shape, value):
if not value:
return []
subshape = shape.of
result = []
if not isinstance(value, list):
value = [value]
for child in value:
result.append(self.visit(subshape, child[subshape.name]))
return result
def visit_map(self, shape, value):
# FIXME: Make Key/Value configurable
if not value:
return {}
if not isinstance(value, list):
value = [value]
out = {}
key_shape = shape.key_shape
value_shape = shape.value_shape
for child in value:
key = self.visit(key_shape, child["Key"])
value = self.visit(value_shape, child["Value"])
out[key] = value
return out
def visit_structure(self, shape, value):
if not value:
return {}
out = {}
for member in shape.iter_members():
if member.name in value:
out[member.name] = self.visit(
member.shape,
value[member.name],
)
return out
class Serializer(models.Visitor):
def visit(self, shape, name, value):
visit_fn_name = "visit_{}".format(shape.type)
try:
visit_fn = getattr(self, visit_fn_name)
except AttributeError:
raise NotImplementedError(visit_fn_name)
return visit_fn(shape, name, value)
def visit_string(self, shape, name, value):
return value or None
def visit_blob(self, shape, name, value):
return value or None
def visit_timestamp(self, shape, name, value):
return value.isoformat()
def visit_integer(self, shape, name, value):
return value
visit_long = visit_integer
def visit_float(self, shape, name, value):
# On python 2.7 we need to take care to repr() floats because
# >>> str(float("-9999.999999999998"))
# '-10000.0'
return repr(value)
visit_double = visit_float
def visit_boolean(self, shape, name, value):
return "true" if value else "false"
def visit_list(self, shape, name, value):
if not value:
return None
subshape = shape.of
nodes = []
for subvalue in value:
nodes.append({
subshape.name: self.visit(
subshape,
subshape.wire_name,
subvalue
)
})
return nodes
def visit_map(self, shape, name, value):
# FIXME: Make Key/Value configurable
key_shape = shape.key_shape
value_shape = shape.value_shape
if not value:
return None
nodes = []
for k, v in value.items():
nodes.append({
"Key": self.visit(key_shape, key_shape.name, k),
"Value": self.visit(value_shape, value_shape.name, v),
})
return nodes
def visit_structure(self, shape, name, value):
structure = collections.OrderedDict()
for member in shape.iter_members():
if member.name in value:
structure[member.wire_name] = self.visit(
member.shape,
member.wire_name,
value[member.name]
)
return structure
class XmlSerializer(base.Serializer):
content_type = 'text/xml'
def _namespaces(self, operation):
namespaces = {}
for ns, uri in operation.model.metadata.get("namespaces", {}).items():
namespaces[uri] = ns if ns else None
return namespaces
def serialize(self, operation, shape, params):
body = Serializer().visit(
shape,
shape.name,
params,
)
for uri, ns in self._namespaces(operation).items():
if ns:
body["@xmlns:{}".format(ns)] = uri
else:
body["@xmlns"] = uri
return force_str(xmltodict.unparse(
{shape.wire_name: body},
pretty=True,
))
def deserialize(self, operation, shape, body):
payload = xmltodict.parse(
body,
strip_whitespace=False,
process_namespaces=True,
namespaces=self._namespaces(operation),
)
return Parser().visit(shape, payload[shape.name])
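# A minimal round-trip sketch. The ``operation`` and ``shape`` objects come
# from libcloudcore's model layer; the names and params here are illustrative
# only, not part of any real driver:
#
#     serializer = XmlSerializer()
#     body = serializer.serialize(operation, shape, {"Name": "example"})
#     params = serializer.deserialize(operation, shape, body)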
|
Jc2k/libcloudcore
|
libcloudcore/serializers/xml.py
|
Python
|
apache-2.0
| 6,027
|
[
"VisIt"
] |
dab0aad7559efcb1f7cd417d48155dad80f93d7cd16214c933d751c2ce7f61e2
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Utilities for generating nicer plots.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 13, 2012"
import math
import numpy as np
def get_publication_quality_plot(width=8, height=None, plt=None):
"""
Provides a publication quality plot, with nice defaults for font sizes etc.
Args:
width: Width of plot in inches. Defaults to 8in.
        height: Height of plot in inches. Defaults to width * golden ratio.
plt: If plt is supplied, changes will be made to an existing plot.
Otherwise, a new plot will be created.
Returns:
Matplotlib plot object with properly sized fonts.
"""
ticksize = int(width * 2.5)
golden_ratio = (math.sqrt(5) - 1.0) / 2.0
if not height:
height = int(width * golden_ratio)
if plt is None:
import matplotlib.pyplot as plt
plt.figure(figsize=(width, height), facecolor="w")
else:
fig = plt.gcf()
fig.set_size_inches(width, height)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
axes = plt.gca()
axes.set_title(axes.get_title(), size=width * 4)
labelsize = int(width * 3)
axes.set_xlabel(axes.get_xlabel(), size=labelsize)
axes.set_ylabel(axes.get_ylabel(), size=labelsize)
return plt
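# A usage sketch (requires matplotlib; filenames and data are illustrative):
#
#     plt = get_publication_quality_plot(width=8)
#     plt.plot([1, 2, 3], [1, 4, 9])
#     plt.xlabel("x")
#     plt.ylabel("y")
#     plt.savefig("example.png")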
def get_ax_fig_plt(ax):
"""
Helper function used in plot functions supporting an optional Axes argument.
If ax is None, we build the `matplotlib` figure and create the Axes else
we return the current active figure.
Returns:
ax: :class:`Axes` object
figure: matplotlib figure
plt: matplotlib pyplot module.
"""
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
else:
fig = plt.gcf()
return ax, fig, plt
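# Typical pattern in a plotting function supporting an optional Axes
# argument (a sketch; ``plot_values`` is a hypothetical name):
#
#     def plot_values(values, ax=None, **kwargs):
#         ax, fig, plt = get_ax_fig_plt(ax)
#         ax.plot(values, **kwargs)
#         return fig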
def get_axarray_fig_plt(ax_array, nrows=1, ncols=1, sharex=False, sharey=False,
squeeze=True, subplot_kw=None, gridspec_kw=None, **fig_kw):
"""
Helper function used in plot functions that accept an optional array of Axes
as argument. If ax_array is None, we build the `matplotlib` figure and
create the array of Axes by calling plt.subplots else we return the
current active figure.
Returns:
ax: Array of :class:`Axes` objects
figure: matplotlib figure
plt: matplotlib pyplot module.
"""
import matplotlib.pyplot as plt
if ax_array is None:
fig, ax_array = plt.subplots(nrows=nrows, ncols=ncols, sharex=sharex,
sharey=sharey, squeeze=squeeze,
subplot_kw=subplot_kw,
gridspec_kw=gridspec_kw, **fig_kw)
else:
fig = plt.gcf()
if squeeze:
ax_array = np.array(ax_array).ravel()
if len(ax_array) == 1:
            ax_array = ax_array[0]
return ax_array, fig, plt
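# The array variant follows the same pattern (a sketch with hypothetical
# names):
#
#     def plot_panels(datasets, ax_array=None):
#         ax_array, fig, plt = get_axarray_fig_plt(ax_array, nrows=1,
#                                                  ncols=len(datasets))
#         for ax, data in zip(ax_array, datasets):
#             ax.plot(data)
#         return fig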
def add_fig_kwargs(func):
"""
Decorator that adds keyword arguments for functions returning matplotlib figure.
See doc string below for the list of supported options.
"""
from functools import wraps
@wraps(func)
def wrapper(*args, **kwargs):
# pop the kwds used by the decorator.
title = kwargs.pop("title", None)
size_kwargs = kwargs.pop("size_kwargs", None)
show = kwargs.pop("show", True)
savefig = kwargs.pop("savefig", None)
tight_layout = kwargs.pop("tight_layout", False)
# Call func
fig = func(*args, **kwargs)
# Operate on matplotlib figure.
if title is not None: fig.suptitle(title)
if size_kwargs is not None:
fig.set_size_inches(size_kwargs.pop("w"), size_kwargs.pop("h"),
**size_kwargs)
        # Apply tight_layout before saving/showing so the saved figure
        # matches what is displayed.
        if tight_layout: fig.tight_layout()
        if savefig: fig.savefig(savefig)
        if show:
            import matplotlib.pyplot as plt
            plt.show()
return fig
s = "\n" + """\
keyword arguments controlling the display of the figure:
================ ====================================================
kwargs Meaning
================ ====================================================
title Title of the plot (Default: None).
show True to show the figure (Default True).
savefig 'abc.png' or 'abc.eps' to save the figure to a file.
size_kwargs Dictionary with options passed to fig.set_size_inches
example: size_kwargs=dict(w=3, h=4)
    tight_layout      True to call fig.tight_layout (default: False)
================ ===================================================="""
    if wrapper.__doc__ is not None:
        # Append the kwargs table to the existing docstring.
        wrapper.__doc__ += "\n" + s
    else:
        # No docstring on the wrapped function: use the table alone.
        wrapper.__doc__ = s
return wrapper
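# Decorating a plot function then gives it the keyword arguments listed in
# the table above (a sketch building on get_ax_fig_plt; names hypothetical):
#
#     @add_fig_kwargs
#     def plot_values(values, ax=None):
#         ax, fig, plt = get_ax_fig_plt(ax)
#         ax.plot(values)
#         return fig
#
#     # plot_values(data, title="My data", show=False, savefig="out.png")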
|
sonium0/pymatgen
|
pymatgen/util/plotting_utils.py
|
Python
|
mit
| 5,230
|
[
"pymatgen"
] |
ca72b01ac341c78bcb2c6fca61e55afe52e98e6fc6b075e9c725af869a00fb80
|
# -*- coding: utf-8 -*-
#
# test_stdp_triplet_synapse.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This script tests the stdp_triplet_synapse in NEST.
import nest
import unittest
from math import exp
import numpy as np
@nest.ll_api.check_stack
class STDPTripletConnectionTestCase(unittest.TestCase):
"""Check stdp_triplet_connection model properties."""
def setUp(self):
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
# settings
self.dendritic_delay = 1.0
self.decay_duration = 5.0
self.synapse_model = "stdp_triplet_synapse"
self.syn_spec = {
"model": self.synapse_model,
"delay": self.dendritic_delay,
# set receptor 1 post-synaptically, to not generate extra spikes
"receptor_type": 1,
"weight": 5.0,
"tau_plus": 16.8,
"tau_plus_triplet": 101.0,
"Aplus": 0.1,
"Aminus": 0.1,
"Aplus_triplet": 0.1,
"Aminus_triplet": 0.1,
"Kplus": 0.0,
"Kplus_triplet": 0.0,
"Wmax": 100.0,
}
self.post_neuron_params = {
"tau_minus": 33.7,
"tau_minus_triplet": 125.0,
}
# setup basic circuit
self.pre_neuron = nest.Create("parrot_neuron")
self.post_neuron = nest.Create(
"parrot_neuron", 1, params=self.post_neuron_params)
nest.Connect(self.pre_neuron, self.post_neuron, syn_spec=self.syn_spec)
def generateSpikes(self, neuron, times):
"""Trigger spike to given neuron at specified times."""
delay = 1.
gen = nest.Create("spike_generator", 1, {
"spike_times": [t - delay for t in times]})
nest.Connect(gen, neuron, syn_spec={"delay": delay})
def status(self, which):
"""Get synapse parameter status."""
stats = nest.GetConnections(
self.pre_neuron, synapse_model=self.synapse_model)
return nest.GetStatus(stats, [which])[0][0]
def decay(self, time, Kplus, Kplus_triplet, Kminus, Kminus_triplet):
"""Decay variables."""
Kplus *= exp(-time / self.syn_spec["tau_plus"])
Kplus_triplet *= exp(-time / self.syn_spec["tau_plus_triplet"])
Kminus *= exp(-time / self.post_neuron_params["tau_minus"])
Kminus_triplet *= exp(-time /
self.post_neuron_params["tau_minus_triplet"])
return (Kplus, Kplus_triplet, Kminus, Kminus_triplet)
def facilitate(self, w, Kplus, Kminus_triplet):
"""Facilitate weight."""
Wmax = self.status("Wmax")
return np.sign(Wmax) * (abs(w) + Kplus * (
self.syn_spec["Aplus"] +
self.syn_spec["Aplus_triplet"] * Kminus_triplet)
)
def depress(self, w, Kminus, Kplus_triplet):
"""Depress weight."""
Wmax = self.status("Wmax")
return np.sign(Wmax) * (abs(w) - Kminus * (
self.syn_spec["Aminus"] +
self.syn_spec["Aminus_triplet"] * Kplus_triplet)
)
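    # The two helpers above mirror the triplet STDP rule of Pfister and
    # Gerstner (2006): on a post-synaptic spike the weight is facilitated by
    #     dw = Kplus * (Aplus + Aplus_triplet * Kminus_triplet)
    # and on a pre-synaptic spike it is depressed by
    #     dw = Kminus * (Aminus + Aminus_triplet * Kplus_triplet),
    # with all trace variables read out just before the spike.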
def assertAlmostEqualDetailed(self, expected, given, message):
"""Improve assetAlmostEqual with detailed message."""
messageWithValues = "%s (expected: `%s` was: `%s`" % (
message, str(expected), str(given))
self.assertAlmostEqual(given, expected, msg=messageWithValues)
def test_badPropertiesSetupsThrowExceptions(self):
"""Check that exceptions are thrown when setting bad parameters."""
def setupProperty(property):
bad_syn_spec = self.syn_spec.copy()
bad_syn_spec.update(property)
nest.Connect(self.pre_neuron, self.post_neuron,
syn_spec=bad_syn_spec)
def badPropertyWith(content, parameters):
self.assertRaisesRegexp(
nest.kernel.NESTError, "BadProperty(.+)" + content,
setupProperty, parameters
)
badPropertyWith("Kplus", {"Kplus": -1.0})
badPropertyWith("Kplus_triplet", {"Kplus_triplet": -1.0})
def test_varsZeroAtStart(self):
"""Check that pre and post-synaptic variables are zero at start."""
self.assertAlmostEqualDetailed(
0.0, self.status("Kplus"), "Kplus should be zero")
self.assertAlmostEqualDetailed(0.0, self.status(
"Kplus_triplet"), "Kplus_triplet should be zero")
def test_preVarsIncreaseWithPreSpike(self):
"""Check that pre-synaptic variables (Kplus, Kplus_triplet) increase
after each pre-synaptic spike."""
self.generateSpikes(self.pre_neuron, [2.0])
Kplus = self.status("Kplus")
Kplus_triplet = self.status("Kplus_triplet")
nest.Simulate(20.0)
self.assertAlmostEqualDetailed(
Kplus + 1.0,
self.status("Kplus"),
"Kplus should have increased by 1")
self.assertAlmostEqualDetailed(
Kplus_triplet + 1.0,
self.status("Kplus_triplet"),
"Kplus_triplet should have increased by 1")
def test_preVarsDecayAfterPreSpike(self):
"""Check that pre-synaptic variables (Kplus, Kplus_triplet) decay
after each pre-synaptic spike."""
self.generateSpikes(self.pre_neuron, [2.0])
# trigger computation
self.generateSpikes(self.pre_neuron, [2.0 + self.decay_duration])
(Kplus, Kplus_triplet, _, _) = self.decay(
self.decay_duration, 1.0, 1.0, 0.0, 0.0)
Kplus += 1.0
Kplus_triplet += 1.0
nest.Simulate(20.0)
        self.assertAlmostEqualDetailed(
            Kplus, self.status("Kplus"), "Kplus should have decayed")
        self.assertAlmostEqualDetailed(Kplus_triplet, self.status(
            "Kplus_triplet"), "Kplus_triplet should have decayed")
def test_preVarsDecayAfterPostSpike(self):
"""Check that pre-synaptic variables (Kplus, Kplus_triplet) decay
after each post-synaptic spike."""
self.generateSpikes(self.pre_neuron, [2.0])
self.generateSpikes(self.post_neuron, [3.0, 4.0])
# trigger computation
self.generateSpikes(self.pre_neuron, [2.0 + self.decay_duration])
(Kplus, Kplus_triplet, _, _) = self.decay(
self.decay_duration, 1.0, 1.0, 0.0, 0.0)
Kplus += 1.0
Kplus_triplet += 1.0
nest.Simulate(20.0)
        self.assertAlmostEqualDetailed(
            Kplus, self.status("Kplus"), "Kplus should have decayed")
        self.assertAlmostEqualDetailed(Kplus_triplet, self.status(
            "Kplus_triplet"), "Kplus_triplet should have decayed")
def test_weightChangeWhenPrePostSpikes(self):
"""Check that weight changes whenever a pre-post spike pair happen."""
self.generateSpikes(self.pre_neuron, [2.0])
self.generateSpikes(self.post_neuron, [4.0])
self.generateSpikes(self.pre_neuron, [6.0]) # trigger computation
Kplus = self.status("Kplus")
Kplus_triplet = self.status("Kplus_triplet")
Kminus = 0.0
Kminus_triplet = 0.0
weight = self.status("weight")
Wmax = self.status("Wmax")
(Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
2.0, Kplus, Kplus_triplet, Kminus, Kminus_triplet)
weight = self.depress(weight, Kminus, Kplus_triplet)
Kplus += 1.0
Kplus_triplet += 1.0
(Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
2.0 + self.dendritic_delay, Kplus, Kplus_triplet,
Kminus, Kminus_triplet
)
weight = self.facilitate(weight, Kplus, Kminus_triplet)
Kminus += 1.0
Kminus_triplet += 1.0
(Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
2.0 - self.dendritic_delay, Kplus, Kplus_triplet,
Kminus, Kminus_triplet
)
weight = self.depress(weight, Kminus, Kplus_triplet)
nest.Simulate(20.0)
self.assertAlmostEqualDetailed(weight, self.status(
"weight"), "weight should have decreased")
def test_weightChangeWhenPrePostPreSpikes(self):
"""Check that weight changes whenever a pre-post-pre spike triplet
happen."""
self.generateSpikes(self.pre_neuron, [2.0, 6.0])
self.generateSpikes(self.post_neuron, [4.0])
self.generateSpikes(self.pre_neuron, [8.0]) # trigger computation
Kplus = self.status("Kplus")
Kplus_triplet = self.status("Kplus_triplet")
Kminus = 0.0
Kminus_triplet = 0.0
weight = self.status("weight")
Wmax = self.status("Wmax")
(Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
2.0, Kplus, Kplus_triplet, Kminus, Kminus_triplet)
weight = self.depress(weight, Kminus, Kplus_triplet)
Kplus += 1.0
Kplus_triplet += 1.0
(Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
2.0 + self.dendritic_delay, Kplus, Kplus_triplet,
Kminus, Kminus_triplet
)
weight = self.facilitate(weight, Kplus, Kminus_triplet)
Kminus += 1.0
Kminus_triplet += 1.0
(Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
2.0 - self.dendritic_delay, Kplus, Kplus_triplet,
Kminus, Kminus_triplet
)
weight = self.depress(weight, Kminus, Kplus_triplet)
Kplus += 1.0
Kplus_triplet += 1.0
(Kplus, Kplus_triplet, Kminus, Kminus_triplet) = self.decay(
2.0, Kplus, Kplus_triplet, Kminus, Kminus_triplet)
weight = self.depress(weight, Kminus, Kplus_triplet)
nest.Simulate(20.0)
self.assertAlmostEqualDetailed(weight, self.status(
"weight"), "weight should have decreased")
    def test_maxWeightSaturatesWeight(self):
        """Check that setting the maximum weight property keeps the weight limited."""
limited_weight = self.status("weight") + 1e-10
limited_syn_spec = self.syn_spec.copy()
limited_syn_spec.update({"Wmax": limited_weight})
nest.Connect(self.pre_neuron, self.post_neuron,
syn_spec=limited_syn_spec)
self.generateSpikes(self.pre_neuron, [2.0])
self.generateSpikes(self.pre_neuron, [3.0]) # trigger computation
nest.Simulate(20.0)
self.assertAlmostEqualDetailed(limited_weight, self.status(
"weight"), "weight should have been limited")
@nest.ll_api.check_stack
class STDPTripletInhTestCase(STDPTripletConnectionTestCase):
def setUp(self):
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
# settings
self.dendritic_delay = 1.0
self.decay_duration = 5.0
self.synapse_model = "stdp_triplet_synapse"
self.syn_spec = {
"model": self.synapse_model,
"delay": self.dendritic_delay,
# set receptor 1 post-synaptically, to not generate extra spikes
"receptor_type": 1,
"weight": -5.0,
"tau_plus": 16.8,
"tau_plus_triplet": 101.0,
"Aplus": 0.1,
"Aminus": 0.1,
"Aplus_triplet": 0.1,
"Aminus_triplet": 0.1,
"Kplus": 0.0,
"Kplus_triplet": 0.0,
"Wmax": -100.0,
}
self.post_neuron_params = {
"tau_minus": 33.7,
"tau_minus_triplet": 125.0,
}
# setup basic circuit
self.pre_neuron = nest.Create("parrot_neuron")
self.post_neuron = nest.Create("parrot_neuron", 1,
params=self.post_neuron_params)
nest.Connect(self.pre_neuron, self.post_neuron, syn_spec=self.syn_spec)
def suite_inh():
return unittest.makeSuite(STDPTripletInhTestCase, "test")
def suite():
return unittest.makeSuite(STDPTripletConnectionTestCase, "test")
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
runner.run(suite_inh())
if __name__ == "__main__":
run()
|
hakonsbm/nest-simulator
|
pynest/nest/tests/test_stdp_triplet_synapse.py
|
Python
|
gpl-2.0
| 12,696
|
[
"NEURON"
] |
09386edcb20b723f96d677f2eadff7e4a0299755b4a2e8b42523ba8d18ca83e4
|
# -*- coding: utf-8 -*-
"""
"Sandbox" module for exploring API useful for digital labbooks.
Examples
--------
>>> from chempy.units import to_unitless, default_units as u
>>> s1 = Solution(0.1*u.dm3, {'CH3OH': 0.1 * u.molar})
>>> s2 = Solution(0.3*u.dm3, {'CH3OH': 0.4 * u.molar, 'Na+': 2e-3*u.molar, 'Cl-': 2e-3*u.molar})
>>> s3 = s1 + s2
>>> abs(to_unitless(s3.volume - 4e-4 * u.m**3, u.dm3)) < 1e-15
True
>>> s3.concentrations.isclose({'CH3OH': 0.325*u.molar, 'Na+': 1.5e-3*u.molar, 'Cl-': 1.5e-3*u.molar})
True
>>> s4 = s3.dissolve({'CH3OH': 1*u.gram})
>>> abs(s4.concentrations['CH3OH'] - (0.325 + 1/(12.011 + 4*1.008 + 15.999)/.4)*u.molar) < 1e-4
True
"""
import copy
from .chemistry import Substance
from .units import (
get_derived_unit,
html_of_unit,
is_unitless,
SI_base_registry,
to_unitless,
rescale,
default_units as u,
)
from .util.arithmeticdict import ArithmeticDict, _imul, _itruediv
from .printing import as_per_substance_html_table
class QuantityDict(ArithmeticDict):
def __init__(self, units, *args, **kwargs):
self.units = units
super(QuantityDict, self).__init__(lambda: 0 * self.units, *args, **kwargs)
self._check()
@classmethod
def of_quantity(cls, quantity_name, *args, **kwargs):
instance = cls(
get_derived_unit(SI_base_registry, quantity_name), *args, **kwargs
)
instance.quantity_name = quantity_name
return instance
def rescale(self, new_units):
return self.__class__(
new_units, {k: rescale(v, new_units) for k, v in self.items()}
)
def _repr_html_(self):
if hasattr(self, "quantity_name"):
header = self.quantity_name.capitalize() + " / "
else:
header = ""
header += html_of_unit(self.units)
tab = as_per_substance_html_table(to_unitless(self, self.units), header=header)
return tab._repr_html_()
def _check(self):
for k, v in self.items():
if not is_unitless(v / self.units):
raise ValueError(
"entry for %s (%s) is not compatible with %s" % (k, v, self.units)
)
def __setitem__(self, key, value):
if not is_unitless(value / self.units):
raise ValueError(
"entry for %s (%s) is not compatible with %s" % (key, value, self.units)
)
super(QuantityDict, self).__setitem__(key, value)
def copy(self):
return self.__class__(self.units, copy.deepcopy(list(self.items())))
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, repr(self.units), dict(self)
)
def __mul__(self, other):
d = dict(copy.deepcopy(list(self.items())))
_imul(d, other)
return self.__class__(self.units * getattr(other, "units", 1), d)
def __truediv__(self, other):
d = dict(copy.deepcopy(list(self.items())))
_itruediv(d, other)
return self.__class__(self.units / getattr(other, "units", 1), d)
def __floordiv__(self, other):
a = self.copy()
if getattr(other, "units", 1) != 1:
raise ValueError("Floor division with quantities not defined")
a //= other
return a
def __rtruediv__(self, other):
""" other / self """
return self.__class__(
getattr(other, "units", 1) / self.units,
{k: other / v for k, v in self.items()},
)
def __rfloordiv__(self, other):
""" other // self """
return self.__class__(
getattr(other, "units", 1) / self.units,
{k: other // v for k, v in self.items()},
)
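# A minimal QuantityDict sketch (assuming chempy's ``default_units``; the
# values are illustrative):
#
#     from chempy.units import default_units as u
#     conc = QuantityDict(u.molar, {'Na+': 1e-3 * u.molar})
#     amounts = conc * (0.5 * u.dm3)   # per-substance amounts of substance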
class AutoRegisteringSubstanceDict(object):
def __init__(self, factory=Substance.from_formula):
self.factory = factory
self._store = {}
def __getitem__(self, key):
if key not in self._store:
self._store[key] = self.factory(key)
return self._store[key]
class Solution(object):
def __init__(self, volume, concentrations, substances=None, solvent=None):
if not is_unitless(volume / u.dm3):
raise ValueError("volume need to have a unit (e.g. dm3)")
self.volume = volume
self.concentrations = QuantityDict(u.molar, concentrations)
if substances is None:
substances = AutoRegisteringSubstanceDict()
self.substances = substances
self.solvent = solvent
def __eq__(self, other):
if not isinstance(other, Solution):
return NotImplemented
return all(
[
getattr(self, k) == getattr(other, k)
for k in "volume concentrations substances solvent".split()
]
)
def __add__(self, other):
if self.solvent != other.solvent:
raise NotImplementedError(
"Mixed solvent should be represented as concentrations"
)
tot_amount = (
self.concentrations * self.volume + other.concentrations * other.volume
)
tot_vol = self.volume + other.volume
return Solution(tot_vol, tot_amount / tot_vol, self.substances, self.solvent)
def dissolve(self, masses):
contrib = QuantityDict(
u.molar,
{
k: v / self.substances[k].molar_mass() / self.volume
for k, v in masses.items()
},
)
return Solution(
self.volume, self.concentrations + contrib, self.substances, self.solvent
)
def withdraw(self, volume):
if volume > self.volume:
raise ValueError(
"Cannot withdraw a volume greater than the solution volume"
)
if volume < volume * 0:
raise ValueError("Cannot withdraw a negative volume")
self.volume -= volume
return Solution(volume, self.concentrations, self.substances, self.solvent)
|
bjodah/aqchem
|
chempy/_solution.py
|
Python
|
bsd-2-clause
| 5,994
|
[
"ChemPy"
] |
e30069c3acd106c5013fc6b2e9c103385c34cb420d8f9fab5dde1782a1d83f94
|
# This file is part of xrayutilities.
#
# xrayutilities is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2012-2020 Dominik Kriegner <dominik.kriegner@gmail.com>
import unittest
import xrayutilities as xu
class Test_findsym(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.materials = []
for name, obj in xu.materials.predefined_materials.__dict__.items():
if isinstance(obj, xu.materials.Crystal):
cls.materials.append(obj)
def test_findsym(self):
"""
        Test that built-in materials use the highest possible space group
setting for their given unit cell.
"""
for m in self.materials:
p1 = m.lattice.convert_to_P1()
self.assertEqual(m.lattice, p1.findsym(),
msg=f"{m.name} does not use highest symmetry!")
if __name__ == '__main__':
unittest.main()
|
dkriegner/xrayutilities
|
tests/test_materials_findsym.py
|
Python
|
gpl-2.0
| 1,502
|
[
"CRYSTAL"
] |
1c608e5f3cb492a94b299d866cbfa99fb3a4826447912685180c66aab3211ce4
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from .mediaitem import ImageMediaItem
from .imagetab import ImageTab
|
marmyshev/item_title
|
openlp/plugins/images/lib/__init__.py
|
Python
|
gpl-2.0
| 2,176
|
[
"Brian"
] |
7c39d93a464abc3dffc5beae34e7765b30139e9a048be95cefa62fdc51b56ddb
|
# Removes the first 2 lines from all output files.
import operator
import os
from galaxy import datatypes
def exec_after_process(app, inp_data, out_data, param_dict, tool, stdout, stderr):
    for data_count in range(len(out_data)):
        output_filename = param_dict.get('out_file' + str(data_count + 1), None)
        if output_filename is not None:
            f = open(output_filename, 'r')
            contents = f.readlines()
            f.close()
            contents = contents[2:]
            f = open(output_filename, 'w')
            for line in contents:
                f.write(line)
            f.close()
|
jmchilton/galaxy-central
|
tools/emboss/phylipnew/remove_first_2_lines.py
|
Python
|
mit
| 628
|
[
"Galaxy"
] |
6a5e531e3d3fb21eb8183232b746947882816f10f7c0280af9b8fd9734613f11
|
"""Definitions of Celery tasks in Askbot
in this module there are two types of functions:
* those wrapped with a @task decorator and a ``_celery_task`` suffix - celery tasks
* those with the same base name, but without the decorator and the name suffix
the actual work units run by the task
Celery tasks are special functions in a way that they require all the parameters
be serializable - so instead of ORM objects we pass object id's and
instead of query sets - lists of ORM object id's.
That is the reason for having two types of methods here:
* the base methods (those without the decorator and the
``_celery_task`` in the end of the name
are work units that are called from the celery tasks.
* celery tasks - shells that reconstitute the necessary ORM
objects and call the base methods
"""
import sys
import traceback
import logging
import uuid
from django.contrib.contenttypes.models import ContentType
from django.template import Context
from django.template.loader import get_template
from django.utils.translation import ugettext as _
from django.utils import simplejson
from celery import task
from askbot.conf import settings as askbot_settings
from askbot import const
from askbot import mail
from askbot import models
from askbot.models import Post, Thread, User, ReplyAddress
from askbot.models.badges import award_badges_signal
from askbot.models import get_reply_to_addresses, format_instant_notification_email
from askbot import exceptions as askbot_exceptions
from askbot.utils.twitter import Twitter
from askbot.models.user import Group
from askbot.models.post import PostToGroup
# TODO: Make exceptions raised inside record_post_update_celery_task() ...
# ... propagate upwards to the test runner, but only if CELERY_ALWAYS_EAGER = True
# (i.e. if Celery tasks are not deferred but executed straight away)
@task(ignore_result=True)
def tweet_new_post_task(post_id):
post = Post.objects.get(id=post_id)
is_mod = post.author.is_administrator_or_moderator()
if is_mod or post.author.reputation > askbot_settings.MIN_REP_TO_TWEET_ON_OTHERS_ACCOUNTS:
tweeters = User.objects.filter(social_sharing_mode=const.SHARE_EVERYTHING)
tweeters = tweeters.exclude(id=post.author.id)
access_tokens = tweeters.values_list('twitter_access_token', flat=True)
else:
access_tokens = list()
tweet_text = post.as_tweet()
twitter = Twitter()
for raw_token in access_tokens:
token = simplejson.loads(raw_token)
twitter.tweet(tweet_text, access_token=token)
if post.author.social_sharing_mode != const.SHARE_NOTHING:
token = simplejson.loads(post.author.twitter_access_token)
twitter.tweet(tweet_text, access_token=token)
@task(ignore_result = True)
def notify_author_of_published_revision_celery_task(revision):
#todo: move this to ``askbot.mail`` module
#for answerable email only for now, because
#we don't yet have the template for the read-only notification
if askbot_settings.REPLY_BY_EMAIL:
#generate two reply codes (one for edit and one for addition)
#to format an answerable email or not answerable email
reply_options = {
'user': revision.author,
'post': revision.post,
'reply_action': 'append_content'
}
append_content_address = ReplyAddress.objects.create_new(
**reply_options
).as_email_address()
reply_options['reply_action'] = 'replace_content'
replace_content_address = ReplyAddress.objects.create_new(
**reply_options
).as_email_address()
#populate template context variables
reply_code = append_content_address + ',' + replace_content_address
if revision.post.post_type == 'question':
mailto_link_subject = revision.post.thread.title
else:
mailto_link_subject = _('An edit for my answer')
#todo: possibly add more mailto thread headers to organize messages
prompt = _('To add to your post EDIT ABOVE THIS LINE')
reply_separator_line = const.SIMPLE_REPLY_SEPARATOR_TEMPLATE % prompt
data = {
'site_name': askbot_settings.APP_SHORT_NAME,
'post': revision.post,
'author_email_signature': revision.author.email_signature,
'replace_content_address': replace_content_address,
'reply_separator_line': reply_separator_line,
'mailto_link_subject': mailto_link_subject,
'reply_code': reply_code
}
#load the template
template = get_template('email/notify_author_about_approved_post.html')
#todo: possibly add headers to organize messages in threads
headers = {'Reply-To': append_content_address}
#send the message
mail.send_mail(
subject_line = _('Your post at %(site_name)s is now published') % data,
body_text = template.render(Context(data)),
recipient_list = [revision.author.email,],
related_object = revision,
activity_type = const.TYPE_ACTIVITY_EMAIL_UPDATE_SENT,
headers = headers
)
@task(ignore_result = True)
def record_post_update_celery_task(
post_id,
post_content_type_id,
newly_mentioned_user_id_list=None,
updated_by_id=None,
suppress_email=False,
timestamp=None,
created=False,
diff=None,
):
#reconstitute objects from the database
updated_by = User.objects.get(id=updated_by_id)
post_content_type = ContentType.objects.get(id=post_content_type_id)
post = post_content_type.get_object_for_this_type(id=post_id)
newly_mentioned_users = User.objects.filter(
id__in=newly_mentioned_user_id_list
)
try:
notify_sets = post.get_notify_sets(
mentioned_users=newly_mentioned_users,
exclude_list=[updated_by,]
)
#todo: take into account created == True case
#update_object is not used
(activity_type, update_object) = post.get_updated_activity_data(created)
post.issue_update_notifications(
updated_by=updated_by,
notify_sets=notify_sets,
activity_type=activity_type,
suppress_email=suppress_email,
timestamp=timestamp,
diff=diff
)
except Exception:
# HACK: exceptions from Celery job don't propagate upwards
# to the Django test runner
# so at least let's print tracebacks
print >>sys.stderr, unicode(traceback.format_exc()).encode('utf-8')
raise
@task(ignore_result=True)
def toggle_favorite_question(user=None,
question=None):
# todo: do not pass model object as an argument to avoid race conditions
user.toggle_favorite_question(question)
@task(ignore_result=True)
def make_thread_private(
question=None,
user=None,
group_id=None):
# todo: do not pass model object as an argument to avoid race conditions
question.thread.make_private(user=user, group_id=group_id)
@task(ignore_result=True)
def make_thread_public(
question=None,
recursive=False):
# todo: do not pass model object as an argument to avoid race conditions
question.thread.make_public(recursive=recursive)
@task(ignore_result=True)
def remove_draft_answer(author=None, thread=None):
drafts = models.DraftAnswer.objects.filter(
author=author,
thread=thread
)
drafts.delete()
@task(ignore_result=True)
def remove_draft_question(author=None):
drafts = models.DraftQuestion.objects.filter(
author=author
)
drafts.delete()
@task(ignore_result=True)
def add_post_revision(
post_id=None,
author=None,
revised_at=None,
text=None,
comment=None,
by_email=False,
**kwargs
):
post = Post.objects.get(id=post_id)
post.add_revision(
author = author,
revised_at = revised_at,
text = text,
comment = comment,
by_email = by_email,
**kwargs
)
@task(ignore_result=True)
def add_post_to_groups(post=None,
groups=[]):
# todo: do not pass model object as an argument to avoid race conditions
"""associates post with groups"""
#this is likely to be temporary - we add
#vip groups to the list behind the scenes.
groups = list(groups)
vips = Group.objects.filter(is_vip=True)
groups.extend(vips)
#todo: use bulk-creation
for group in groups:
PostToGroup.objects.get_or_create(post=post, group=group)
if post.is_answer() or post.is_question():
comments = post.comments.all()
for group in groups:
for comment in comments:
PostToGroup.objects.get_or_create(post=comment, group=group)
@task(ignore_result = True)
def set_thread_last_activity(thread=None,
last_activity_at=None,
last_activity_by=None):
thread.set_last_activity(last_activity_at=last_activity_at,
last_activity_by=last_activity_by)
@task(ignore_result = True)
def record_question_visit(
question_post = None,
user_id = None,
update_view_count = False):
"""celery task which records question visit by a person
updates view counter, if necessary,
and awards the badges associated with the
question visit
"""
#1) maybe update the view count
#question_post = Post.objects.filter(
# id = question_post_id
#).select_related('thread')[0]
if update_view_count:
question_post.thread.increase_view_count()
#we do not track visits per anon user
if user_id is None:
return
user = User.objects.get(id=user_id)
#2) question view count per user and clear response displays
#user = User.objects.get(id = user_id)
if user.is_authenticated():
#get response notifications
user.visit_question(question_post)
#3) send award badges signal for any badges
#that are awarded for question views
award_badges_signal.send(None,
event = 'view_question',
actor = user,
context_object = question_post,
)
@task()
def send_instant_notifications_about_activity_in_post(
update_activity = None,
post = None,
recipients = None,
):
#reload object from the database
post = Post.objects.get(id=post.id)
if post.is_approved() is False:
return
if recipients is None:
return
acceptable_types = const.RESPONSE_ACTIVITY_TYPES_FOR_INSTANT_NOTIFICATIONS
if update_activity.activity_type not in acceptable_types:
return
#calculate some variables used in the loop below
update_type_map = const.RESPONSE_ACTIVITY_TYPE_MAP_FOR_TEMPLATES
update_type = update_type_map[update_activity.activity_type]
origin_post = post.get_origin_post()
headers = mail.thread_headers(
post,
origin_post,
update_activity.activity_type
)
logger = logging.getLogger()
if logger.getEffectiveLevel() <= logging.DEBUG:
log_id = uuid.uuid1()
message = 'email-alert %s, logId=%s' % (post.get_absolute_url(), log_id)
logger.debug(message)
else:
log_id = None
for user in recipients:
if user.is_blocked():
continue
reply_address, alt_reply_address = get_reply_to_addresses(user, post)
subject_line, body_text = format_instant_notification_email(
to_user = user,
from_user = update_activity.user,
post = post,
reply_address = reply_address,
alt_reply_address = alt_reply_address,
update_type = update_type,
template = get_template('email/instant_notification.html')
)
headers['Reply-To'] = reply_address
try:
mail.send_mail(
subject_line=subject_line,
body_text=body_text,
recipient_list=[user.email],
related_object=origin_post,
activity_type=const.TYPE_ACTIVITY_EMAIL_UPDATE_SENT,
headers=headers,
raise_on_failure=True
)
except askbot_exceptions.EmailNotSent, error:
logger.debug(
'%s, error=%s, logId=%s' % (user.email, error, log_id)
)
else:
logger.debug('success %s, logId=%s' % (user.email, log_id))
|
simalytics/askbot-devel
|
askbot/tasks.py
|
Python
|
gpl-3.0
| 13,501
|
[
"VisIt"
] |
7fa2ae527fbb29971564a3a7fa8984343d96e80c56ba2053db599d662e3f1c2f
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import base64
import binascii
import os
import re
import StringIO
from boto.exception import BotoClientError
from boto.s3.key import Key as S3Key
from boto.s3.keyfile import KeyFile
from boto.utils import compute_hash
from boto.utils import get_utf8_value
class Key(S3Key):
"""
Represents a key (object) in a GS bucket.
:ivar bucket: The parent :class:`boto.gs.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in GS.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | DURABLE_REDUCED_AVAILABILITY.
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar generation: The generation number of the object.
:ivar metageneration: The generation number of the object metadata.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
:ivar cloud_hashes: Dictionary of checksums as supplied by the storage
provider.
"""
def __init__(self, bucket=None, name=None, generation=None):
super(Key, self).__init__(bucket=bucket, name=name)
self.generation = generation
        self.metageneration = None
self.cloud_hashes = {}
self.component_count = None
def __repr__(self):
if self.generation and self.metageneration:
ver_str = '#%s.%s' % (self.generation, self.metageneration)
else:
ver_str = ''
if self.bucket:
return '<Key: %s,%s%s>' % (self.bucket.name, self.name, ver_str)
else:
return '<Key: None,%s%s>' % (self.name, ver_str)
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
elif name == 'Generation':
self.generation = value
elif name == 'MetaGeneration':
self.metageneration = value
else:
setattr(self, name, value)
def handle_version_headers(self, resp, force=False):
self.metageneration = resp.getheader('x-goog-metageneration', None)
self.generation = resp.getheader('x-goog-generation', None)
def handle_addl_headers(self, headers):
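        # The hash header arrives as a comma-separated list of
        # algorithm=base64-digest pairs, e.g. (values illustrative):
        #   x-goog-hash: crc32c=n03x6A==,md5=Ojk9c3dhfxgoKVVHYwFbHQ==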
for key, value in headers:
if key == 'x-goog-hash':
for hash_pair in value.split(','):
alg, b64_digest = hash_pair.strip().split('=', 1)
self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest)
elif key == 'x-goog-component-count':
self.component_count = int(value)
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None):
query_args = None
if self.generation:
query_args = ['generation=%s' % self.generation]
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=hash_algs,
query_args=query_args)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None,
hash_algs=None):
"""
Retrieve an object from GCS using the name of the Key object as the
key in GCS. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: File -like object
:param fp:
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to GCS and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/sMkcC for details.
"""
        if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
hash_algs=hash_algs)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers,
hash_algs=hash_algs)
def compute_hash(self, fp, algorithm, size=None):
"""
:type fp: file
:param fp: File pointer to the file to hash. The file
pointer will be reset to the same position before the
method returns.
:type algorithm: zero-argument constructor for hash objects that
implements update() and digest() (e.g. hashlib.md5)
        :type size: int
        :param size: (optional) The maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where the file is being split
            in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_hash(
fp, size=size, hash_algorithm=algorithm)
# The internal implementation of compute_hash() needs to return the
# data size, but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code), so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
"""
Upload a file to GCS.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
            point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
        :type size: int
        :param size: (optional) The maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where you are splitting the file
            up into different ranges to be uploaded. If not specified,
            the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type hash_algs: dictionary
:param hash_algs: (optional) Dictionary of hash algorithms and
corresponding hashing class that implements update() and digest().
Defaults to {'md5': hashlib.md5}.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size,
hash_algs=hash_algs)
def delete(self):
return self.bucket.delete_key(self.name, version_id=self.version_id,
generation=self.generation)
def add_email_grant(self, permission, email_address):
"""
Convenience method that provides a quick way to add an email grant to a
key. This method retrieves the current ACL, creates a new grant based on
the parameters passed in, adds that grant to the ACL and then PUT's the
new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
account to which you are granting the permission.
"""
acl = self.get_acl()
acl.add_email_grant(permission, email_address)
self.set_acl(acl)
def add_user_grant(self, permission, user_id):
"""
Convenience method that provides a quick way to add a canonical user
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type user_id: string
:param user_id: The canonical user id associated with the GS account to
which you are granting the permission.
"""
acl = self.get_acl()
acl.add_user_grant(permission, user_id)
self.set_acl(acl)
def add_group_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
"""
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
def add_group_grant(self, permission, group_id):
"""
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
"""
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
res_upload_handler=None, size=None, rewind=False,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the
file up into different ranges to be uploaded. If not
specified, the default behaviour is to read all bytes
            from the file pointer. Fewer bytes may be available.
Notes:
1. The "size" parameter currently cannot be used when
a resumable upload handler is given but is still
useful for uploading part of a file as implemented
by the parent class.
2. At present Google Cloud Storage does not support
multipart uploads.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will be
rewound to the start before any bytes are read from
it. The default behaviour is False which reads from
the current position of the file pointer (fp).
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
:rtype: int
:return: The number of bytes written to the key.
TODO: At some point we should refactor the Bucket and Key classes,
to move functionality common to all providers into a parent class,
and provider-specific functionality into subclasses (rather than
just overriding/sharing code the way it currently works).
"""
provider = self.bucket.connection.provider
if res_upload_handler and size:
            # the resumable upload handler does not support size-limited
            # (partial-file) uploads, so reject the combination up front.
raise BotoClientError(
'"size" param not supported for resumable uploads.')
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if hasattr(fp, 'name'):
self.path = fp.name
        if self.bucket is not None:
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
                if re.match('^"[a-fA-F0-9]{32}"$', key.etag):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if size:
self.size = size
else:
                # If md5 is provided, we still need the size, so
                # calculate it based on the bytes to the end of the content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
            if md5 is None:
md5 = self.compute_md5(fp, size)
self.md5 = md5[0]
self.base64md5 = md5[1]
            if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
if if_generation is not None:
headers['x-goog-if-generation-match'] = str(if_generation)
if res_upload_handler:
res_upload_handler.send_file(self, fp, headers, cb, num_cb)
else:
# Not a resumable transfer so use basic send_file mechanism.
self.send_file(fp, headers, cb, num_cb, size=size)
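    def _example_set_contents_from_file(self):
        # Illustrative sketch only, not part of boto's API: a plain upload
        # followed by a resumable one. The file and tracker paths are
        # hypothetical placeholders.
        from boto.gs.resumable_upload_handler import ResumableUploadHandler
        with open('/tmp/data.bin', 'rb') as fp:
            self.set_contents_from_file(fp, rewind=True)
        handler = ResumableUploadHandler(tracker_file_name='/tmp/tracker')
        with open('/tmp/data.bin', 'rb') as fp:
            self.set_contents_from_file(fp, res_upload_handler=handler)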
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=None,
res_upload_handler=None,
if_generation=None):
"""
Store an object in GS using the name of the Key object as the
key in GS and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto GS
:type headers: dict
:param headers: Additional headers to pass along with the request to GS.
:type replace: bool
:param replace: If True, replaces the contents of the file if it
already exists.
:type cb: function
        :param cb: (optional) a callback function that will be called to report
            progress on the upload. The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted to GS and the second representing
            the total number of bytes that need to be transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed hashes, since we are setting the
# content.
self.local_hashes = {}
with open(filename, 'rb') as fp:
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, res_upload_handler,
if_generation=if_generation)
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
if_generation=None):
"""
Store an object in GCS using the name of the Key object as the
key in GCS and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
            request to GCS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to GCS and the second representing the
            total size of the object to be transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in GCS.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
# Clear out any previously computed md5 hashes, since we are setting the content.
self.md5 = None
self.base64md5 = None
fp = StringIO.StringIO(get_utf8_value(s))
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5,
if_generation=if_generation)
fp.close()
return r
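    def _example_set_contents_from_string(self):
        # Illustrative sketch only, not part of boto's API: with
        # if_generation=0 the object is written only if it does not
        # already exist, which gives a simple create-once semantic.
        self.set_contents_from_string('hello world', if_generation=0)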
def set_contents_from_stream(self, *args, **kwargs):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
        The stream object is not seekable and the total size is not
        known. This means we cannot send a Content-Length or
        Content-MD5 header, so for huge uploads the delay of
        computing the MD5 is avoided, at the cost of being unable to
        verify the integrity of the uploaded data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
if_generation = kwargs.pop('if_generation', None)
if if_generation is not None:
headers = kwargs.get('headers', {})
headers['x-goog-if-generation-match'] = str(if_generation)
kwargs['headers'] = headers
super(Key, self).set_contents_from_stream(*args, **kwargs)
def set_acl(self, acl_or_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets the ACL for this object.
:type acl_or_str: string or :class:`boto.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
        if self.bucket is not None:
self.bucket.set_acl(acl_or_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def get_acl(self, headers=None, generation=None):
"""Returns the ACL of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: :class:`.gs.acl.ACL`
"""
        if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers,
generation=generation)
def get_xml_acl(self, headers=None, generation=None):
"""Returns the ACL string of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: str
"""
        if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers,
generation=generation)
def set_xml_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
        if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def set_canned_acl(self, acl_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets this objects's ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
        if self.bucket is not None:
return self.bucket.set_canned_acl(
acl_str,
self.name,
headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration
)
def compose(self, components, content_type=None, headers=None):
"""Create a new object from a sequence of existing objects.
The content of the object representing this Key will be the
concatenation of the given object sequence. For more detail, visit
https://developers.google.com/storage/docs/composite-objects
        :type components: list
        :param components: List of gs.Key objects representing the component objects.
        :type content_type: string
        :param content_type: (optional) Content type for the new composite object.
"""
compose_req = []
for key in components:
if key.bucket.name != self.bucket.name:
raise BotoClientError(
'GCS does not support inter-bucket composing')
generation_tag = ''
if key.generation:
generation_tag = ('<Generation>%s</Generation>'
% str(key.generation))
compose_req.append('<Component><Name>%s</Name>%s</Component>' %
(key.name, generation_tag))
compose_req_xml = ('<ComposeRequest>%s</ComposeRequest>' %
''.join(compose_req))
headers = headers or {}
if content_type:
headers['Content-Type'] = content_type
resp = self.bucket.connection.make_request(
'PUT', get_utf8_value(self.bucket.name), get_utf8_value(self.name),
headers=headers, query_args='compose',
data=get_utf8_value(compose_req_xml))
if resp.status < 200 or resp.status > 299:
raise self.bucket.connection.provider.storage_response_error(
resp.status, resp.reason, resp.read())
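def _example_compose(bucket):
    # Illustrative sketch only, not part of boto's API: composing two
    # previously uploaded objects into a third. The key names are
    # hypothetical placeholders.
    parts = [bucket.get_key('part-1'), bucket.get_key('part-2')]
    combined = bucket.new_key('combined')
    combined.compose(parts, content_type='text/plain')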
|
mattcaldwell/boto
|
boto/gs/key.py
|
Python
|
mit
| 40,236
|
[
"VisIt"
] |
9aabf45e1d30cb7246d56c35af3e6648a0c95ea111971e4fc0d65140dc59aafe
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS that utilize the
progress page.
"""
from __future__ import absolute_import
from six.moves import range
from contextlib import contextmanager
import ddt
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...pages.common.logout import LogoutPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.instructor_dashboard import InstructorDashboardPage, StudentSpecificAdmin
from ...pages.lms.problem import ProblemPage
from ...pages.lms.progress import ProgressPage
from ...pages.studio.xblock_editor import XBlockEditorView
from ...pages.studio.overview import CourseOutlinePage as StudioCourseOutlinePage
from ...pages.studio.utils import type_in_codemirror
from ..helpers import (
UniqueCourseTest,
auto_auth,
create_multiple_choice_problem,
create_multiple_choice_xml,
get_modal_alert
)
class ProgressPageBaseTest(UniqueCourseTest):
"""
Provides utility methods for tests retrieving
scores from the progress page.
"""
USERNAME = "STUDENT_TESTER"
EMAIL = "student101@example.com"
SECTION_NAME = 'Test Section 1'
SUBSECTION_NAME = 'Test Subsection 1'
UNIT_NAME = 'Test Unit 1'
PROBLEM_NAME = 'Test Problem 1'
PROBLEM_NAME_2 = 'Test Problem 2'
def setUp(self):
super(ProgressPageBaseTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.problem_page = ProblemPage(self.browser)
self.progress_page = ProgressPage(self.browser, self.course_id)
self.logout_page = LogoutPage(self.browser)
self.studio_course_outline = StudioCourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Install a course with problems
self.course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
self.problem1 = create_multiple_choice_problem(self.PROBLEM_NAME)
self.problem2 = create_multiple_choice_problem(self.PROBLEM_NAME_2)
self.course_fix.add_children(
XBlockFixtureDesc('chapter', self.SECTION_NAME).add_children(
XBlockFixtureDesc('sequential', self.SUBSECTION_NAME).add_children(
XBlockFixtureDesc('vertical', self.UNIT_NAME).add_children(self.problem1, self.problem2)
)
),
XBlockFixtureDesc('chapter', "Lab Section").add_children(
XBlockFixtureDesc('sequential', "Lab Subsection").add_children(
XBlockFixtureDesc('vertical', "Lab Unit").add_children(
create_multiple_choice_problem("Lab Exercise")
)
)
)
).install()
# Auto-auth register for the course.
auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
def _answer_problem_correctly(self):
"""
Submit a correct answer to the problem.
"""
self._answer_problem(choice=2)
def _answer_problem(self, choice):
"""
Submit the given choice for the problem.
"""
self.courseware_page.go_to_sequential_position(1)
self.problem_page.click_choice('choice_choice_{}'.format(choice))
self.problem_page.click_submit()
def _get_section_score(self):
"""
        Return the section score from the progress page.
"""
self.progress_page.visit()
return self.progress_page.section_score(self.SECTION_NAME, self.SUBSECTION_NAME)
def _get_problem_scores(self):
"""
Return a list of scores from the progress page.
"""
self.progress_page.visit()
return self.progress_page.scores(self.SECTION_NAME, self.SUBSECTION_NAME)
@contextmanager
def _logged_in_session(self, staff=False):
"""
Ensure that the user is logged in and out appropriately at the beginning
and end of the current test. But if there's an error, don't log out
before capturing a screenshot.
"""
self.logout_page.visit()
if staff:
auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
else:
auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
yield
self.logout_page.visit()
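    def _example_session_usage(self):
        # Illustrative sketch only, not an actual test: answer a problem in
        # one session, then verify the persisted score in a fresh one.
        with self._logged_in_session():
            self.courseware_page.visit()
            self._answer_problem_correctly()
        with self._logged_in_session():
            self.assertEqual(self._get_section_score(), (1, 2))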
@ddt.ddt
class PersistentGradesTest(ProgressPageBaseTest):
"""
Test that grades for completed assessments are persisted
when various edits are made.
"""
shard = 22
def setUp(self):
super(PersistentGradesTest, self).setUp()
self.instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
def _change_subsection_structure(self):
"""
Adds a unit to the subsection, which
should not affect a persisted subsection grade.
"""
self.studio_course_outline.visit()
subsection = self.studio_course_outline.section(self.SECTION_NAME).subsection(self.SUBSECTION_NAME)
subsection.expand_subsection()
subsection.add_unit()
self.studio_course_outline.wait_for_ajax()
subsection.publish()
def _set_staff_lock_on_subsection(self, locked):
"""
Sets staff lock for a subsection, which should hide the
subsection score from students on the progress page.
"""
self.studio_course_outline.visit()
subsection = self.studio_course_outline.section_at(0).subsection_at(0)
subsection.set_staff_lock(locked)
self.assertEqual(subsection.has_staff_lock_warning, locked)
def _get_problem_in_studio(self):
"""
Returns the editable problem component in studio,
along with its container unit, so any changes can
be published.
"""
self.studio_course_outline.visit()
self.studio_course_outline.section_at(0).subsection_at(0).expand_subsection()
unit = self.studio_course_outline.section_at(0).subsection_at(0).unit(self.UNIT_NAME).go_to()
component = unit.xblocks[1]
return unit, component
def _change_weight_for_problem(self):
"""
Changes the weight of the problem, which should not affect
persisted grades.
"""
unit, component = self._get_problem_in_studio()
component.edit()
component_editor = XBlockEditorView(self.browser, component.locator)
component_editor.set_field_value_and_save('Problem Weight', 5)
unit.publish()
def _change_correct_answer_for_problem(self, new_correct_choice=1):
"""
Changes the correct answer of the problem.
"""
unit, component = self._get_problem_in_studio()
modal = component.edit()
modified_content = create_multiple_choice_xml(correct_choice=new_correct_choice)
type_in_codemirror(self, 0, modified_content)
modal.q(css='.action-save').click()
unit.publish()
def _student_admin_action_for_problem(self, action_button, has_cancellable_alert=False):
"""
        As staff, clicks the given student admin action button (for
        example the "delete student state" button) for the problem.
"""
self.instructor_dashboard_page.visit()
student_admin_section = self.instructor_dashboard_page.select_student_admin(StudentSpecificAdmin)
student_admin_section.set_student_email_or_username(self.USERNAME)
student_admin_section.set_problem_location(self.problem1.locator)
getattr(student_admin_section, action_button).click()
if has_cancellable_alert:
alert = get_modal_alert(student_admin_section.browser)
alert.accept()
alert = get_modal_alert(student_admin_section.browser)
alert.dismiss()
return student_admin_section
def test_progress_page_shows_scored_problems(self):
"""
Checks the progress page before and after answering
the course's first problem correctly.
"""
with self._logged_in_session():
self.assertEqual(self._get_problem_scores(), [(0, 1), (0, 1)])
self.assertEqual(self._get_section_score(), (0, 2))
self.courseware_page.visit()
self._answer_problem_correctly()
self.assertEqual(self._get_problem_scores(), [(1, 1), (0, 1)])
self.assertEqual(self._get_section_score(), (1, 2))
@ddt.data(
_change_correct_answer_for_problem,
_change_subsection_structure,
_change_weight_for_problem
)
def test_content_changes_do_not_change_score(self, edit):
with self._logged_in_session():
self.courseware_page.visit()
self._answer_problem_correctly()
with self._logged_in_session(staff=True):
edit(self)
with self._logged_in_session():
self.assertEqual(self._get_problem_scores(), [(1, 1), (0, 1)])
self.assertEqual(self._get_section_score(), (1, 2))
def test_visibility_change_affects_score(self):
with self._logged_in_session():
self.courseware_page.visit()
self._answer_problem_correctly()
with self._logged_in_session(staff=True):
self._set_staff_lock_on_subsection(True)
with self._logged_in_session():
self.assertEqual(self._get_problem_scores(), None)
self.assertEqual(self._get_section_score(), None)
with self._logged_in_session(staff=True):
self._set_staff_lock_on_subsection(False)
with self._logged_in_session():
self.assertEqual(self._get_problem_scores(), [(1, 1), (0, 1)])
self.assertEqual(self._get_section_score(), (1, 2))
def test_delete_student_state_affects_score(self):
with self._logged_in_session():
self.courseware_page.visit()
self._answer_problem_correctly()
with self._logged_in_session(staff=True):
self._student_admin_action_for_problem('delete_state_button', has_cancellable_alert=True)
with self._logged_in_session():
self.assertEqual(self._get_problem_scores(), [(0, 1), (0, 1)])
self.assertEqual(self._get_section_score(), (0, 2))
class SubsectionGradingPolicyBase(ProgressPageBaseTest):
"""
    Base class for testing a subsection and its impact on
    the progress page.
"""
def setUp(self):
super(SubsectionGradingPolicyBase, self).setUp()
self._set_policy_for_subsection("Homework", 0)
self._set_policy_for_subsection("Lab", 1)
def _set_policy_for_subsection(self, policy, section=0):
"""
Set the grading policy for the first subsection in the specified section.
If a section index is not provided, 0 is assumed.
"""
with self._logged_in_session(staff=True):
self.studio_course_outline.visit()
modal = self.studio_course_outline.section_at(section).subsection_at(0).edit()
modal.policy = policy
modal.save()
def _check_scores_and_page_text(self, problem_scores, section_score, text):
"""
Asserts that the given problem and section scores, and text,
appear on the progress page.
"""
self.assertEqual(self._get_problem_scores(), problem_scores)
self.assertEqual(self._get_section_score(), section_score)
self.assertTrue(self.progress_page.text_on_page(text))
def _check_tick_text(self, index, sr_text, label, label_hidden=True):
"""
Check the label and sr text for a horizontal (X-axis) tick.
"""
self.assertEqual(sr_text, self.progress_page.x_tick_sr_text(index))
self.assertEqual([label, 'true' if label_hidden else None], self.progress_page.x_tick_label(index))
class SubsectionGradingPolicyA11yTest(SubsectionGradingPolicyBase):
"""
Class to test the accessibility of subsection grading
"""
a11y = True
def test_axis_a11y(self):
"""
Tests that the progress chart axes have appropriate a11y (screenreader) markup.
"""
with self._logged_in_session():
self.courseware_page.visit()
# Answer the first HW problem (the unit contains 2 problems, only one will be answered correctly)
self._answer_problem_correctly()
self.courseware_page.click_next_button_on_top()
# Answer the first Lab problem (unit only contains a single problem)
self._answer_problem_correctly()
self.progress_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
self.progress_page.visit()
# Verify the basic a11y of the progress page
self.progress_page.a11y_audit.check_for_accessibility_errors()
# Verify that y-Axis labels are aria-hidden
self.assertEqual(['100%', 'true'], self.progress_page.y_tick_label(0))
self.assertEqual(['0%', 'true'], self.progress_page.y_tick_label(1))
self.assertEqual(['Pass 50%', 'true'], self.progress_page.y_tick_label(2)) # pylint: disable=unicode-format-string,line-too-long
# Verify x-Axis labels and sr-text
self._check_tick_text(0, [u'Homework 1 - Test Subsection 1 - 50% (1/2)'], u'HW 01')
# Homeworks 2-10 are checked in the for loop below.
self._check_tick_text(
10,
[u'Homework 11 Unreleased - 0% (?/?)', u'The lowest 2 Homework scores are dropped.'],
u'HW 11'
)
self._check_tick_text(
11,
[u'Homework 12 Unreleased - 0% (?/?)', u'The lowest 2 Homework scores are dropped.'],
u'HW 12'
)
self._check_tick_text(12, [u'Homework Average = 5%'], u'HW Avg')
self._check_tick_text(13, [u'Lab 1 - Lab Subsection - 100% (1/1)'], u'Lab 01')
# Labs 2-10 are checked in the for loop below.
self._check_tick_text(
23,
[u'Lab 11 Unreleased - 0% (?/?)', u'The lowest 2 Lab scores are dropped.'],
u'Lab 11'
)
self._check_tick_text(
24,
[u'Lab 12 Unreleased - 0% (?/?)', u'The lowest 2 Lab scores are dropped.'],
u'Lab 12'
)
self._check_tick_text(25, [u'Lab Average = 10%'], u'Lab Avg')
self._check_tick_text(26, [u'Midterm Exam = 0%'], u'Midterm')
self._check_tick_text(27, [u'Final Exam = 0%'], u'Final')
self._check_tick_text(
28,
[u'Homework = 0.75% of a possible 15.00%', u'Lab = 1.50% of a possible 15.00%'],
u'Total',
False # The label "Total" should NOT be aria-hidden
)
# The grading policy has 12 Homeworks and 12 Labs. Most of them are unpublished,
# with no additional information.
for i in range(1, 10):
self._check_tick_text(
i,
[u'Homework {index} Unreleased - 0% (?/?)'.format(index=i + 1)],
u'HW 0{index}'.format(index=i + 1) if i < 9 else u'HW {index}'.format(index=i + 1)
)
self._check_tick_text(
i + 13,
[u'Lab {index} Unreleased - 0% (?/?)'.format(index=i + 1)],
u'Lab 0{index}'.format(index=i + 1) if i < 9 else u'Lab {index}'.format(index=i + 1)
)
# Verify the overall score. The first element in the array is the sr-only text, and the
# second is the total text (including the sr-only text).
self.assertEqual(['Overall Score', 'Overall Score\n2%'], self.progress_page.graph_overall_score()) # pylint: disable=unicode-format-string,line-too-long
class ProgressPageA11yTest(ProgressPageBaseTest):
"""
Class to test the accessibility of the progress page.
"""
a11y = True
def test_progress_page_a11y(self):
"""
Test the accessibility of the progress page.
"""
self.progress_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
self.progress_page.visit()
self.progress_page.a11y_audit.check_for_accessibility_errors()
|
jolyonb/edx-platform
|
common/test/acceptance/tests/lms/test_progress_page.py
|
Python
|
agpl-3.0
| 16,866
|
[
"VisIt"
] |
5406348640bc2e33ebc63e934ac120cef20291c2247aebf74f3fa853fd579e14
|
# Copyright 2010 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to use the Closure Compiler CLI from Python."""
import logging
import os
import re
import subprocess
# Pulls just the major and minor version numbers from the first line of
# 'java -version'. Versions are in the format of [0-9]+\.[0-9]+\..* See:
# http://www.oracle.com/technetwork/java/javase/versioning-naming-139433.html
_VERSION_REGEX = re.compile(r'"([0-9]+)\.([0-9]+)')
class JsCompilerError(Exception):
"""Raised if there's an error in calling the compiler."""
pass
def _GetJavaVersionString():
"""Get the version string from the Java VM."""
return subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)
def _ParseJavaVersion(version_string):
"""Returns a 2-tuple for the current version of Java installed.
Args:
version_string: String of the Java version (e.g. '1.7.2-ea').
Returns:
The major and minor versions, as a 2-tuple (e.g. (1, 7)).
"""
match = _VERSION_REGEX.search(version_string)
if match:
version = tuple(int(x, 10) for x in match.groups())
assert len(version) == 2
return version
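def _ExampleParseJavaVersion():
  # Illustrative sketch only, not part of this module: a typical first line
  # of 'java -version' output reduced to a (major, minor) tuple.
  assert _ParseJavaVersion('java version "1.7.0_55"') == (1, 7)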
def _JavaSupports32BitMode():
"""Determines whether the JVM supports 32-bit mode on the platform."""
# Suppresses process output to stderr and stdout from showing up in the
# console as we're only trying to determine 32-bit JVM support.
  try:
    with open(os.devnull, 'wb') as devnull:
      return subprocess.call(
          ['java', '-d32', '-version'], stdout=devnull, stderr=devnull) == 0
  except IOError:
    # 'java' itself could not be run, so assume no 32-bit support.
    return False
def _GetJsCompilerArgs(compiler_jar_path, java_version, source_paths,
jvm_flags, compiler_flags):
"""Assembles arguments for call to JsCompiler."""
if java_version < (1, 6):
raise JsCompilerError('Closure Compiler requires Java 1.6 or higher. '
'Please visit http://www.java.com/getjava')
args = ['java']
# Add JVM flags we believe will produce the best performance. See
# https://groups.google.com/forum/#!topic/closure-library-discuss/7w_O9-vzlj4
# Attempt 32-bit mode if available (Java 7 on Mac OS X does not support 32-bit
# mode, for example).
if _JavaSupports32BitMode():
args += ['-d32']
# Prefer the "client" VM.
args += ['-client']
# Add JVM flags, if any
if jvm_flags:
args += jvm_flags
# Add the application JAR.
args += ['-jar', compiler_jar_path]
for path in source_paths:
args += ['--js', path]
# Add compiler flags, if any.
if compiler_flags:
args += compiler_flags
return args
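def _ExampleGetJsCompilerArgs():
  # Illustrative sketch only, not part of this module: the argument list
  # assembled for a hypothetical jar and source file. '-d32' appears only
  # when the local JVM supports 32-bit mode.
  return _GetJsCompilerArgs(
      'compiler.jar', (1, 7), ['app.js'],
      jvm_flags=['-Xmx1g'],
      compiler_flags=['--compilation_level', 'ADVANCED_OPTIMIZATIONS'])
  # e.g. ['java', '-d32', '-client', '-Xmx1g', '-jar', 'compiler.jar',
  #       '--js', 'app.js', '--compilation_level', 'ADVANCED_OPTIMIZATIONS']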
def Compile(compiler_jar_path, source_paths,
jvm_flags=None,
compiler_flags=None):
"""Prepares command-line call to Closure Compiler.
Args:
compiler_jar_path: Path to the Closure compiler .jar file.
source_paths: Source paths to build, in order.
jvm_flags: A list of additional flags to pass on to JVM.
compiler_flags: A list of additional flags to pass on to Closure Compiler.
  Returns:
    The compiled source, as a string.

  Raises:
    JsCompilerError: If the installed Java is too old or compilation fails.
"""
java_version = _ParseJavaVersion(_GetJavaVersionString())
args = _GetJsCompilerArgs(
compiler_jar_path, java_version, source_paths, jvm_flags, compiler_flags)
logging.info('Compiling with the following command: %s', ' '.join(args))
try:
return subprocess.check_output(args)
except subprocess.CalledProcessError:
raise JsCompilerError('JavaScript compilation failed.')
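def _ExampleCompile():
  # Illustrative sketch only, not part of this module: compiling two
  # hypothetical sources with extra JVM and compiler flags.
  return Compile(
      'closure-compiler.jar', ['base.js', 'app.js'],
      jvm_flags=['-Xmx1g'],
      compiler_flags=['--compilation_level', 'SIMPLE_OPTIMIZATIONS'])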
|
dmincu/IOC
|
new_php/closure-library/closure/bin/build/jscompiler.py
|
Python
|
mit
| 4,237
|
[
"VisIt"
] |
abc85f59bd33324283bae70d8d193a5d73e2b641a5d62a89b5fa99e175cb9580
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import errno
import sys
import re
import os
import shlex
import yaml
import copy
import optparse
import operator
from ansible import errors
from ansible import __version__
from ansible.utils.display_functions import *
from ansible.utils.plugins import *
from ansible.utils.su_prompts import *
from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s
from ansible.callbacks import display
from ansible.module_utils.splitter import split_args, unquote
from ansible.module_utils.basic import heuristic_log_sanitize
from ansible.utils.unicode import to_bytes, to_unicode
import ansible.constants as C
import ast
import time
import StringIO
import stat
import termios
import tty
import pipes
import random
import difflib
import warnings
import traceback
import getpass
import sys
import subprocess
import contextlib
from vault import VaultLib
VERBOSITY=0
MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
# caching the compilation of the regex used
# to check for lookup calls within data
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
CODE_REGEX = re.compile(r'(?:{%|%})')
try:
# simplejson can be much faster if it's available
import simplejson as json
except ImportError:
import json
try:
from yaml import CSafeLoader as Loader
except ImportError:
from yaml import SafeLoader as Loader
PASSLIB_AVAILABLE = False
try:
import passlib.hash
PASSLIB_AVAILABLE = True
except:
pass
try:
    import builtins as builtin  # Python 3
except ImportError:
import __builtin__ as builtin
KEYCZAR_AVAILABLE=False
try:
try:
# some versions of pycrypto may not have this?
from Crypto.pct_warnings import PowmInsecureWarning
except ImportError:
PowmInsecureWarning = RuntimeWarning
with warnings.catch_warnings(record=True) as warning_handler:
warnings.simplefilter("error", PowmInsecureWarning)
try:
import keyczar.errors as key_errors
from keyczar.keys import AesKey
except PowmInsecureWarning:
system_warning(
"The version of gmp you have installed has a known issue regarding " + \
"timing vulnerabilities when used with pycrypto. " + \
"If possible, you should update it (i.e. yum update gmp)."
)
warnings.resetwarnings()
warnings.simplefilter("ignore")
import keyczar.errors as key_errors
from keyczar.keys import AesKey
KEYCZAR_AVAILABLE=True
except ImportError:
pass
###############################################################
# Abstractions around keyczar
###############################################################
def key_for_hostname(hostname):
# fireball mode is an implementation of ansible firing up zeromq via SSH
# to use no persistent daemons or key management
if not KEYCZAR_AVAILABLE:
raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
if not os.path.exists(key_path):
os.makedirs(key_path, mode=0700)
os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
elif not os.path.isdir(key_path):
raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
key_path = os.path.join(key_path, hostname)
# use new AES keys every 2 hours, which means fireball must not allow running for longer either
if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
key = AesKey.Generate(size=256)
fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
fh = os.fdopen(fd, 'w')
fh.write(str(key))
fh.close()
return key
else:
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
fh = open(key_path)
key = AesKey.Read(fh.read())
fh.close()
return key
def encrypt(key, msg):
return key.Encrypt(msg.encode('utf-8'))
def decrypt(key, msg):
try:
return key.Decrypt(msg)
except key_errors.InvalidSignatureError:
raise errors.AnsibleError("decryption failed")
###############################################################
# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
###############################################################
def read_vault_file(vault_password_file):
"""Read a vault password from a file or if executable, execute the script and
retrieve password from STDOUT
"""
if vault_password_file:
this_path = os.path.realpath(os.path.expanduser(vault_password_file))
if is_executable(this_path):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
except OSError, e:
raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e))
stdout, stderr = p.communicate()
vault_pass = stdout.strip('\r\n')
else:
try:
f = open(this_path, "rb")
vault_pass=f.read().strip()
f.close()
except (OSError, IOError), e:
raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
return vault_pass
else:
return None
def err(msg):
''' print an error message to stderr '''
print >> sys.stderr, msg
def exit(msg, rc=1):
''' quit with an error to stdout and a failure code '''
err(msg)
sys.exit(rc)
def jsonify(result, format=False):
    ''' format JSON output (compressed or uncompressed) '''
if result is None:
return "{}"
result2 = result.copy()
for key, value in result2.items():
if type(value) is str:
result2[key] = value.decode('utf-8', 'ignore')
indent = None
if format:
indent = 4
try:
return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
except UnicodeDecodeError:
return json.dumps(result2, sort_keys=True, indent=indent)
def write_tree_file(tree, hostname, buf):
''' write something into treedir/hostname '''
# TODO: might be nice to append playbook runs per host in a similar way
# in which case, we'd want append mode.
path = os.path.join(tree, hostname)
buf = to_bytes(buf)
with open(path, 'wb+') as fd:
fd.write(buf)
def is_failed(result):
''' is a given JSON result a failed result? '''
return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
def is_changed(result):
''' is a given JSON result a changed result? '''
return (result.get('changed', False) in [ True, 'True', 'true'])
def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
from ansible.utils import template
if conditional is None or conditional == '':
return True
if isinstance(conditional, list):
for x in conditional:
if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
return False
return True
if not isinstance(conditional, basestring):
return conditional
conditional = conditional.replace("jinja2_compare ","")
# allow variable names
if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'):
conditional = to_unicode(inject[conditional], nonstring='simplerepr')
conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","")
# a Jinja2 evaluation that results in something Python can eval!
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
conditional = template.template(basedir, presented, inject)
val = conditional.strip()
if val == presented:
# the templating failed, meaning most likely a
# variable was undefined. If we happened to be
# looking for an undefined variable, return True,
# otherwise fail
if "is undefined" in conditional:
return True
elif "is defined" in conditional:
return False
else:
raise errors.AnsibleError("error while evaluating conditional: %s" % original)
elif val == "True":
return True
elif val == "False":
return False
else:
raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
def is_executable(path):
'''is the given path executable?'''
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
def unfrackpath(path):
'''
returns a path that is free of symlinks, environment
variables, relative path traversals and symbols (~)
example:
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
def prepare_writeable_dir(tree,mode=0777):
''' make sure a directory exists and is writeable '''
# modify the mode to ensure the owner at least
# has read/write access to this directory
mode |= 0700
# make sure the tree path is always expanded
# and normalized and free of symlinks
tree = unfrackpath(tree)
if not os.path.exists(tree):
try:
os.makedirs(tree, mode)
except (IOError, OSError), e:
raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
if not os.access(tree, os.W_OK):
raise errors.AnsibleError("Cannot write to path %s" % tree)
return tree
def path_dwim(basedir, given):
'''
make relative paths work like folks expect.
'''
if given.startswith("'"):
given = given[1:-1]
if given.startswith("/"):
return os.path.abspath(given)
elif given.startswith("~"):
return os.path.abspath(os.path.expanduser(given))
else:
if basedir is None:
basedir = "."
return os.path.abspath(os.path.join(basedir, given))
def path_dwim_relative(original, dirname, source, playbook_base, check=True):
    ''' find a file in a dir named dirname next to, or one level up from, the original file '''
# (used by roles code)
from ansible.utils import template
basedir = os.path.dirname(original)
if os.path.islink(basedir):
basedir = unfrackpath(basedir)
template2 = os.path.join(basedir, dirname, source)
else:
template2 = os.path.join(basedir, '..', dirname, source)
source2 = path_dwim(basedir, template2)
if os.path.exists(source2):
return source2
obvious_local_path = path_dwim(playbook_base, source)
if os.path.exists(obvious_local_path):
return obvious_local_path
if check:
raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
return source2 # which does not exist
def repo_url_to_role_name(repo_url):
    # gets the role name out of a repo url, like
    # "http://git.example.com/repos/repo.git" => "repo"
if '://' not in repo_url and '@' not in repo_url:
return repo_url
trailing_path = repo_url.split('/')[-1]
if trailing_path.endswith('.git'):
trailing_path = trailing_path[:-4]
if trailing_path.endswith('.tar.gz'):
trailing_path = trailing_path[:-7]
if ',' in trailing_path:
trailing_path = trailing_path.split(',')[0]
return trailing_path
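def _example_repo_url_to_role_name():
    # Illustrative sketch only, not part of ansible's API; the URLs are
    # hypothetical.
    assert repo_url_to_role_name('http://git.example.com/repos/repo.git') == 'repo'
    assert repo_url_to_role_name('git@example.com:repos/repo.tar.gz') == 'repo'
    # plain names (no scheme or '@') pass through unchanged
    assert repo_url_to_role_name('bare_role_name') == 'bare_role_name'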
def role_spec_parse(role_spec):
# takes a repo and a version like
# git+http://git.example.com/repos/repo.git,v1.0
# and returns a list of properties such as:
# {
# 'scm': 'git',
# 'src': 'http://git.example.com/repos/repo.git',
# 'version': 'v1.0',
# 'name': 'repo'
# }
role_spec = role_spec.strip()
role_version = ''
default_role_versions = dict(git='master', hg='tip')
if role_spec == "" or role_spec.startswith("#"):
return (None, None, None, None)
tokens = [s.strip() for s in role_spec.split(',')]
# assume https://github.com URLs are git+https:// URLs and not
# tarballs unless they end in '.zip'
if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
tokens[0] = 'git+' + tokens[0]
if '+' in tokens[0]:
(scm, role_url) = tokens[0].split('+')
else:
scm = None
role_url = tokens[0]
if len(tokens) >= 2:
role_version = tokens[1]
if len(tokens) == 3:
role_name = tokens[2]
else:
role_name = repo_url_to_role_name(tokens[0])
if scm and not role_version:
role_version = default_role_versions.get(scm, '')
return dict(scm=scm, src=role_url, version=role_version, name=role_name)
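def _example_role_spec_parse():
    # Illustrative sketch only, not part of ansible's API; the repo url is
    # hypothetical.
    spec = role_spec_parse('git+http://git.example.com/repos/repo.git,v1.0')
    assert spec == dict(scm='git',
                        src='http://git.example.com/repos/repo.git',
                        version='v1.0',
                        name='repo')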
def role_yaml_parse(role):
if 'role' in role:
# Old style: {role: "galaxy.role,version,name", other_vars: "here" }
role_info = role_spec_parse(role['role'])
if isinstance(role_info, dict):
# Warning: Slight change in behaviour here. name may be being
# overloaded. Previously, name was only a parameter to the role.
# Now it is both a parameter to the role and the name that
# ansible-galaxy will install under on the local system.
if 'name' in role and 'name' in role_info:
del role_info['name']
role.update(role_info)
else:
# New style: { src: 'galaxy.role,version,name', other_vars: "here" }
if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
(scm, src) = role["src"].split('+')
role["scm"] = scm
role["src"] = src
if 'name' not in role:
role["name"] = repo_url_to_role_name(role["src"])
if 'version' not in role:
role['version'] = ''
if 'scm' not in role:
role['scm'] = None
return role
def json_loads(data):
''' parse a JSON string and return a data structure '''
try:
loaded = json.loads(data)
except ValueError,e:
raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e))
return loaded
def _clean_data(orig_data, from_remote=False, from_inventory=False):
''' remove jinja2 template tags from a string '''
if not isinstance(orig_data, basestring):
return orig_data
# when the data is marked as having come from a remote, we always
# replace any print blocks (ie. {{var}}), however when marked as coming
# from inventory we only replace print blocks that contain a call to
# a lookup plugin (ie. {{lookup('foo','bar'))}})
replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None)
regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX
with contextlib.closing(StringIO.StringIO(orig_data)) as data:
# these variables keep track of opening block locations, as we only
# want to replace matched pairs of print/block tags
print_openings = []
block_openings = []
for mo in regex.finditer(orig_data):
token = mo.group(0)
token_start = mo.start(0)
if token[0] == '{':
if token == '{%':
block_openings.append(token_start)
elif token == '{{':
print_openings.append(token_start)
elif token[1] == '}':
prev_idx = None
if token == '%}' and block_openings:
prev_idx = block_openings.pop()
elif token == '}}' and print_openings:
prev_idx = print_openings.pop()
if prev_idx is not None:
# replace the opening
data.seek(prev_idx, os.SEEK_SET)
data.write('{#')
# replace the closing
data.seek(token_start, os.SEEK_SET)
data.write('#}')
else:
assert False, 'Unhandled regex match'
return data.getvalue()
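def _example_clean_data():
    # Illustrative sketch only, not part of ansible's API: matched template
    # tags are rewritten into jinja2 comments so they render inert.
    cleaned = _clean_data('x={{ foo }} y={% if a %}b{% endif %}', from_remote=True)
    assert cleaned == 'x={# foo #} y={# if a #}b{# endif #}'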
def _clean_data_struct(orig_data, from_remote=False, from_inventory=False):
'''
walk a complex data structure, and use _clean_data() to
remove any template tags that may exist
'''
if not from_remote and not from_inventory:
        raise errors.AnsibleError("when cleaning data, you must specify either from_remote or from_inventory")
if isinstance(orig_data, dict):
data = orig_data.copy()
for key in data:
new_key = _clean_data_struct(key, from_remote, from_inventory)
new_val = _clean_data_struct(data[key], from_remote, from_inventory)
if key != new_key:
del data[key]
data[new_key] = new_val
elif isinstance(orig_data, list):
data = orig_data[:]
for i in range(0, len(data)):
data[i] = _clean_data_struct(data[i], from_remote, from_inventory)
elif isinstance(orig_data, basestring):
data = _clean_data(orig_data, from_remote, from_inventory)
else:
data = orig_data
return data
def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False):
''' this version for module return data only '''
orig_data = raw_data
# ignore stuff like tcgetattr spewage or other warnings
data = filter_leading_non_json_lines(raw_data)
try:
results = json.loads(data)
except:
if no_exceptions:
return dict(failed=True, parsed=False, msg=raw_data)
else:
raise
if from_remote:
results = _clean_data_struct(results, from_remote, from_inventory)
return results
def serialize_args(args):
'''
Flattens a dictionary args to a k=v string
'''
module_args = ""
for (k,v) in args.iteritems():
if isinstance(v, basestring):
module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
elif isinstance(v, bool):
module_args = "%s=%s %s" % (k, str(v), module_args)
return module_args.strip()
def merge_module_args(current_args, new_args):
'''
merges either a dictionary or string of k=v pairs with another string of k=v pairs,
and returns a new k=v string without duplicates.
'''
if not isinstance(current_args, basestring):
raise errors.AnsibleError("expected current_args to be a basestring")
# we use parse_kv to split up the current args into a dictionary
final_args = parse_kv(current_args)
if isinstance(new_args, dict):
final_args.update(new_args)
elif isinstance(new_args, basestring):
new_args_kv = parse_kv(new_args)
final_args.update(new_args_kv)
return serialize_args(final_args)
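def _example_merge_module_args():
    # Illustrative sketch only, not part of ansible's API: values from
    # new_args win, and the result is re-serialized as a k=v string.
    merged = merge_module_args('src=a.txt dest=/tmp/a', dict(dest='/tmp/b'))
    assert parse_kv(merged) == {'src': 'a.txt', 'dest': '/tmp/b'}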
def parse_yaml(data, path_hint=None):
''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!'''
stripped_data = data.lstrip()
loaded = None
if stripped_data.startswith("{") or stripped_data.startswith("["):
# since the line starts with { or [ we can infer this is a JSON document.
try:
loaded = json.loads(data)
except ValueError, ve:
if path_hint:
raise errors.AnsibleError(path_hint + ": " + str(ve))
else:
raise errors.AnsibleError(str(ve))
else:
# else this is pretty sure to be a YAML document
loaded = yaml.load(data, Loader=Loader)
return loaded
def process_common_errors(msg, probline, column):
replaced = probline.replace(" ","")
if ":{{" in replaced and "}}" in replaced:
msg = msg + """
This one looks easy to fix. YAML thought it was looking for the start of a
hash/dictionary and was confused to see a second "{". Most likely this was
meant to be an ansible template evaluation instead, so we have to give the
parser a small hint that we wanted a string instead. The solution here is to
just quote the entire value.
For instance, if the original line was:
app_path: {{ base_path }}/foo
It should be written as:
app_path: "{{ base_path }}/foo"
"""
return msg
elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
msg = msg + """
This one looks easy to fix. There seems to be an extra unquoted colon in the line
and this is confusing the parser. It was only expecting to find one free
colon. The solution is just add some quotes around the colon, or quote the
entire line after the first colon.
For instance, if the original line was:
copy: src=file.txt dest=/path/filename:with_colon.txt
It can be written as:
copy: src=file.txt dest='/path/filename:with_colon.txt'
Or:
copy: 'src=file.txt dest=/path/filename:with_colon.txt'
"""
return msg
else:
parts = probline.split(":")
if len(parts) > 1:
middle = parts[1].strip()
match = False
unbalanced = False
if middle.startswith("'") and not middle.endswith("'"):
match = True
elif middle.startswith('"') and not middle.endswith('"'):
match = True
            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and (probline.count("'") > 2 or probline.count('"') > 2):
unbalanced = True
if match:
msg = msg + """
This one looks easy to fix. It seems that there is a value started
with a quote, and the YAML parser is expecting to see the line ended
with the same kind of quote. For instance:
when: "ok" in result.stdout
Could be written as:
when: '"ok" in result.stdout'
or equivalently:
when: "'ok' in result.stdout"
"""
return msg
if unbalanced:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
unbalanced quotes. If starting a value with a quote, make sure the
line ends with the same set of quotes. For instance this arbitrary
example:
foo: "bad" "wolf"
Could be written as:
foo: '"bad" "wolf"'
"""
return msg
return msg
def process_yaml_error(exc, data, path=None, show_content=True):
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark
if show_content:
            if mark.line - 1 >= 0:
before_probline = data.split("\n")[mark.line-1]
else:
before_probline = ''
probline = data.split("\n")[mark.line]
arrow = " " * mark.column + "^"
msg = """Syntax Error while loading YAML script, %s
Note: The error may actually appear before this position: line %s, column %s
%s
%s
%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
unquoted_var = None
if '{{' in probline and '}}' in probline:
if '"{{' not in probline or "'{{" not in probline:
unquoted_var = True
if not unquoted_var:
msg = process_common_errors(msg, probline, mark.column)
else:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
missing quotes. Always quote template expression brackets when they
start a value. For instance:
with_items:
- {{ foo }}
Should be written as:
with_items:
- "{{ foo }}"
"""
else:
# most likely displaying a file with sensitive content,
# so don't show any of the actual lines of yaml just the
# line number itself
msg = """Syntax error while loading YAML script, %s
The error appears to have been on line %s, column %s, but may actually
be before there depending on the exact syntax problem.
""" % (path, mark.line + 1, mark.column + 1)
else:
# No problem markers means we have to throw a generic
# "stuff messed up" type message. Sry bud.
if path:
msg = "Could not parse YAML. Check over %s again." % path
else:
msg = "Could not parse YAML."
raise errors.AnsibleYAMLValidationFailed(msg)
def parse_yaml_from_file(path, vault_password=None):
''' convert a yaml file to a data structure '''
data = None
show_content = True
try:
data = open(path).read()
except IOError:
raise errors.AnsibleError("file could not read: %s" % path)
vault = VaultLib(password=vault_password)
if vault.is_encrypted(data):
# if the file is encrypted and no password was specified,
# the decrypt call would throw an error, but we check first
# since the decrypt function doesn't know the file name
if vault_password is None:
raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
data = vault.decrypt(data)
show_content = False
try:
return parse_yaml(data, path_hint=path)
except yaml.YAMLError, exc:
process_yaml_error(exc, data, path, show_content)
def parse_kv(args):
''' convert a string of key/value items to a dict '''
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError, ve:
if 'no closing quotation' in str(ve).lower():
raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
else:
raise
for x in vargs:
if "=" in x:
k, v = x.split("=",1)
options[k.strip()] = unquote(v.strip())
return options
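# Illustrative sketch (not part of the original module), assuming split_args
# splits on whitespace outside quotes: parse_kv turns a key=value argument
# string into a dict, e.g.:
#   parse_kv("src=a.txt dest=/tmp/a.txt")  ->  {'src': 'a.txt', 'dest': '/tmp/a.txt'}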
def _validate_both_dicts(a, b):
if not (isinstance(a, dict) and isinstance(b, dict)):
raise errors.AnsibleError(
"failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
)
def merge_hash(a, b):
''' recursively merges hash b into a
keys from b take precedence over keys from a '''
result = {}
# we check here as well as in combine_vars() since this
# function can work recursively with nested dicts
_validate_both_dicts(a, b)
for dicts in a, b:
# next, iterate over b keys and values
for k, v in dicts.iteritems():
# if there's already such key in a
# and that key contains dict
if k in result and isinstance(result[k], dict):
# merge those dicts recursively
                result[k] = merge_hash(result[k], v)
else:
# otherwise, just copy a value from b to a
result[k] = v
return result
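# Illustrative sketch (not part of the original module): merge_hash merges
# nested dicts recursively, with b winning on conflicts, e.g.:
#   merge_hash({'a': {'x': 1}, 'c': 1}, {'a': {'y': 2}, 'c': 2})
#   ->  {'a': {'x': 1, 'y': 2}, 'c': 2}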
def default(value, function):
''' syntactic sugar around lazy evaluation of defaults '''
if value is None:
return function()
return value
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
                # The gitdir referenced in the .git file may be an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
branch = f.readline().split('/')[-1].rstrip("\n")
f.close()
branch_path = os.path.join(repo_path, "refs", "heads", branch)
if os.path.exists(branch_path):
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = branch[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
else:
result = ''
return result
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = _git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
            submodule_info = _git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
def version(prog):
result = "{0} {1}".format(prog, __version__)
gitinfo = _gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
return result
def version_info(gitinfo=False):
if gitinfo:
        # expensive call, use with care
ansible_version_string = version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
def getch():
''' read in a single character '''
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def sanitize_output(arg_string):
''' strips private info out of a string '''
private_keys = ('password', 'login_password')
output = []
for part in arg_string.split():
try:
(k, v) = part.split('=', 1)
except ValueError:
v = heuristic_log_sanitize(part)
output.append(v)
continue
if k in private_keys:
v = 'VALUE_HIDDEN'
else:
v = heuristic_log_sanitize(v)
output.append('%s=%s' % (k, v))
output = ' '.join(output)
return output
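# Illustrative sketch (not part of the original module): keys listed in
# private_keys get their values masked; other values pass through
# heuristic_log_sanitize. Assuming that helper leaves plain values alone:
#   sanitize_output("user=bob password=hunter2")
#   ->  "user=bob password=VALUE_HIDDEN"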
####################################################################
# option handling code for /usr/bin/ansible and ansible-playbook
# below this line
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
        return optparse.OptionParser.format_help(self, formatter=formatter)
def increment_debug(option, opt, value, parser):
global VERBOSITY
VERBOSITY += 1
def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
''' create an options parser for any ansible script '''
parser = SortedOptParser(usage, version=version("%prog"))
parser.add_option('-v','--verbose', default=False, action="callback",
callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
parser.add_option('-i', '--inventory-file', dest='inventory',
help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
default=constants.DEFAULT_HOST_LIST)
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON", default=[])
parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
help='ask for SSH password')
parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
dest='vault_password_file', help="vault password file")
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-M', '--module-path', dest='module_path',
help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
default=None)
if subset_opts:
parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
dest='timeout',
help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if runas_opts:
# priv user defaults to root later on to enable detecting when this option was given here
parser.add_option('-K', '--ask-sudo-pass', default=constants.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password (deprecated, use become)')
parser.add_option('--ask-su-pass', default=constants.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
help='ask for su password (deprecated, use become)')
parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root) (deprecated, use become)')
parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
help='run operations with su (deprecated, use become)')
parser.add_option('-R', '--su-user', default=None,
help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)
# consolidated privilege escalation (become)
parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (nopasswd implied)")
parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
parser.add_option('--become-user', default=None, dest='become_user', type='string',
help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
if connect_opts:
parser.add_option('-c', '--connection', dest='connection',
default=constants.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT)
if async_opts:
parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur"
)
if diff_opts:
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check"
)
return parser
def parse_extra_vars(extra_vars_opts, vault_pass):
extra_vars = {}
for extra_vars_opt in extra_vars_opts:
extra_vars_opt = to_unicode(extra_vars_opt)
if extra_vars_opt.startswith(u"@"):
# Argument is a YAML file (JSON is a subset of YAML)
extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass))
elif extra_vars_opt and extra_vars_opt[0] in u'[{':
# Arguments as YAML
extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt))
else:
# Arguments as Key-value
extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt))
return extra_vars
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
vault_pass = None
new_vault_pass = None
if ask_vault_pass:
vault_pass = getpass.getpass(prompt="Vault password: ")
if ask_vault_pass and confirm_vault:
vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
if vault_pass != vault_pass2:
raise errors.AnsibleError("Passwords do not match")
if ask_new_vault_pass:
new_vault_pass = getpass.getpass(prompt="New Vault password: ")
if ask_new_vault_pass and confirm_new:
new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
if new_vault_pass != new_vault_pass2:
raise errors.AnsibleError("Passwords do not match")
# enforce no newline chars at the end of passwords
if vault_pass:
vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
if new_vault_pass:
new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
return vault_pass, new_vault_pass
def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD):
sshpass = None
becomepass = None
vaultpass = None
become_prompt = ''
if ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_method.upper()
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % become_method.upper()
if become_ask_pass:
becomepass = getpass.getpass(prompt=become_prompt)
if ask_pass and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
if ask_vault_pass:
vaultpass = getpass.getpass(prompt="Vault password: ")
if vaultpass:
vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()
return (sshpass, becomepass, vaultpass)
def choose_pass_prompt(options):
if options.ask_su_pass:
return 'su'
elif options.ask_sudo_pass:
return 'sudo'
return options.become_method
def normalize_become_options(options):
options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER
if options.become:
pass
elif options.sudo:
options.become = True
options.become_method = 'sudo'
elif options.su:
options.become = True
options.become_method = 'su'
def do_encrypt(result, encrypt, salt_size=None, salt=None):
if PASSLIB_AVAILABLE:
try:
crypt = getattr(passlib.hash, encrypt)
except:
raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)
if salt_size:
result = crypt.encrypt(result, salt_size=salt_size)
elif salt:
result = crypt.encrypt(result, salt=salt)
else:
result = crypt.encrypt(result)
else:
raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")
return result
def last_non_blank_line(buf):
all_lines = buf.splitlines()
all_lines.reverse()
for line in all_lines:
if (len(line) > 0):
return line
# shouldn't occur unless there's no output
return ""
def filter_leading_non_json_lines(buf):
'''
used to avoid random output from SSH at the top of JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
    need to filter any leading line which does not start with '{' or '[' (empty lines included).
filter only leading lines since multiline JSON is valid.
'''
filtered_lines = StringIO.StringIO()
stop_filtering = False
for line in buf.splitlines():
if stop_filtering or line.startswith('{') or line.startswith('['):
stop_filtering = True
filtered_lines.write(line + '\n')
return filtered_lines.getvalue()
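# Illustrative sketch (not part of the original module): only leading
# non-JSON lines are dropped; everything from the first '{' or '[' line
# onward is kept verbatim, e.g.:
#   filter_leading_non_json_lines('MOTD banner\n{"ok": true}\n')
#   ->  '{"ok": true}\n'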
def boolean(value):
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
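# Illustrative sketch (not part of the original module): only the literal
# truthy spellings above count; anything else is False, e.g.:
#   boolean("Yes") -> True;  boolean(1) -> True;  boolean("on") -> False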
def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
"""
helper function for connection plugins to create privilege escalation commands
"""
randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
success_key = 'BECOME-SUCCESS-%s' % randbits
prompt = None
becomecmd = None
shell = shell or '$SHELL'
if method == 'sudo':
# Rather than detect if sudo wants a password this time, -k makes sudo always ask for
# a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
# string to the user's shell. We loop reading output until we see the randomly-generated
# sudo prompt set with the -p option.
prompt = '[sudo via ansible, key=%s] password: ' % randbits
exe = exe or C.DEFAULT_SUDO_EXE
becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
(exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
elif method == 'su':
exe = exe or C.DEFAULT_SU_EXE
flags = flags or C.DEFAULT_SU_FLAGS
becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
elif method == 'pbrun':
prompt = 'assword:'
exe = exe or 'pbrun'
flags = flags or ''
becomecmd = '%s -b %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd)))
elif method == 'pfexec':
exe = exe or 'pfexec'
flags = flags or ''
        # No user as it uses its own exec_attr to figure it out
becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd)))
if becomecmd is None:
raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
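# Illustrative note (not part of the original module): the function returns a
# 3-tuple (wrapped_command, prompt, success_key) -- the command is the become
# invocation quoted into "<shell> -c '...'", prompt is the password prompt to
# watch for (None for su and pfexec), and success_key is the random
# BECOME-SUCCESS marker echoed before the real command so callers can detect
# successful escalation in the output stream.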
def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd):
"""
helper function for connection plugins to create sudo commands
"""
return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe)
def make_su_cmd(su_user, executable, cmd):
"""
Helper function for connection plugins to create direct su commands
"""
return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE)
def get_diff(diff):
# called by --diff usage in playbook and runner via callbacks
# include names in diffs 'before' and 'after' and do diff -U 10
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
ret = []
if 'dst_binary' in diff:
ret.append("diff skipped: destination file appears to be binary\n")
if 'src_binary' in diff:
ret.append("diff skipped: source file appears to be binary\n")
if 'dst_larger' in diff:
ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
if 'src_larger' in diff:
ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
if 'before' in diff and 'after' in diff:
if 'before_header' in diff:
before_header = "before: %s" % diff['before_header']
else:
before_header = 'before'
if 'after_header' in diff:
after_header = "after: %s" % diff['after_header']
else:
after_header = 'after'
differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
for line in list(differ):
ret.append(line)
return u"".join(ret)
except UnicodeDecodeError:
return ">> the files are different, but the diff library cannot compare unicode strings"
def is_list_of_strings(items):
for x in items:
if not isinstance(x, basestring):
return False
return True
def list_union(a, b):
result = []
for x in a:
if x not in result:
result.append(x)
for x in b:
if x not in result:
result.append(x)
return result
def list_intersection(a, b):
result = []
for x in a:
if x in b and x not in result:
result.append(x)
return result
def list_difference(a, b):
result = []
for x in a:
if x not in b and x not in result:
result.append(x)
for x in b:
if x not in a and x not in result:
result.append(x)
return result
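# Illustrative note (not part of the original module): despite the name this
# is a symmetric difference, e.g. list_difference([1, 2], [2, 3]) -> [1, 3].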
def contains_vars(data):
'''
returns True if the data contains a variable pattern
'''
return "$" in data or "{{" in data
def safe_eval(expr, locals={}, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained). If
the input data to this function came from an untrusted (remote) source,
it should first be run through _clean_data_struct() to ensure the data
is further sanitized prior to evaluation.
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
    if not sys.version.startswith('2.6'):
        # set.union() returns a new set, so rebind SAFE_NODES to pick up ast.Set
        SAFE_NODES = SAFE_NODES.union(
            set(
                (ast.Set,)
            )
        )
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
result = eval(compiled, {}, locals)
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError, e:
# special handling for syntax errors, we just return
# the expression string back as-is
if include_exceptions:
return (expr, None)
return expr
except Exception, e:
if include_exceptions:
return (expr, e)
return expr
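# Illustrative sketch (not part of the original module): safe_eval only
# permits the literal-ish AST nodes and whitelisted calls above; anything
# else comes back as the original string instead of being evaluated, e.g.:
#   safe_eval("[1, 2, 3]")                 ->  [1, 2, 3]
#   safe_eval("__import__('os').getcwd()") ->  "__import__('os').getcwd()"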
def listify_lookup_plugin_terms(terms, basedir, inject):
from ansible.utils import template
if isinstance(terms, basestring):
# someone did:
# with_items: alist
# OR
# with_items: {{ alist }}
stripped = terms.strip()
if not (stripped.startswith('{') or stripped.startswith('[')) and \
not stripped.startswith("/") and \
not stripped.startswith('set([') and \
not LOOKUP_REGEX.search(terms):
# if not already a list, get ready to evaluate with Jinja2
# not sure why the "/" is in above code :)
try:
new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
if isinstance(new_terms, basestring) and "{{" in new_terms:
pass
else:
terms = new_terms
except:
pass
if '{' in terms or '[' in terms:
# Jinja2 already evaluated a variable to a list.
# Jinja2-ified list needs to be converted back to a real type
# TODO: something a bit less heavy than eval
return safe_eval(terms)
if isinstance(terms, basestring):
terms = [ terms ]
return terms
def combine_vars(a, b):
_validate_both_dicts(a, b)
if C.DEFAULT_HASH_BEHAVIOUR == "merge":
return merge_hash(a, b)
else:
return dict(a.items() + b.items())
def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
'''Return a random password string of length containing only chars.'''
password = []
while len(password) < length:
new_char = os.urandom(1)
if new_char in chars:
password.append(new_char)
return ''.join(password)
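# Illustrative note (not part of the original module): this is rejection
# sampling -- random bytes from os.urandom are kept only when they land in
# the allowed character set, so the result is uniform over chars, e.g.
# random_password(8) yields 8 characters drawn from C.DEFAULT_PASSWORD_CHARS.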
def before_comment(msg):
''' what's the part of a string before a comment? '''
msg = msg.replace("\#","**NOT_A_COMMENT**")
msg = msg.split("#")[0]
msg = msg.replace("**NOT_A_COMMENT**","#")
return msg
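# Illustrative sketch (not part of the original module): escaped hashes
# survive, unescaped ones start a comment, e.g.:
#   before_comment("port: 80 # web")  ->  "port: 80 "
#   before_comment('color: \#fff')    ->  'color: #fff'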
def load_vars(basepath, results, vault_password=None):
"""
Load variables from any potential yaml filename combinations of basepath,
returning result.
"""
paths_to_check = [ "".join([basepath, ext])
for ext in C.YAML_FILENAME_EXTENSIONS ]
found_paths = []
for path in paths_to_check:
found, results = _load_vars_from_path(path, results, vault_password=vault_password)
if found:
found_paths.append(path)
# disallow the potentially confusing situation that there are multiple
# variable files for the same name. For example if both group_vars/all.yml
# and group_vars/all.yaml
if len(found_paths) > 1:
raise errors.AnsibleError("Multiple variable files found. "
"There should only be one. %s" % ( found_paths, ))
return results
## load variables from yaml files/dirs
# e.g. host/group_vars
#
def _load_vars_from_path(path, results, vault_password=None):
"""
Robustly access the file at path and load variables, carefully reporting
errors in a friendly/informative way.
Return the tuple (found, new_results, )
"""
try:
# in the case of a symbolic link, we want the stat of the link itself,
# not its target
pathstat = os.lstat(path)
except os.error, err:
# most common case is that nothing exists at that path.
if err.errno == errno.ENOENT:
return False, results
# otherwise this is a condition we should report to the user
raise errors.AnsibleError(
"%s is not accessible: %s."
" Please check its permissions." % ( path, err.strerror))
# symbolic link
if stat.S_ISLNK(pathstat.st_mode):
try:
target = os.path.realpath(path)
except os.error, err2:
raise errors.AnsibleError("The symbolic link at %s "
"is not readable: %s. Please check its permissions."
% (path, err2.strerror, ))
# follow symbolic link chains by recursing, so we repeat the same
# permissions checks above and provide useful errors.
return _load_vars_from_path(target, results, vault_password)
# directory
if stat.S_ISDIR(pathstat.st_mode):
# support organizing variables across multiple files in a directory
return True, _load_vars_from_folder(path, results, vault_password=vault_password)
# regular file
elif stat.S_ISREG(pathstat.st_mode):
data = parse_yaml_from_file(path, vault_password=vault_password)
if data and type(data) != dict:
raise errors.AnsibleError(
"%s must be stored as a dictionary/hash" % path)
elif data is None:
data = {}
# combine vars overrides by default but can be configured to do a
# hash merge in settings
results = combine_vars(results, data)
return True, results
# something else? could be a fifo, socket, device, etc.
else:
raise errors.AnsibleError("Expected a variable file or directory "
"but found a non-file object at path %s" % (path, ))
def _load_vars_from_folder(folder_path, results, vault_password=None):
"""
Load all variables within a folder recursively.
"""
# this function and _load_vars_from_path are mutually recursive
try:
names = os.listdir(folder_path)
except os.error, err:
raise errors.AnsibleError(
"This folder cannot be listed: %s: %s."
% ( folder_path, err.strerror))
# evaluate files in a stable order rather than whatever order the
# filesystem lists them.
names.sort()
# do not parse hidden files or dirs, e.g. .svn/
paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')]
for path in paths:
_found, results = _load_vars_from_path(path, results, vault_password=vault_password)
return results
def update_hash(hash, key, new_value):
''' used to avoid nested .update calls on the parent '''
value = hash.get(key, {})
value.update(new_value)
hash[key] = value
def censor_unlogged_data(data):
'''
    used when the no_log: True attribute is passed to a task to keep its data out of callbacks.
    NOT intended to prevent variable registration; it only keeps values from showing up on
    screen
'''
new_data = {}
for (x,y) in data.iteritems():
if x in [ 'skipped', 'changed', 'failed', 'rc' ]:
new_data[x] = y
new_data['censored'] = 'results hidden due to no_log parameter'
return new_data
def check_mutually_exclusive_privilege(options, parser):
# privilege escalation command line arguments need to be mutually exclusive
if (options.su or options.su_user or options.ask_su_pass) and \
(options.sudo or options.sudo_user or options.ask_sudo_pass) or \
(options.su or options.su_user or options.ask_su_pass) and \
(options.become or options.become_user or options.become_ask_pass) or \
(options.sudo or options.sudo_user or options.ask_sudo_pass) and \
(options.become or options.become_user or options.become_ask_pass):
parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
"and su arguments ('-su', '--su-user', and '--ask-su-pass') "
"and become arguments ('--become', '--become-user', and '--ask-become-pass')"
" are exclusive of each other")
|
j0057/ansible-1
|
lib/ansible/utils/__init__.py
|
Python
|
gpl-3.0
| 60,463
|
[
"Galaxy",
"VisIt"
] |
503887e7175c2a04a3a5ea9c27e972f08dbf87354df7492215395bd56f44134f
|
import os, shutil
class calculate_stability(object):
def __init__(self):
self.new_sequences_list = []
self.split_new_seq_list = []
self.num_splits = 4
self.structure = "2YP7"
self.bash_file_name = "bash_file.sh"
if self.structure == "1HA0":
self.structure_seq = "MKTIIALSYILCLVFAQKLPGNDN" + "STATLCLGHHAVPNGTLVKTITDDQIEVTNATELVQSSSTGKICNNPHRILDGIDCTLIDALLGDPHCDVFQNETWDLFVERSKAFSNCYPYDVPDYASLRSLVASSGTLEFITEGFTWTGVTQNGGSNACKRGPGSGFFSRLNWLTKSGSTYPVLNVTMPNNDNFDKLYIWGIHHPSTNQEQTSLYVQASGRVTVSTRRSQQTIIPNIGSRPWVRGLSSRISIYWTIVKPGDVLVINSNGNLIAPRGYFKMRTGKSSIMRSDAPIDTCISECITPNGSIPNDKPFQNVNKITYGACPKYVKQNTLKLATGMRNVPEKQTQGLFGAIAGFIENGWEGMIDGWYGFRHQNSEGTGQAADLKSTQAAIDQINGKLNRVIEKTNEKFHQIEKEFSEVEGRIQDLEKYVEDTKIDLWSYNAELLVALENQHTIDLTDSEMNKLFEKTRRQLRENAEEMGNGCFKIYHKCDNACIESIRNGTYDHDVYRNEALNNRFQI"
elif self.structure == "2YP7":
self.structure_seq = "MKTIIALSYILCLVFAQKLPGNDN" + "STATLCLGHHAVPNGTIVKTITNDQIEVTNATELVQSSSTGGICDSPHQILDGENCTLIDALLGDPQCDGFQNKKWDLFVERSKAYSNCYPYDVPDYASLRSLVASSGTLEFNNESFNWTGVTQNGTSSACKRKSNNSFFSRLNWLTHLKFKYPALNVTMPNNEKFDKLYIWGVHHPGTDNDQIFLYAQASGRITVSTKRSQQTVIPNIGSRPRVRNIPSRISIYWTIVKPGDILLINSTGNLIAPRGYFKIRSGKSSIMRSDAPIGKCNSECITPNGSIPNDKPFQNVNRITYGACPRYVKQNTLKLATGMRNVPEKQTQGIFGAIAGFIENGWEGMVDGWYGFRHQNSEGIGQAADLKSTQAAINQINGKLNRLIGKTNEKFHQIEKEFSEVEGRIQDLEKYVEDTKIDLWSYNAELLVALENQHTIDLTDSEMNKLFERTKKQLRENAEDMGNGCFKIYHKCDNACIGSIRNGTYDHDVYRDEALNNRFQI"
#self.structure_outgroup ="MKTIIALSYILCLVFAQKLPGNDNSTATLCLGHHAVPNGTLVKTITNDQIEVTNATELVQSSSTGRICDSPHRILDGKNCTLIDALLGDPHCDGFQNKEWDLFVERSKAYSNCYPYDVPDYASLRSLVASSGTLEFINEDFNWTGVAQDGGSYACKRGSVNSFFSRLNWLHKSEYKYPALNVTMPNNGKFDKLYIWGVHHPSTDRDQTSLYVRASGRVTVSTKRSQQTVTPNIGSRPWVRGQSSRISIYWTIVKPGDILLINSTGNLIAPRGYFKIRNGKSSIMRSDAPIGTCSSECITPNGSIPNDKPFQNVNRITYGACPRYVKQNTLKLATGMRNVPEKQTRGIFGAIAGFIENGWEGMVDGWYGFRHQNSEGTGQAADLKSTQAAIDQINGKLNRLIEKTNEKFHQIEKEFSEVEGRIQDLEKYVEDTKIDLWSYNAELLVALENQHTIDLTDSEMNKLFEKTRKQLRENAEDMGNGCFKIYHKCDNACIGSIRNGTYDHDVYRDEALNNRFQIKGVELKSGYKDWILWISFAISCFLLCVVLLGFIMWACQKGNIRCNICI"
self.aa = ["A", "R", "N", "D", "C", "E", "Q", "G", "H", "I", "L", "K", "M", "F", "P", "S", "T", "W", "Y", "V"]
def run(self):
self.create_mutated_sequences()
self.prep_split_folders()
os.system("chmod 755 " + self.bash_file_name)
os.system("./" + self.bash_file_name)
def create_mutated_sequences(self):
'''
Create all possible sequences from the given structure sequence by just mutating one position
:return:
'''
for site in range(len(self.structure_seq)):
for amino_acid in self.aa:
mutate_seq = list(self.structure_seq)
mutate_seq[site] = amino_acid
mutate_seq = "".join(mutate_seq)
self.new_sequences_list.append(mutate_seq)
print("Number of Mutations to run" + str(len(self.new_sequences_list)))
self.split_new_seq_list = self.split_list(self.new_sequences_list, self.num_splits)
def split_list(self, initial, n):
'''
Split initial list into n lists
:return list of split lists
'''
out = []
new_index = int(1.0 * len(initial) / n + 0.5)
for i in range(n-1):
out.append(initial[i*new_index:i*new_index+new_index])
out.append(initial[n*new_index-new_index:])
return out
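    # Illustrative note (not part of the original script): with n = 4,
    # split_list(range(10), 4) gives chunks of int(1.0 * 10 / 4 + 0.5) = 3:
    #   [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]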
def prep_split_folders(self):
'''
Create split folders and sequence files to run on the cluster and a bash file to call
:return:
'''
bash_file = open(self.bash_file_name, 'w')
bash_file.write("#!/bin/sh" + "\n")
num = 1
        for seq_list in self.split_new_seq_list:
            self.make_split_sequence_files(num, seq_list)
            self.add_bash_run(bash_file, num)
            num += 1
bash_file.write("wait")
bash_file.close()
def add_bash_run(self, bash_file, index):
'''
Add a line to the bash file to calculate stabilities in the designated split
:param bash_file: the bash file to write to
:param index: the split to work with
:return:
'''
bash_file.write("srun -n 1 -c 1 -t 72:00:00 -o output_" + str(index) + ".txt python " + str(index) + "_foldx_split/run_mutator_cluster.py " + str(index) + " " + self.structure + " & \n")
#bash_file.write("python " + str(index) + "_foldx_split/run_mutator_cluster.py " + str(index) + " " + self.structure + " & \n")
def move_foldx_files(self, folder):
'''
:param folder: the path of where you want to move the files
:return:moves the files from /src to the folder
'''
essential_files_directory = os.getcwd() + "/foldx_essentials"
essential_files = os.listdir(essential_files_directory)
        for fname in essential_files:
            file_name = os.path.join(essential_files_directory, fname)
            if os.path.isfile(file_name):
                shutil.copy(file_name, folder)
def make_split_sequence_files(self, index, list_sequences):
'''
makes the split folder and moves all essential foldx files into it, then makes the sequence file
:param index: identifying which split folder we are currently working with
:param list_sequences: list of sequences to put in this split folder
:return:
'''
folder_name = str(index) + "_foldx_split"
if not os.path.exists(folder_name):
os.mkdir(folder_name)
self.move_foldx_files(folder_name)
sequence_file_name = str(index) + "_sequences_file.txt"
        sequence_file = open(folder_name + "/" + sequence_file_name, 'w')
        for sequence in list_sequences:
            sequence_file.write(sequence + "\n")
        sequence_file.close()
def main():
new_sequences = calculate_stability()
new_sequences.run()
if __name__ == "__main__":
main()
|
blab/stability
|
augur/mutator/mutate_everything_2YP7/mutator_stability.py
|
Python
|
agpl-3.0
| 5,914
|
[
"FoldX"
] |
5e7a32fd50664c0a204ce57f8d672885f0e946025a1caa9a362255431309ddb7
|
import binascii
import pytest
import flextls
from flextls.connection import DTLSv10Connection
from flextls.exception import NotEnoughData
from flextls.protocol.handshake import DTLSv10Handshake
from flextls.protocol.handshake import ServerCertificate
from flextls.protocol.record import Record, DTLSv10Record
class TestDTLSv10(object):
def test_empty_data(self):
with pytest.raises(NotEnoughData):
DTLSv10Record().decode(b"")
def test_not_enough_data(self):
# Handshake, DTLSv1.0, Epoch 0, Sequence Number 0, Length 205
data = binascii.unhexlify(b"16feff000000000000000000cd")
with pytest.raises(NotEnoughData):
DTLSv10Record().decode(data)
#
assert binascii.hexlify(data) == b"16feff000000000000000000cd"
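# Explanatory note (not part of the original tests): a DTLS record header is
# 13 bytes -- content type (1), protocol version (2), epoch (2), sequence
# number (6) and length (2). So b"16feff000000000000000000cd" decodes as
# handshake (0x16 = 22), DTLS 1.0 (0xfeff -> major 254, minor 255), epoch 0,
# sequence number 0, length 0xcd = 205.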
class TestCertificate(object):
# Certificate Length: 681, Certificate Data
_cert = b"0002a90002a6308202a23082020ba003020102020900e8ffa7c3bdac30" \
b"81300d06092a864886f70d0101050500306a310b300906035504061302" \
b"44453110300e06035504080c075361636873656e31143012060355040a" \
b"0c0b4578616d706c6520496e633112301006035504030c096c6f63616c" \
b"686f7374311f301d06092a864886f70d01090116106365727440657861" \
b"6d706c652e6f7267301e170d3135303131303037333733345a170d3136" \
b"303131303037333733345a306a310b3009060355040613024445311030" \
b"0e06035504080c075361636873656e31143012060355040a0c0b457861" \
b"6d706c6520496e633112301006035504030c096c6f63616c686f737431" \
b"1f301d06092a864886f70d010901161063657274406578616d706c652e" \
b"6f726730819f300d06092a864886f70d010101050003818d0030818902" \
b"818100a742a7933fd1877d8596a8c99d36009502ce0e6bea07b5b2de31" \
b"bd39a62177475ed73b3439166845e5d48199391d9fd0a90997d0790744" \
b"a4748ea271ed301920898b5b5a7d0c4d91c0fc06c1585ed2e050c8b7c7" \
b"8eef239fdcdbcf91510e52d862beb839d80e4bc431c290f0da89960bf2" \
b"0c655a201bdaf768478f2e22539f050203010001a350304e301d060355" \
b"1d0e0416041487dca658f477a8be358453feb61c796d6a6c5b5d301f06" \
b"03551d2304183016801487dca658f477a8be358453feb61c796d6a6c5b" \
b"5d300c0603551d13040530030101ff300d06092a864886f70d01010505" \
b"00038181003d1dfb7cdd46b2fb8b1d3fa18207634056ddfae8fc5e3ce7" \
b"24a1dd0d154f73d885711024322cfd88871156807061bffa15378fe341" \
b"d4b91773cdba279645458af6fc3511fc613c284bc36e69559428c6b8a9" \
b"4cc674399bc69dc8c2e673ea709638320bdd98d0a3c4b7a94e31184e27" \
b"e75c4273543b02a6ca1151b8a4bb03da79"
def test_pkg1(self):
# Handshake, DTLSv1.0, Epoch 0, Sequence Number 2, Length 696
data = b"16feff000000000000000202b8"
# Certificate, Length 684, Message Sequence 2, Fragment Offset 0, Fragment Length 684
data += b"0b0002ac00020000000002ac"
data += self._cert
(record, data) = DTLSv10Record().decode(binascii.unhexlify(data))
assert len(data) == 0
assert record.content_type == 22
assert record.version.major == 254
assert record.version.minor == 255
assert record.epoch == 0
assert record.sequence_number == 2
assert record.length == 696
# Handshake
handshake = record.payload
assert handshake.type == 11
assert handshake.length == 684
assert handshake.message_seq == 2
assert handshake.fragment_offset == 0
assert handshake.fragment_length == 684
# Certificate
certificate = record.payload.payload
assert len(certificate.certificate_list) == 1
assert len(certificate.certificate_list[0].value) == 678
def test_pkg2(self):
# Handshake, DTLSv1.0, Epoch 0, Sequence Number 2, Length 696
record_header = b"16feff0000000000000002"
# Certificate, Length 684, Message Sequence 2,
cert_header = b"0b0002ac0002"
conn_dtls = DTLSv10Connection(
protocol_version=flextls.registry.version.DTLSv10
)
conn_dtls._handshake_next_receive_seq = 2
n = 200
# Fragment Offset 0 Fragment Length
        data_split = [self._cert[i:i + n] for i in range(0, len(self._cert), n)]
        for i, part in enumerate(data_split):
            tmp = "%.6x%.6x" % (i * n // 2, len(part) // 2)
            cert_data = cert_header + tmp.encode('ascii') + part
            tmp = "%.4x" % (len(cert_data) // 2)
            data = record_header + tmp.encode('ascii') + cert_data
conn_dtls.decode(binascii.unhexlify(data))
assert conn_dtls.is_empty() is False
record = conn_dtls.pop_record()
assert isinstance(record, DTLSv10Handshake)
assert isinstance(record.payload, ServerCertificate)
class TestClientHello(object):
def test_pkg1(self):
# Handshake, DTLSv1.0, Epoch 0, Sequence Number 0, Length 205
data = b"16feff000000000000000000cd"
# Client Hello, Length 193, Message Sequence 0, Fragment Offset 0, Fragment Length 193
data += b"010000c100000000000000c1"
# DTLS 1.0
data += b"feff"
# Random
data += b"24dc8f65fb5970f29af7f330b6a00942d71783db3230cba5bdb98213efdbb99f"
# Session ID Length 0, Cookie Length 0
data += b"0000"
# Cipher Suites Length 78
data += b"004e"
# Cipher Suites 39
data += b"c014c00a0039003800880087c00fc00500350084"
data += b"c013c00900330032009a009900450044c00ec004"
data += b"002f009600410007c012c00800160013c00dc003"
data += b"000a001500120009001400110008000600ff"
# Compression Methods Length 1: null
data += b"0100"
# Extensions, Length 73
data += b"0049000b000403000102000a00340032000e000d"
data += b"0019000b000c00180009000a0016001700080006"
data += b"0007001400150004000500120013000100020003"
data += b"000f0010001100230000000f000101"
(record, data) = DTLSv10Record().decode(binascii.unhexlify(data))
assert record.content_type == 22
assert record.version.major == 254
assert record.version.minor == 255
assert record.payload.type == 1
assert record.payload.length == 193
# Client Hello
client_hello = record.payload.payload
assert client_hello.version.major == 254
assert client_hello.version.minor == 255
assert len(client_hello.random) == 32
assert len(client_hello.session_id) == 0
assert len(client_hello.cipher_suites) == 39
assert len(client_hello.compression_methods) == 1
def test_pkg2(self):
# Handshake, DTLSv1.0, Epoch 0, Sequence Number 1, Length 225
data = b"16feff000000000000000100e1"
# Client Hello, Length 213, Message Sequence 1, Fragment Offset 0, Fragment Length 213
data += b"010000d500010000000000d5"
# DTLS 1.0
data += b"feff"
# Random
data += b"24dc8f65fb5970f29af7f330b6a00942d71783db3230cba5bdb98213efdbb99f"
# Session ID Length 0, Cookie Length 20, Cookie Data
data += b"00142c24633bb13af58be4a0f50e47767cfa93e63515"
# Cipher Suites Length 78
data += b"004e"
# Cipher Suites 39
data += b"c014c00a0039003800880087c00fc00500350084"
data += b"c013c00900330032009a009900450044c00ec004"
data += b"002f009600410007c012c00800160013c00dc003"
data += b"000a001500120009001400110008000600ff"
# Compression Methods Length 1: null
data += b"0100"
# Extensions, Length 73
data += b"0049000b000403000102000a00340032000e000d"
data += b"0019000b000c00180009000a0016001700080006"
data += b"0007001400150004000500120013000100020003"
data += b"000f0010001100230000000f000101"
(record, data) = DTLSv10Record().decode(binascii.unhexlify(data))
assert len(data) == 0
assert record.content_type == 22
assert record.version.major == 254
assert record.version.minor == 255
assert record.epoch == 0
assert record.sequence_number == 1
assert record.length == 225
# Handshake
handshake = record.payload
assert handshake.type == 1
assert handshake.length == 213
assert handshake.message_seq == 1
assert handshake.fragment_offset == 0
assert handshake.fragment_length == 213
# Client Hello
client_hello = record.payload.payload
assert client_hello.version.major == 254
assert client_hello.version.minor == 255
assert len(client_hello.random) == 32
assert len(client_hello.session_id) == 0
assert len(client_hello.cookie) == 20
assert len(client_hello.cipher_suites) == 39
assert len(client_hello.compression_methods) == 1
assert len(client_hello.extensions) == 4
class TestClientKeyExchange(object):
def test_pkg1(self):
# Handshake, DTLSv1.0, Epoch 0, Sequence Number 2, Length 78
data = b"16feff0000000000000002004e"
# Client Key Exchange, Length 66, Message Sequence 2, Fragment Offset 0, Fragment Length 66
data += b"100000420002000000000042"
# Pubkey Length: 65
data += b"41"
# Pubkey
data += b"0466c160c0cc7a657c0dbd19be373922ffed1e78315706332c17ccb79b" \
b"3b7d9050fd55bc74c37f36a8d4c6773b95314fe268e0385e490ef73079" \
b"c405f54c61265e"
(record, data) = DTLSv10Record().decode(binascii.unhexlify(data))
assert len(data) == 0
assert record.content_type == 22
assert record.version.major == 254
assert record.version.minor == 255
assert record.epoch == 0
assert record.sequence_number == 2
assert record.length == 78
# Handshake
handshake = record.payload
assert handshake.type == 16
assert handshake.length == 66
assert handshake.message_seq == 2
assert handshake.fragment_offset == 0
assert handshake.fragment_length == 66
        # Client Key Exchange
key_exchange = record.payload.payload
class TestHelloVerifyRequest(object):
def test_pkg1(self):
# Handshake, DTLSv1.0, Epoch 0, Sequence Number 0, Length 35
data = b"16feff00000000000000000023"
# Hello Verify Request, Length 23, Message Sequence 0, Fragment Offset 0, Fragment Length 23
data += b"030000170000000000000017"
# DTLS 1.0
data += b"feff"
        # Cookie Length: 20, Cookie (20 bytes)
data += b"142c24633bb13af58be4a0f50e47767cfa93e63515"
(record, data) = DTLSv10Record().decode(binascii.unhexlify(data))
assert len(data) == 0
assert record.content_type == 22
assert record.version.major == 254
assert record.version.minor == 255
assert record.length == 35
assert record.payload.type == 3
assert record.payload.length == 23
assert record.payload.message_seq == 0
assert record.payload.fragment_offset == 0
assert record.payload.fragment_length == 23
# Client Hello
hello_verify = record.payload.payload
assert hello_verify.version.major == 254
assert hello_verify.version.minor == 255
assert len(hello_verify.cookie) == 20
class TestServerHello(object):
def test_pkg1(self):
# Handshake, DTLSv1.0, Epoch 0, Sequence Number 1, Length 74
data = b"16feff0000000000000001004a"
# Server Hello, Length 62, Message Sequence 1, Fragment Offset 0, Fragment Length 62
data += b"0200003e000100000000003e"
# DTLS 1.0
data += b"feff"
# Random
data += b"0904c079eaf6fc8ccbb345bf1b279158d0127ec87bc2cf971c6c94ac42d1abd8"
# Session ID Length 0, Cipher Suite: TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA (0xc014), Compression Method: null (0)
data += b"00c01400"
# Extensions, Length 22
data += b"0016ff01000100000b0004030001020023000000"
data += b"0f000101"
(record, data) = DTLSv10Record().decode(binascii.unhexlify(data))
assert len(data) == 0
assert record.content_type == 22
assert record.version.major == 254
assert record.version.minor == 255
assert record.epoch == 0
assert record.sequence_number == 1
assert record.length == 74
# Handshake
handshake = record.payload
assert handshake.type == 2
assert handshake.length == 62
assert handshake.message_seq == 1
assert handshake.fragment_offset == 0
assert handshake.fragment_length == 62
# Server Hello
server_hello = record.payload.payload
assert server_hello.version.major == 254
assert server_hello.version.minor == 255
assert len(server_hello.random) == 32
assert len(server_hello.session_id) == 0
# ToDo: test cipher suite and compression
assert len(server_hello.extensions) == 4
class TestServerHelloDone(object):
def test_pkg1(self):
# Handshake, DTLSv1.0, Epoch 0, Sequence Number 4, Length 12
data = b"16feff0000000000000004000c"
# Server Hello Done, Length 12, Message Sequence 4, Fragment Offset 0, Fragment Length 0
data += b"0e0000000004000000000000"
(record, data) = DTLSv10Record().decode(binascii.unhexlify(data))
assert len(data) == 0
assert record.content_type == 22
assert record.version.major == 254
assert record.version.minor == 255
assert record.epoch == 0
assert record.sequence_number == 4
assert record.length == 12
# Handshake
handshake = record.payload
assert handshake.type == 14
assert handshake.length == 0
assert handshake.message_seq == 4
assert handshake.fragment_offset == 0
assert handshake.fragment_length == 0
class TestServerKeyExchange(object):
def test_pkg1(self):
# Handshake, DTLSv1.0, Epoch 0, Sequence Number 3, Length 211
data = b"16feff000000000000000300d3"
# Server Key Exchange, Length 199, Message Sequence 3, Fragment Offset 0, Fragment Length 199
data += b"0c0000c700030000000000c7"
# Curve Type: named_curve (0x03), Named Curve: secp256r1 (0x0017)
data += b"030017"
# Pubkey Length: 65
data += b"41"
# Pubkey
data += b"0407220baac1ab19e1bcf6151a86a9e6c6d8f35b6bc034b9f6b26d8a82" \
b"6f9081c57f7038f66c1e9473e96310194cd71609038a5d1425951e857a" \
b"ee8d61e4a657d9"
# Signature Length: 128
data += b"0080"
# Signature
data += b"877afeccec9b09ecf17c637be672367f8a12127af39e5f4a93ced4989e" \
b"5fb213a4e99418480b54e5aac1f56865510620c1ae6bdcfad22511089a" \
b"053552b7da770b252e993c45a6354fc4d7bfdb844d1fa8748a22057a2a" \
b"8e38410c5ef6bec7acf6eda364c3d0afdddaef7b6d9745dc514bcb7241" \
b"0468624094790cf054475dd6"
(record, data) = DTLSv10Record().decode(binascii.unhexlify(data))
assert len(data) == 0
assert record.content_type == 22
assert record.version.major == 254
assert record.version.minor == 255
assert record.epoch == 0
assert record.sequence_number == 3
assert record.length == 211
# Handshake
handshake = record.payload
assert handshake.type == 12
assert handshake.length == 199
assert handshake.message_seq == 3
assert handshake.fragment_offset == 0
assert handshake.fragment_length == 199
# Server Key Exchange
key_exchange = record.payload.payload
|
DinoTools/python-flextls
|
tests/test_dtls_1_0.py
|
Python
|
lgpl-3.0
| 15,918
|
[
"FEFF"
] |
22b646270600da7414ebfc9e8ad8076c1ce9f6b1a4b73435a14caaf0bc00bc02
|
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import glob
import astropy.io.fits as pyfits
import scipy.ndimage as nd
from astropy.modeling import models, fitting
plt.ion()
fnames = glob.glob('/Users/mireland/data/veloce/Feb28/QE*fits')
focus = [int(f[-8:-5]) for f in fnames]
fnames = glob.glob('/Users/mireland/data/veloce/Feb28/Qe*fits')
focus = np.zeros( (len(fnames)) )
fnames = glob.glob('/Users/mireland/data/veloce/Mar01/QT*fits')
focus = [int(f[-8:-5]) for f in fnames]
nstart=8
nfirst=3
threshold=500
display_it=False
x_ignore = 100 #500 for etalon data.
#------------------------------------------
szx = pyfits.getheader(fnames[0])['NAXIS1']
szy = pyfits.getheader(fnames[0])['NAXIS2']
#First lets read in the first few frames, and take a median to search for
#bright lines. We do this to avoid cosmic rays.
first_data = np.zeros((nfirst,szy,szx))
for ix, fn in enumerate(fnames[nstart:nstart+nfirst]):
first_data[ix] = pyfits.getdata(fn)
med_data = np.median(first_data, axis=0)
#Now lets look for possible peaks amongst the difference between this median
#frame and a smoothed version of it.
smoothim = nd.median_filter(med_data,(3,3))
wpeaks = np.where(med_data-smoothim > threshold)
#Lets manually make sure we only pick each peak once by just using the first coordinate
#we find within 5 pixels.
peaks = []
nfound = 0
for xix, yix in zip(wpeaks[1], wpeaks[0]):
    if xix < x_ignore or yix < 100 or xix > 4100 or yix > 4000:
continue
surrounding_pix = med_data[yix-5:yix+5,xix-5:xix+5]
if np.max(surrounding_pix) > 40000:
continue
duplicate=False
for p in peaks:
dpeak = np.sqrt( (p[0]-xix)**2 + (p[1]-yix)**2 )
if dpeak < 12:
duplicate=True
continue
if not duplicate:
peaks.append([int(xix),int(yix)])
nfound += 1
if (nfound % 100 == 0):
print("Done {:d} peaks".format(nfound))
peaks = np.array(peaks)
#Now lets run through the images and fit a 2D Gaussian in the vicinity of each peak.
peakparams = np.zeros( (len(fnames),peaks.shape[0], 6) )
hw = 5
fit_p = fitting.LevMarLSQFitter()
xy_fit = np.meshgrid(np.arange(2*hw), np.arange(2*hw))
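# Note (added): the 2.35 factors used below convert a Gaussian standard
# deviation to a full width at half maximum, FWHM = 2*sqrt(2*ln 2)*sigma
# ~= 2.3548*sigma.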
for j, fn in enumerate(fnames):
print("Doing file {:d} of {:d}".format(j, len(fnames)))
dd = pyfits.getdata(fn)
for i, xyix in enumerate(peaks):
subarr = dd[xyix[1]-hw:xyix[1]+hw, xyix[0]-hw:xyix[0]+hw]
subarr -= np.median(subarr)
if (False):
plt.clf()
plt.imshow(subarr)
plt.pause(0.001)
xymax = np.argmax(subarr)
xymax = np.unravel_index(xymax, subarr.shape)
p_init = models.Gaussian2D(amplitude=np.max(subarr),x_mean=xymax[1], y_mean=xymax[0],
x_stddev = 3.0, y_stddev = 3.0, fixed={"theta":True})
p = fit_p(p_init, xy_fit[0], xy_fit[1], subarr)
peakparams[j,i] = [p.amplitude.value, p.x_mean.value, p.y_mean.value, p.x_stddev.value, p.y_stddev.value, p.theta.value]
if ((peakparams[j,i,4]<1.5/2.35) and (i != 2) and display_it==True):
print(focus[j])
print(xyix)
print(p)
plt.clf()
plt.imshow(subarr)
plt.plot(peakparams[j,i,1], peakparams[j,i,2], 'x')
plt.pause(.001)
#import pdb; pdb.set_trace()
focus = np.array(focus)
if np.max(np.abs(focus)) > 0:
wfoc = np.where((focus > 150) & (focus < 180))[0]
spectral_best = np.mean(peakparams[wfoc,:,4], axis=0)
spat_ix=-3 #-3 for best. Also 9
for j in np.arange(0,2):
plt.figure(j+1)
plt.clf()
if j==0:
#Restrict the y coordinate
wc = np.where( (peaks[:,1]>500) * (peaks[:,1] < 3500))[0]
else:
wc = np.where( (peaks[:,0]>500) * (peaks[:,0] < 3500))[0]
plt.plot(peaks[wc,j], peakparams[spat_ix,wc,3]*2.35,'.', label='Spatial')
plt.plot(peaks[wc,j], peakparams[spat_ix,wc,4]*2.35,'.', label='Spectral')
plt.axis([100,4000,0,2.5])
plt.legend()
plt.ylabel('FWHM')
if j==0:
plt.xlabel('x pix')
else:
plt.xlabel('y pix')
|
mikeireland/veloce
|
utils/fwhm_computation.py
|
Python
|
gpl-3.0
| 4,129
|
[
"Gaussian"
] |
a7c7e8bbfbed471c21ed96731a0f41644b7759e7b6fa3ba34dd001c757689a7b
|
"""
cache v0.01
simple object serializing cache wrapper and client side partitioner for redis
Copyright 2012 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
import cPickle
import json
import hashlib
import sys
# DEBUG
#
sys.path.append('/usr/local/lib/python2.7/dist-packages/')
try:
import redis
using_redis = True
print "cache: redis module detected"
except:
redis = None
using_redis = False
#simple pickling cache wrapper for redis
#if the optional redis server isn't being used, all functions return None
class cache:
def __init__(self):
self.using_redis = using_redis
self.serialize_with = 'JSON' #or 'cPickle' or 'None'
self.partitions = 1 #default 1 (one redis instance)
#each additional instance assumes +1 to the last port number
self.partition_history = {'0':0,'1':0,'2':0,'3':0,'4':0,'5':0,'6':0,'7':0}
self.port = 6379
self.r = None
if self.using_redis:
try:
self.r = redis.StrictRedis(host='127.0.0.1',port=self.port,db=0)
#self.r = redis.Redis(unix_socket_path='/tmp/redis.sock')
self.r.get('testing connection')
except:
print "cache: can't connect to redis server, caching disabled"
self.using_redis = False
def select_partition(self,key):
partition_index = int(hashlib.md5(key).hexdigest()[0],16)%self.partitions
self.partition_history[str(partition_index)] += 1
target_port = partition_index + 6379
try:
del self.r
self.r = redis.StrictRedis(host='127.0.0.1',port=target_port,db=0)
except:
print "cache: can't connect to redis server, caching disabled",target_port
self.using_redis = False
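    # Illustrative note (not part of the original class): partitioning takes
    # the first nibble of the key's md5 hex digest mod self.partitions; e.g.
    # with partitions = 4, a key whose digest starts with 'a' (10) maps to
    # partition 10 % 4 = 2, i.e. the redis instance on port 6379 + 2 = 6381.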
def set(self,key,value):
if self.using_redis:
if self.partitions > 1:
self.select_partition(key)
#print "cache: set",key
if self.serialize_with == 'cPickle':
return self.r.set(key,cPickle.dumps(value))
elif self.serialize_with == 'JSON':
return self.r.set(key,json.dumps(value))
elif self.serialize_with == 'None':
return self.r.set(key,value)
else:
raise Exception("Serializer not supported")
else:
return None
def get(self,key):
if self.using_redis:
if self.partitions > 1:
self.select_partition(key)
#print "cache: get",key
value = self.r.get(key)
            if value is None:
return None
else:
if self.serialize_with == 'cPickle':
return cPickle.loads(value)
elif self.serialize_with == 'JSON':
return json.loads(value)
elif self.serialize_with == 'None':
return value
else:
raise Exception("Serializer not supported")
else:
return None
def expire(self,key,expiration):
if self.using_redis:
if self.partitions > 1:
self.select_partition(key)
print "cache: set expire",key
return self.r.expire(key,expiration)
return None
def disable(self):
print "cache: disabled"
self.using_redis = False
return
if __name__ == '__main__':
import os
import string
import time
import random
#do some testing
print 'building the data sets'
#xlarge = os.urandom(100000000) #100MB
#large = os.urandom(10000000) #10MB
#med = os.urandom(1000000) #1MB
#small = os.urandom(100000) #100KB
    xlarge = range(10000) #10K elements
    large = range(1000)   #1K elements
    med = range(100)      #100 elements
    small = range(10)     #10 elements
key_set1 = []
key_set2 = []
key_set3 = []
print 'generating the keys'
for i in range(1000):
key_set1.append(''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(10)))
key_set2.append(''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(10)))
key_set3.append(''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(10)))
for serializer in ['cPickle','JSON','None']:
c = cache()
c.partitions = 1
c.serialize_with = serializer
for dataset in [xlarge,large,med,small]:
#timer start
start = time.time()
for key_set in [key_set1,key_set2,key_set3]:
for key in key_set:
c.set(key,dataset)
#timer stop
stop = time.time()
print "Serializer:",serializer,", Keys:",len(key_set),", Value length:",len(dataset),", Total value length",(len(key_set) * len(dataset))/1000000.0,", Elapsed Time",stop-start
print "-"*80
print c.partition_history
|
khoffrath/ga-bitbot
|
libs/cache.py
|
Python
|
gpl-3.0
| 5,642
|
[
"Brian"
] |
72fc5540a9003a44c7c2e6bac1ac4f9d0651d5f34f2e179842bc18165424f119
|
import unittest
import os
from io import StringIO
from molutils.util.molecule import Molecule
from molutils.util.periodic_table import lookup_element_by_symbol
from molutils.util.job_formatters.psi4 import Psi4JobFormatter
PATH_TO_PSI4 = "/opt/psi4/bin/psi4.run"
DIMER_XYZ_FILE = (
"21\n"
"molecule_title\n"
"C 0.00000000 0.00000000 0.00000000\n"
"N 0.00000000 0.00000000 1.37405500\n"
"C 1.26815400 0.00000000 1.80843900\n"
"N 2.07877900 -0.02792200 0.74098800\n"
"C 1.31304500 -0.01765900 -0.39985000\n"
"C -1.17649400 0.19094100 2.22511900\n"
"C 3.53427200 0.12780900 0.79070400\n"
"F 1.09782400 2.71748500 1.13271500\n"
"B 1.51343800 2.90805500 2.48894200\n"
"F 0.55290600 2.25488800 3.33063400\n"
"F 1.62575900 4.25335000 2.80352900\n"
"F 2.76311600 2.22460800 2.65697000\n"
"H 1.58129100 0.09619200 2.83199800\n"
"H 1.75047000 -0.01157500 -1.38238800\n"
"H -0.91045500 0.02428600 -0.57204000\n"
"H -1.96615300 -0.46999100 1.87769000\n"
"H -0.90427100 -0.04968500 3.24681200\n"
"H -1.47152400 1.23473700 2.17603800\n"
"H 3.87180400 -0.11397200 1.79244500\n"
"H 3.97984900 -0.54952400 0.06697900\n"
"H 3.77751100 1.16445100 0.57797700\n")
WATER_XYZ_FILE = (
"3\n"
"\n"
"O 0.008165 -0.658276 0.215737\n"
"H 1.508729 0.441434 0.259102\n"
"H -1.419465 0.543189 0.243811\n"
)
NITROGEN_ATOM = (
"1\n"
"\n"
"N 0.00 0.00 0.00"
)
PSI4_OUTPUT = (
" ==> Geometry <==\n"
"\n"
" Molecular point group: c1\n"
" Full point group: C1\n"
"\n"
" Geometry (in Angstrom), charge = 0, multiplicity = 1:\n"
"\n"
" Center X Y Z Mass\n"
" ------------ ----------------- ----------------- ----------------- -----------------\n"
" F -2.193629820047 -1.855516068908 -1.383582467737 18.998403224000\n"
" C -1.172015920047 -1.019644636908 -1.218746331737 12.000000000000\n"
" C 0.114669063953 -1.517962241908 -1.059929846737 12.000000000000\n"
" F 0.325520175953 -2.831149664908 -1.070726500737 18.998403224000\n"
" F -2.628991937047 0.828237203092 -1.313676415737 18.998403224000\n"
" C -1.394261984047 0.350968436092 -1.184005598737 12.000000000000\n"
" C -0.330144528047 1.222797545092 -0.992235587737 12.000000000000\n"
" F -0.545459093047 2.533419542092 -0.947497268737 18.998403224000\n"
" C 1.178862928953 -0.646155231908 -0.868311389737 12.000000000000\n"
" F 2.407922502953 -1.124703761908 -0.705774696737 18.998403224000\n"
" C 0.957567807953 0.724990373092 -0.837317381737 12.000000000000\n"
" F 1.975930935953 1.559110990092 -0.661179892737 18.998403224000\n"
" H -1.644566974047 -1.210383778908 2.174557304263 1.007825032070\n"
" C -0.812314959047 -0.527350407908 2.281532461263 12.000000000000\n"
" H -2.042421771047 1.234253206092 2.190249710263 1.007825032070\n"
" C -1.036491811047 0.849083608092 2.292233299263 12.000000000000\n"
" C 0.484176730953 -1.023293069908 2.416217104263 12.000000000000\n"
" H 0.658329572953 -2.091089033908 2.410131913263 1.007825032070\n"
" C 0.035280831953 1.729129498092 2.439058175263 12.000000000000\n"
" H -0.138951482047 2.796453733092 2.446813182263 1.007825032070\n"
" C 1.331069796953 1.232787556092 2.574283147263 12.000000000000\n"
" H 2.162281353953 1.915280645092 2.685863082263 1.007825032070\n"
" C 1.555807218953 -0.143098610908 2.562921850263 12.000000000000\n"
" H 2.561079041953 -0.528098246908 2.666538093263 1.007825032070\n"
"\n"
" Running in c1 symmetry.\n"
"\n"
)
class MoleculeTest(unittest.TestCase):
@staticmethod
def get_sorted_atom_string(molecule):
return ''.join(sorted([atom[0] for atom in molecule]))
def test_read(self):
molecule = Molecule.from_xyz_file(StringIO(DIMER_XYZ_FILE))
self.assertEqual(len(molecule.atom_list), 21)
self.assertEqual(len(molecule), 21)
self.assertEqual(self.get_sorted_atom_string(molecule), 'BCCCCCFFFFHHHHHHHHHNN')
molecule = Molecule.from_psi4_output(StringIO(PSI4_OUTPUT))
self.assertEqual(len(molecule.atom_list), 24)
self.assertEqual(len(molecule), 24)
self.assertEqual(self.get_sorted_atom_string(molecule), 'CCCCCCCCCCCCFFFFFFHHHHHH')
def test_fragment(self):
molecule = Molecule.from_xyz_file(StringIO(DIMER_XYZ_FILE))
# Create two fragments and sort them by number of atoms
fragments = sorted(molecule.fragment(2), key=len)
# Check that two fragments are created
self.assertEqual(len(fragments), 2)
        # The first fragment should have five atoms, BFFFF
self.assertEqual(len(fragments[0]), 5)
self.assertEqual(self.get_sorted_atom_string(fragments[0]), 'BFFFF')
# The second fragment should have 16 atoms, CCCCCHHHHHHHHHNN
self.assertEqual(len(fragments[1]), 16)
self.assertEqual(self.get_sorted_atom_string(fragments[1]), 'CCCCCHHHHHHHHHNN')
def test_z_sum(self):
molecule = Molecule.from_xyz_file(StringIO(WATER_XYZ_FILE))
self.assertEqual(molecule.get_z_sum(), 10)
def test_lookup_atom_z(self):
self.assertEqual(lookup_element_by_symbol('H')[0], 1)
self.assertEqual(lookup_element_by_symbol('h')[0], 1)
self.assertIsNone(lookup_element_by_symbol('zz'))
def test_possible_charges(self):
ionic_dimer = Molecule.from_xyz_file(StringIO(DIMER_XYZ_FILE))
fragments = sorted(ionic_dimer.fragment(2), key=len)
self.assertEqual(ionic_dimer.get_possible_charges(), [0])
self.assertEqual(fragments[0].get_possible_charges(), [-1, 1])
self.assertEqual(fragments[1].get_possible_charges(), [-1, 1])
water = Molecule.from_xyz_file(StringIO(WATER_XYZ_FILE))
self.assertEqual(water.get_possible_charges(), [0])
nitrogen_atom = Molecule.from_xyz_file(StringIO(NITROGEN_ATOM))
self.assertEqual(nitrogen_atom.get_possible_charges(multiplicity=4), [0])
self.assertEqual(nitrogen_atom.get_possible_charges(multiplicity=1), [-1, 1])
def test_guess_charge(self):
if not os.path.isfile(PATH_TO_PSI4) or not os.access(PATH_TO_PSI4, os.X_OK):
self.skipTest("No Psi4 executable found.")
molecule = Molecule.from_xyz_file(StringIO(DIMER_XYZ_FILE), PATH_TO_PSI4)
fragments = sorted(molecule.fragment(2), key=len)
self.assertEqual(molecule.guess_charge(), 0)
self.assertEqual(fragments[0].guess_charge(), -1)
self.assertEqual(fragments[1].guess_charge(), 1)
nitrogen_atom = Molecule.from_xyz_file(StringIO(NITROGEN_ATOM), PATH_TO_PSI4)
self.assertEqual(nitrogen_atom.guess_charge(multiplicity=4), 0)
self.assertEqual(nitrogen_atom.guess_charge(multiplicity=1), -1)
def test_efp_atom_padding(self):
molecule = Molecule('', [('Cl', 0.0, 0.0, 0.0)], 1, 1)
molecule.efp_pad_dummy_atoms()
self.assertEqual(len(molecule), 3)
molecule = Molecule('', [('Cl', 0.0, 0.0, 0.0), ('Cl', 0.5, 0.0, 0.0),], 0, 1)
molecule.efp_pad_dummy_atoms()
self.assertEqual(len(molecule), 3)
def test_sapt_job_format(self):
expected_output = (
"memory 250 mb\n"
"molecule molecule_title2 {\n"
"-1 1\n"
" F 1.0978240000 2.7174850000 1.1327150000\n"
" B 1.5134380000 2.9080550000 2.4889420000\n"
" F 1.6257590000 4.2533500000 2.8035290000\n"
" F 2.7631160000 2.2246080000 2.6569700000\n"
" F 0.5529060000 2.2548880000 3.3306340000\n"
"--\n"
"1 1\n"
" C 0.0000000000 0.0000000000 0.0000000000\n"
" H -0.9104550000 0.0242860000 -0.5720400000\n"
" C 1.3130450000 -0.0176590000 -0.3998500000\n"
" H 1.7504700000 -0.0115750000 -1.3823880000\n"
" N 0.0000000000 0.0000000000 1.3740550000\n"
" C 1.2681540000 0.0000000000 1.8084390000\n"
" H 1.5812910000 0.0961920000 2.8319980000\n"
" N 2.0787790000 -0.0279220000 0.7409880000\n"
" C -1.1764940000 0.1909410000 2.2251190000\n"
" H -0.9042710000 -0.0496850000 3.2468120000\n"
" H -1.4715240000 1.2347370000 2.1760380000\n"
" H -1.9661530000 -0.4699910000 1.8776900000\n"
" C 3.5342720000 0.1278090000 0.7907040000\n"
" H 3.8718040000 -0.1139720000 1.7924450000\n"
" H 3.7775110000 1.1644510000 0.5779770000\n"
" H 3.9798490000 -0.5495240000 0.0669790000\n"
"}\n"
"\n"
"set {\n"
" guess sad\n"
" basis_guess 3-21G\n"
" basis cc-pVTZ\n"
" scf_type DF\n"
" freeze_core True\n"
"}\n"
"energy('sapt0')\n"
)
molecule = Molecule.from_xyz_file(StringIO(DIMER_XYZ_FILE), PATH_TO_PSI4)
fragments = sorted(molecule.fragment(2), key=len)
fragments[0].charge = -1
fragments[0].multiplicity = 1
fragments[1].charge = 1
fragments[1].multiplicity = 1
job = Psi4JobFormatter(fragments).energy("sapt0")
self.assertEqual(job, expected_output)
|
jasonrig/molecule-utils
|
tests/molecule_tests.py
|
Python
|
mit
| 10,132
|
[
"Psi4"
] |
b7e2fc5e470cc5a3427359bcb7310f6fb93b53d888530d78d3d382095dc0cb5d
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
import warnings
from sklearn.datasets import load_digits
from sklearn.cross_validation import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
"""
Gaussian Naive Bayes classification.
This checks that GaussianNB implements fit and predict and returns
correct values for a simple toy dataset.
"""
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
def test_gnb_prior():
"""Test whether class priors are properly set. """
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_discrete_prior():
"""Test whether class priors are properly set. """
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
"""Test Multinomial Naive Bayes classification.
This checks that MultinomialNB implements fit and predict and returns
correct values for a simple toy dataset.
"""
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_discretenb_pickle():
"""Test picklability of discrete naive Bayes classifiers"""
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
"""Test input checks for the fit method"""
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
"""Test discrete NB classes' probability scores"""
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
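    # Sketch (not asserted here) of why the 100s matter: BernoulliNB
    # binarizes its input, so [1, 100, 0] is effectively [1, 1, 0], while
    # MultinomialNB treats 100 as an event count that dominates that feature.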
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
"""Test whether discrete NB classes fit a uniform prior
when fit_prior=False and class_prior=None"""
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
"""Test whether discrete NB classes use provided prior"""
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
    # Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
"""coef_ and intercept_ should have shapes as in other linear models.
Non-regression test for issue #2127.
"""
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
|
loli/sklearn-ensembletrees
|
sklearn/tests/test_naive_bayes.py
|
Python
|
bsd-3-clause
| 11,994
|
[
"Gaussian"
] |
a6b80bfbd46e30978a92cbdcb99da4e5a7a814ec1714286f60c69b04d771cad0
|
from ase import Atom
from ase.units import Hartree
from gpaw import GPAW, FermiDirac
from gpaw.cluster import Cluster
from gpaw.test import equal
h = 0.3
box = 4.
energy_tolerance = 0.0004
s = Cluster([Atom('Zn')])
s.minimal_box(box, h=h)
E = {}
E_U = {}
for spin in [0, 1]:
c = GPAW(h=h, spinpol=spin,
eigensolver='cg',
charge=1, occupations=FermiDirac(width=0.1, fixmagmom=spin)
)
s.set_calculator(c)
E[spin] = s.get_potential_energy()
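    # (':d,3.0,1' below is GPAW's inline Hubbard-U setup string; as assumed
    # here, it applies U = 3.0 eV to the d channel, with the trailing flag
    # selecting the scaled variant of the correction.)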
c.set(setups=':d,3.0,1')
E_U[spin] = s.get_potential_energy()
print "E=", E
equal(E[0], E[1], energy_tolerance)
print "E_U=", E_U
equal(E_U[0], E_U[1], energy_tolerance)
|
robwarm/gpaw-symm
|
gpaw/test/Hubbard_U_Zn.py
|
Python
|
gpl-3.0
| 670
|
[
"ASE",
"GPAW"
] |
975c7053f5cacf1b49948d2ad74bc42ad3a2f4a4a6cd1d88e85943e0ca29ba57
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Jun 9, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jun 9, 2012"
import unittest
import os
from pymatgen.matproj.rest import MPRester, MPRestError
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure, Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
from pymatgen.phasediagram.pdmaker import PhaseDiagram
from pymatgen.phasediagram.pdanalyzer import PDAnalyzer
from pymatgen.io.cif import CifParser
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
@unittest.skipIf("MAPI_KEY" not in os.environ,
"MAPI_KEY environment variable not set.")
class MPResterTest(unittest.TestCase):
def setUp(self):
self.rester = MPRester()
def test_get_data(self):
props = ["energy", "energy_per_atom", "formation_energy_per_atom",
"nsites", "unit_cell_formula", "pretty_formula", "is_hubbard",
"elements", "nelements", "e_above_hull", "hubbards",
"is_compatible", "task_ids",
"density", "icsd_ids", "total_magnetization"]
# unicode literals have been reintroduced in py>3.2
expected_vals = [-191.33812137, -6.833504334642858, -2.551358929370749,
28, {k: v for k, v in {'P': 4, 'Fe': 4, 'O': 16, 'Li': 4}.items()},
"LiFePO4", True, ['Li', 'O', 'P', 'Fe'], 4, 0.0,
{k: v for k, v in {'Fe': 5.3, 'Li': 0.0, 'O': 0.0, 'P': 0.0}.items()}, True,
[u'mp-601412', u'mp-19017', u'mp-796535', u'mp-797820',
u'mp-540081', u'mp-797269'],
3.4662026991351147,
[159107, 154117, 160776, 99860, 181272, 166815,
260571, 92198, 165000, 155580, 38209, 161479, 153699,
260569, 260570, 200155, 260572, 181341, 181342,
72545, 56291, 97764, 162282, 155635],
16.0002716]
for (i, prop) in enumerate(props):
if prop not in ['hubbards', 'unit_cell_formula', 'elements',
'icsd_ids', 'task_ids']:
val = self.rester.get_data("mp-19017", prop=prop)[0][prop]
self.assertAlmostEqual(expected_vals[i], val)
elif prop in ["elements", "icsd_ids", "task_ids"]:
self.assertEqual(set(expected_vals[i]),
set(self.rester.get_data("mp-19017",
prop=prop)[0][prop]))
else:
self.assertEqual(expected_vals[i],
self.rester.get_data("mp-19017",
prop=prop)[0][prop])
props = ['structure', 'initial_structure', 'final_structure', 'entry']
for prop in props:
obj = self.rester.get_data("mp-19017", prop=prop)[0][prop]
if prop.endswith("structure"):
self.assertIsInstance(obj, Structure)
elif prop == "entry":
obj = self.rester.get_data("mp-19017", prop=prop)[0][prop]
self.assertIsInstance(obj, ComputedEntry)
#Test chemsys search
data = self.rester.get_data('Fe-Li-O', prop='unit_cell_formula')
self.assertTrue(len(data) > 1)
elements = {Element("Li"), Element("Fe"), Element("O")}
for d in data:
self.assertTrue(
set(Composition(d['unit_cell_formula']).elements).issubset(
elements))
self.assertRaises(MPRestError, self.rester.get_data, "Fe2O3",
"badmethod")
def test_get_materials_id_from_task_id(self):
self.assertEqual(self.rester.get_materials_id_from_task_id(
"mp-540081"), "mp-19017")
def test_get_materials_id_references(self):
# nosetests pymatgen/matproj/tests/test_rest.py:MPResterTest.test_get_materials_id_references
# self.rester points to rest/v2 by default which doesn't have the refs endpoint
m = MPRester(endpoint="https://www.materialsproject.org/rest")
data = m.get_materials_id_references('mp-123')
self.assertTrue(len(data) > 1000)
def test_find_structure(self):
# nosetests pymatgen/matproj/tests/test_rest.py:MPResterTest.test_find_structure
# self.rester points to rest/v2 by default which doesn't have the find_structure endpoint
m = MPRester(endpoint="https://www.materialsproject.org/rest")
ciffile = os.path.join(test_dir, 'Fe3O4.cif')
data = m.find_structure(ciffile)
self.assertTrue(len(data) > 1)
s = CifParser(ciffile).get_structures()[0]
data = m.find_structure(s)
self.assertTrue(len(data) > 1)
def test_get_entries_in_chemsys(self):
syms = ["Li", "Fe", "O"]
entries = self.rester.get_entries_in_chemsys(syms)
elements = set([Element(sym) for sym in syms])
for e in entries:
self.assertIsInstance(e, ComputedEntry)
self.assertTrue(set(e.composition.elements).issubset(elements))
def test_get_structure_by_material_id(self):
s1 = self.rester.get_structure_by_material_id("mp-1")
self.assertEqual(s1.formula, "Cs1")
def test_get_entry_by_material_id(self):
e = self.rester.get_entry_by_material_id("mp-19017")
self.assertIsInstance(e, ComputedEntry)
        self.assertEqual(e.composition.reduced_formula, "LiFePO4")
def test_query(self):
criteria = {'elements': {'$in': ['Li', 'Na', 'K'], '$all': ['O']}}
props = ['pretty_formula', 'energy']
data = self.rester.query(criteria=criteria, properties=props)
self.assertTrue(len(data) > 6)
data = self.rester.query(criteria="*2O", properties=props)
self.assertGreaterEqual(len(data), 52)
self.assertIn("Li2O", (d["pretty_formula"] for d in data))
def test_get_exp_thermo_data(self):
data = self.rester.get_exp_thermo_data("Fe2O3")
self.assertTrue(len(data) > 0)
for d in data:
self.assertEqual(d.formula, "Fe2O3")
def test_get_dos_by_id(self):
dos = self.rester.get_dos_by_material_id("mp-2254")
self.assertIsInstance(dos, CompleteDos)
def test_get_bandstructure_by_material_id(self):
bs = self.rester.get_bandstructure_by_material_id("mp-2254")
self.assertIsInstance(bs, BandStructureSymmLine)
def test_get_structures(self):
structs = self.rester.get_structures("Mn3O4")
self.assertTrue(len(structs) > 0)
def test_get_entries(self):
entries = self.rester.get_entries("TiO2")
self.assertTrue(len(entries) > 1)
for e in entries:
self.assertEqual(e.composition.reduced_formula, "TiO2")
entries = self.rester.get_entries("TiO2", inc_structure="final")
self.assertTrue(len(entries) > 1)
for e in entries:
self.assertEqual(e.structure.composition.reduced_formula, "TiO2")
all_entries = self.rester.get_entries("Fe", compatible_only=False)
entries = self.rester.get_entries("Fe", compatible_only=True)
self.assertTrue(len(entries) < len(all_entries))
entries = self.rester.get_entries("Fe", compatible_only=True,
property_data=["cif"])
self.assertIn("cif", entries[0].data)
def test_get_exp_entry(self):
entry = self.rester.get_exp_entry("Fe2O3")
self.assertEqual(entry.energy, -825.5)
def test_submit_query_delete_snl(self):
s = Structure([[5, 0, 0], [0, 5, 0], [0, 0, 5]], ["Fe"], [[0, 0, 0]])
# d = self.rester.submit_snl(
# [s, s], remarks=["unittest"],
# authors="Test User <test@materialsproject.com>")
# self.assertEqual(len(d), 2)
# data = self.rester.query_snl({"about.remarks": "unittest"})
# self.assertEqual(len(data), 2)
# snlids = [d["_id"] for d in data]
# self.rester.delete_snl(snlids)
# data = self.rester.query_snl({"about.remarks": "unittest"})
# self.assertEqual(len(data), 0)
def test_get_stability(self):
entries = self.rester.get_entries_in_chemsys(["Fe", "O"])
modified_entries = []
for entry in entries:
# Create modified entries with energies that are 0.01eV higher
# than the corresponding entries.
if entry.composition.reduced_formula == "Fe2O3":
modified_entries.append(
ComputedEntry(entry.composition,
entry.uncorrected_energy + 0.01,
parameters=entry.parameters,
entry_id="mod_{}".format(entry.entry_id)))
rest_ehulls = self.rester.get_stability(modified_entries)
all_entries = entries + modified_entries
compat = MaterialsProjectCompatibility()
all_entries = compat.process_entries(all_entries)
pd = PhaseDiagram(all_entries)
a = PDAnalyzer(pd)
for e in all_entries:
if str(e.entry_id).startswith("mod"):
for d in rest_ehulls:
if d["entry_id"] == e.entry_id:
data = d
break
self.assertAlmostEqual(a.get_e_above_hull(e),
data["e_above_hull"])
def test_get_reaction(self):
rxn = self.rester.get_reaction(["Li", "O"], ["Li2O"])
self.assertIn("Li2O", rxn["Experimental_references"])
def test_parse_criteria(self):
crit = MPRester.parse_criteria("mp-1234 Li-*")
self.assertIn("Li-O", crit["$or"][1]["chemsys"]["$in"])
self.assertIn({"task_id": "mp-1234"}, crit["$or"])
crit = MPRester.parse_criteria("Li2*")
self.assertIn("Li2O", crit["pretty_formula"]["$in"])
self.assertIn("Li2I", crit["pretty_formula"]["$in"])
self.assertIn("CsLi2", crit["pretty_formula"]["$in"])
crit = MPRester.parse_criteria("Li-*-*")
self.assertIn("Li-Re-Ru", crit["chemsys"]["$in"])
self.assertNotIn("Li-Li", crit["chemsys"]["$in"])
comps = MPRester.parse_criteria("**O3")["pretty_formula"]["$in"]
for c in comps:
self.assertEqual(len(Composition(c)), 3, "Failed in %s" % c)
chemsys = MPRester.parse_criteria("{Fe,Mn}-O")["chemsys"]["$in"]
self.assertEqual(len(chemsys), 2)
comps = MPRester.parse_criteria("{Fe,Mn,Co}O")["pretty_formula"]["$in"]
self.assertEqual(len(comps), 3, comps)
#Let's test some invalid symbols
self.assertRaises(KeyError, MPRester.parse_criteria, "li-fe")
self.assertRaises(KeyError, MPRester.parse_criteria, "LO2")
crit = MPRester.parse_criteria("POPO2")
self.assertIn("P2O3", crit["pretty_formula"]["$in"])
if __name__ == "__main__":
unittest.main()
|
migueldiascosta/pymatgen
|
pymatgen/matproj/tests/test_rest.py
|
Python
|
mit
| 11,617
|
[
"pymatgen"
] |
69759040a89136c72e079a8a65b72c63a7a8e3afe67fe233487505729ab7231e
|
# net.py
# A dead-simple neural network
#
# Potential improvements:
# - Support topologies other than 1 hidden layer
# - Add other training strategies (adaptive learning rate, momentum)
# - Add more error checking
import numpy as np
class Net(object):
"""
Neural network class with:
- 1 hidden layer
- Arbitrary number of neurons in each layer
- Sigmoid activation function f(x) = 1 / (1 + exp(-x))
- Bias nodes in input/hidden layers
- Gradient descent training
:Parameters:
inp_range: list (num_inputs x 2)
List of input ranges
Input values are scaled so that (min -> -1) and (max -> +1)
hidden_count: int
Number of neurons in the hidden layer
output_count: int
Number of neurons in the output layer (= number of output signals)
:Internals:
size_inp, size_hid, size_out: ints
Number of neurons in input, hidden, and output layers
w_in: 2D array
Connections between input and hidden, so that
hidden = activate(w_in * input)
        w_out: 2D array
            Connections between hidden and output, so that
            output = w_out * hidden
:Example:
# Two inputs (ranges [1,2] and [3,4])
# One hidden layer (2 neurons)
# One output layer (1 neuron)
net = Net([[1, 2], [3, 4]], 2, 1)
"""
def __init__(self, inp_range, hidden_count, output_count):
# Make sure input matrix is Nx2 (ie: min + max for each)
        try:
            assert np.shape(inp_range)[1] == 2
        except (IndexError, AssertionError):
            raise ValueError("Invalid shape for inp_range - need [min, max] for each node")
# Size of each layer
self.size_inp = np.shape(inp_range)[0]
self.size_hid = hidden_count
self.size_out = output_count
# Input ranges
self.inp_range = np.array(inp_range)
self.inp_min = np.reshape(self.inp_range[:, 0], [self.size_inp, -1])
self.inp_max = np.reshape(self.inp_range[:, 1], [self.size_inp, -1])
self.inp_span = self.inp_max - self.inp_min
# Random connections
self.w_in = np.random.random([self.size_hid, self.size_inp+1])-0.5
self.w_out = np.random.random([self.size_out, self.size_hid+1])-0.5
def _activate(self, layer):
"""
Perform the activation function on each neuron in the layer
Used on hidden layers
"""
return np.array([1 / (1 + np.exp(-x)) for x in layer])
def _addBias(self, layer):
"""
Add a bias node to the current layer
(ie: a neuron that always outputs 1)
"""
return np.vstack((layer, [1]))
def _scaleInput(self, input):
"""
Scale the input values such that:
Input | Output
--------------+--------
min | -1
(min+max)/2 | 0
max | +1
:Inputs:
input: 1D list of length size_inp
:Returns:
1D list of same length, with all values scaled (as described)
"""
        scaled = (input - self.inp_min) / self.inp_span
        return 2.0 * scaled - 1.0
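    # Example of the affine map above (sketch): with inp_range == [[0, 10]],
    # an input of 0 maps to -1, 5 maps to 0, and 10 maps to +1, i.e.
    #   scaled = 2 * (x - min) / (max - min) - 1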
def _forwardProp(self, input):
"""
Perform forward propagation, saving the values at each layer
:Inputs:
input: a (size_inp x 1) array
:Returns:
A list of 3 1D arrays of length (size_inp, size_hid, size_out)
Calculated values at each node in all 3 layers
"""
# Save intermediate results
o = []
# Input
layer = np.array(input, copy=True)
layer = np.reshape(layer, [-1, 1])
layer = self._scaleInput(layer)
layer = self._addBias(layer)
o.append(layer)
# Hidden
layer = self._activate(np.dot(self.w_in, layer))
layer = self._addBias(layer)
o.append(layer)
# Output
layer = np.dot(self.w_out, layer)
o.append(layer)
return o
def _backProp(self, o, target):
"""
Perform backward propagation, calculating deltas at each node
:Inputs:
o: list of 3 arrays (1 x size_inp, 1 x size_hid, 1 x size_out)
Output values at each node
        target: (1 x size_out) array
Expected values at output layer
:Returns:
List of 2 arrays (1 x size_hid, 1 x size_out)
Delta_j values at each node in hidden and output layers
"""
delta = [[] for _ in range(2)]
delta[0] = np.zeros([self.size_hid, 1])
# Output delta
delta[1] = o[2] - np.reshape(target, [-1, 1])
# Hidden delta
for j in range(self.size_hid):
for k in range(self.size_out):
delta[0][j] += delta[1][k] * self.w_out[k][j]
delta[0][j] *= o[1][j] * (1 - o[1][j])
return delta
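    # Vectorized equivalent of the hidden-layer loop above (sketch):
    #   h = o[1][:-1]                       # hidden activations, bias dropped
    #   delta[0] = np.dot(self.w_out[:, :-1].T, delta[1]) * h * (1 - h)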
def sim(self, input):
"""
Find the output values, given a set of input values
:Inputs:
input: a 1D array of length size_inp
:Returns:
A 1D array of length size_out
"""
# Make sure the input is the right size
        try:
            size = np.shape(input)[0]
            assert size == self.size_inp
        except (IndexError, AssertionError):
            raise ValueError("Expected input of size {}; got {}".format(self.size_inp, np.shape(input)))
# We only need to do forward propagation
input = np.reshape(np.array(input), [-1, 1])
return self._forwardProp(input)[2]
def train_1(self, input, target, lr):
"""
Perform forward propagation to get the output values,
then backward propagation to update the weights
Repeat for a number of tests
:Inputs:
input: 2D array
A <tests>x<size_inp> array. The input for test i is input[i, :]
target: 2D array
A <tests>x<size_out> array. The target for test i is target[i, :]
lr: float
Learning rate - preferably in the range (0, 1)
:Returns:
The error (SSE) accumulated over all of the inputs
"""
# Make sure the input is the right size
        try:
            size = np.shape(input)[1]
            assert size == self.size_inp
        except (IndexError, AssertionError):
            raise ValueError("Expected input of size {}; got {}".format(self.size_inp, np.shape(input)))
# Keep track of total error in tests
error = 0
# Optional: reorder test cases randomly (does this help?)
order = range(len(input))
np.random.shuffle(order)
for i in order:
# Forward propagation
o = self._forwardProp(input[i])
# Backward propagation
delta = self._backProp(o, target[i, :])
# Update weights
delta_w_out = -lr * np.dot(delta[1], np.reshape(o[1], [1, -1]))
delta_w_in = -lr * np.dot(delta[0], np.reshape(o[0], [1, -1]))
self.w_out += delta_w_out
self.w_in += delta_w_in
# Add this error to running total
error += np.sum((o[2] - np.reshape(target[i], [-1, 1]))**2)
return error
def train_many(self, input, target, lr, epochs, goal, verbose=False):
"""
Train on the same data set multiple times,
Stop as soon as target accuracy or max epochs are reached.
:Inputs:
input: 2D array
A <tests>x<size_inp> array. The input for test i is input[i, :]
target: 2D array
A <tests>x<size_out> array. The target for test i is target[i, :]
lr: float
Learning rate - preferably in the range (0, 1)
epochs: int
Maximum number of training iterations
goal: float
Target accuracy.
verbose: boolean
If true, print error at each epoch
:Returns:
A list of the error at each epoch
"""
error = []
for i in range(epochs):
nextError = self.train_1(input, target, lr)
if verbose:
print "{}: {}".format(i+1, nextError)
error.append(nextError)
if nextError < goal:
break
return error
def main():
"""
Test code: learn XOR with 3 hidden nodes
"""
# Set up neural net
net = Net([[0, 1], [0, 1]], 3, 1)
# Set up dataset
input = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]*20)
target = np.array([[0], [1], [1], [0] ]*20)
# Print it to check
#print input
#print target
# Train for 100 epochs
error = net.train_many(input, target, 0.4, 100, 0.01)
#print "\n".join(map(str, error))
# Check that we've learned everything
print net.sim([0, 0]) # 0
print net.sim([0, 1]) # 1
print net.sim([1, 0]) # 1
print net.sim([1, 1]) # 0
#print net.sim([1, 1, 1]) # ValueError()
if __name__ == "__main__":
main()
|
gregdeon/simple-ann
|
src/net.py
|
Python
|
mit
| 9,516
|
[
"NEURON"
] |
4ba6373dbd302adb7c20e68d14eb81bf626d0911c692f660e4889743478ccda3
|
from pyrocopy import pyrocopy
from setuptools import setup, find_packages
setup(
name='pyrocopy',
version=pyrocopy.__version_str__,
description='A suite of robust file copying utilities for Python.',
long_description="""pyrocopy is a suite of advanced file utility functions for efficiently copying all or part of a directory tree. It can be used as a module in your own application or run as a standalone command line tool.
Main Features
-------------
- Mirror Mode
- Sync Mode (bi-directional copy)
- Regular expression based filename and directory matching
- Configurable maximum tree depth traversal
- Detailed operation statistics
For complete documentation please visit the project page on `GitHub <https://github.com/caskater4/pyrocopy>`_.""",
url='https://github.com/caskater4/pyrocopy',
author='Jean-Philippe Steinmetz',
author_email='caskater47@gmail.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
],
keywords='file utilities admin filesystem copy move mirror sync',
packages=find_packages(exclude=['tests']),
install_requires=[],
entry_points={
'console_scripts': [
'pyrocopy=pyrocopy:main',
],
},
)
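# Typical usage (sketch, not part of the package): from a source checkout,
#     pip install .
# installs the library and exposes the 'pyrocopy' console script declared
# in entry_points above.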
|
caskater4/pyrocopy
|
setup.py
|
Python
|
mit
| 2,047
|
[
"VisIt"
] |
61c7c5c6d678f69439f971d063540de2e0ee87b8c992ba325f683ef41061450a
|
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python-3000 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
signature() - get a Signature object for the callable
"""
# This module is in the public domain. No warranties.
__author__ = ('Ka-Ping Yee <ping@lfw.org>',
'Yury Selivanov <yselivanov@sprymix.com>')
import ast
import importlib.machinery
import itertools
import linecache
import os
import re
import sys
import tokenize
import token
import types
import warnings
import functools
import builtins
from operator import attrgetter
from collections import namedtuple, OrderedDict
# Create constants for the compiler flags in Include/code.h
# We try to get them from dis to avoid duplication, but fall
# back to hardcoding so the dependency is optional
try:
from dis import COMPILER_FLAG_NAMES as _flag_names
except ImportError:
CO_OPTIMIZED, CO_NEWLOCALS = 0x1, 0x2
CO_VARARGS, CO_VARKEYWORDS = 0x4, 0x8
CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
else:
mod_dict = globals()
for k, v in _flag_names.items():
mod_dict["CO_" + v] = k
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
"""Return true if the object is a module.
Module objects provide these attributes:
__cached__ pathname to byte compiled file
__doc__ documentation string
__file__ filename (missing for built-in modules)"""
return isinstance(object, types.ModuleType)
def isclass(object):
"""Return true if the object is a class.
Class objects provide these attributes:
__doc__ documentation string
__module__ name of module in which this class was defined"""
return isinstance(object, type)
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
__func__ function object containing implementation of method
__self__ instance to which this method is bound"""
return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
"""Return true if the object is a method descriptor.
But not if ismethod() or isclass() or isfunction() are true.
This is new in Python 2.2, and, for example, is true of int.__add__.
An object passing this test has a __get__ attribute but not a __set__
attribute, but beyond that the set of attributes varies. __name__ is
usually sensible, and __doc__ often is.
Methods implemented via descriptors that also pass one of the other
tests return false from the ismethoddescriptor() test, simply because
the other tests promise more -- you can, e.g., count on having the
__func__ attribute (etc) when an object passes ismethod()."""
if isclass(object) or ismethod(object) or isfunction(object):
# mutual exclusion
return False
tp = type(object)
return hasattr(tp, "__get__") and not hasattr(tp, "__set__")
def isdatadescriptor(object):
"""Return true if the object is a data descriptor.
Data descriptors have both a __get__ and a __set__ attribute. Examples are
properties (defined in Python) and getsets and members (defined in C).
Typically, data descriptors will also have __name__ and __doc__ attributes
(properties, getsets, and members have both of these attributes), but this
is not guaranteed."""
if isclass(object) or ismethod(object) or isfunction(object):
# mutual exclusion
return False
tp = type(object)
return hasattr(tp, "__set__") and hasattr(tp, "__get__")
if hasattr(types, 'MemberDescriptorType'):
# CPython and equivalent
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.MemberDescriptorType)
else:
# Other implementations
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return False
if hasattr(types, 'GetSetDescriptorType'):
# CPython and equivalent
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.GetSetDescriptorType)
else:
# Other implementations
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return False
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
__code__ code object containing compiled function bytecode
__defaults__ tuple of any default values for arguments
__globals__ global namespace in which this function was defined
__annotations__ dict of parameter annotations
__kwdefaults__ dict of keyword only parameters with defaults"""
return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
"""Return true if the object is a user-defined generator function.
Generator function objects provides same attributes as functions.
See help(isfunction) for attributes listing."""
return bool((isfunction(object) or ismethod(object)) and
object.__code__.co_flags & CO_GENERATOR)
def isgenerator(object):
"""Return true if the object is a generator.
Generator objects provide these attributes:
__iter__ defined to support iteration over container
close raises a new GeneratorExit exception inside the
generator to terminate the iteration
gi_code code object
gi_frame frame object or possibly None once the generator has
been exhausted
gi_running set to 1 when generator is executing, 0 otherwise
next return the next item from the container
send resumes the generator and "sends" a value that becomes
the result of the current yield-expression
throw used to raise an exception inside the generator"""
return isinstance(object, types.GeneratorType)
def istraceback(object):
"""Return true if the object is a traceback.
Traceback objects provide these attributes:
tb_frame frame object at this level
tb_lasti index of last attempted instruction in bytecode
tb_lineno current line number in Python source code
tb_next next inner traceback object (called by this level)"""
return isinstance(object, types.TracebackType)
def isframe(object):
"""Return true if the object is a frame object.
Frame objects provide these attributes:
f_back next outer frame object (this frame's caller)
f_builtins built-in namespace seen by this frame
f_code code object being executed in this frame
f_globals global namespace seen by this frame
f_lasti index of last attempted instruction in bytecode
f_lineno current line number in Python source code
f_locals local namespace seen by this frame
f_trace tracing function for this frame, or None"""
return isinstance(object, types.FrameType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including * or ** args)
co_code string of raw compiled bytecode
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables"""
return isinstance(object, types.CodeType)
def isbuiltin(object):
"""Return true if the object is a built-in function or method.
Built-in functions and methods provide these attributes:
__doc__ documentation string
__name__ original name of this function or method
__self__ instance to which a method is bound, or None"""
return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object))
def isabstract(object):
"""Return true if the object is an abstract base class (ABC)."""
return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name.
Optionally, only return members that satisfy a given predicate."""
if isclass(object):
mro = (object,) + getmro(object)
else:
mro = ()
results = []
processed = set()
names = dir(object)
    # add any DynamicClassAttributes to the list of names if object is a class;
# this may result in duplicate entries if, for example, a virtual
# attribute with the same name as a DynamicClassAttribute exists
try:
for base in object.__bases__:
for k, v in base.__dict__.items():
if isinstance(v, types.DynamicClassAttribute):
names.append(k)
except AttributeError:
pass
for key in names:
# First try to get the value via getattr. Some descriptors don't
# like calling their __get__ (see bug #1785), so fall back to
# looking in the __dict__.
try:
value = getattr(object, key)
# handle the duplicate key
if key in processed:
raise AttributeError
except AttributeError:
for base in mro:
if key in base.__dict__:
value = base.__dict__[key]
break
else:
# could be a (currently) missing slot member, or a buggy
# __dir__; discard and move on
continue
if not predicate or predicate(value):
results.append((key, value))
processed.add(key)
results.sort(key=lambda pair: pair[0])
return results
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
"""Return list of attribute-descriptor tuples.
For each name in dir(cls), the return list contains a 4-tuple
with these elements:
0. The name (a string).
1. The kind of attribute this is, one of these strings:
'class method' created via classmethod()
'static method' created via staticmethod()
'property' created via property()
'method' any other flavor of method or descriptor
'data' not a method
2. The class which defined this attribute (a class).
3. The object as obtained by calling getattr; if this fails, or if the
resulting object does not live anywhere in the class' mro (including
metaclasses) then the object is looked up in the defining class's
dict (found by walking the mro).
If one of the items in dir(cls) is stored in the metaclass it will now
be discovered and not have None be listed as the class in which it was
defined. Any items whose home class cannot be discovered are skipped.
"""
mro = getmro(cls)
metamro = getmro(type(cls)) # for attributes stored in the metaclass
metamro = tuple([cls for cls in metamro if cls not in (type, object)])
class_bases = (cls,) + mro
all_bases = class_bases + metamro
names = dir(cls)
    # add any DynamicClassAttributes to the list of names;
# this may result in duplicate entries if, for example, a virtual
# attribute with the same name as a DynamicClassAttribute exists.
for base in mro:
for k, v in base.__dict__.items():
if isinstance(v, types.DynamicClassAttribute):
names.append(k)
result = []
processed = set()
for name in names:
# Get the object associated with the name, and where it was defined.
# Normal objects will be looked up with both getattr and directly in
# its class' dict (in case getattr fails [bug #1785], and also to look
# for a docstring).
# For DynamicClassAttributes on the second pass we only look in the
# class's dict.
#
# Getting an obj from the __dict__ sometimes reveals more than
# using getattr. Static and class methods are dramatic examples.
homecls = None
get_obj = None
dict_obj = None
if name not in processed:
try:
if name == '__dict__':
raise Exception("__dict__ is special, don't want the proxy")
get_obj = getattr(cls, name)
except Exception as exc:
pass
else:
homecls = getattr(get_obj, "__objclass__", homecls)
if homecls not in class_bases:
# if the resulting object does not live somewhere in the
# mro, drop it and search the mro manually
homecls = None
last_cls = None
# first look in the classes
for srch_cls in class_bases:
srch_obj = getattr(srch_cls, name, None)
if srch_obj == get_obj:
last_cls = srch_cls
# then check the metaclasses
for srch_cls in metamro:
try:
srch_obj = srch_cls.__getattr__(cls, name)
except AttributeError:
continue
if srch_obj == get_obj:
last_cls = srch_cls
if last_cls is not None:
homecls = last_cls
for base in all_bases:
if name in base.__dict__:
dict_obj = base.__dict__[name]
if homecls not in metamro:
homecls = base
break
if homecls is None:
# unable to locate the attribute anywhere, most likely due to
# buggy custom __dir__; discard and move on
continue
obj = get_obj or dict_obj
# Classify the object or its descriptor.
if isinstance(dict_obj, staticmethod):
kind = "static method"
obj = dict_obj
elif isinstance(dict_obj, classmethod):
kind = "class method"
obj = dict_obj
elif isinstance(dict_obj, property):
kind = "property"
obj = dict_obj
elif isroutine(obj):
kind = "method"
else:
kind = "data"
result.append(Attribute(name, kind, homecls, obj))
processed.add(name)
return result
# ----------------------------------------------------------- class helpers
def getmro(cls):
"Return tuple of base classes (including cls) in method resolution order."
return cls.__mro__
# -------------------------------------------------------- function helpers
def unwrap(func, *, stop=None):
"""Get the object wrapped by *func*.
Follows the chain of :attr:`__wrapped__` attributes returning the last
object in the chain.
*stop* is an optional callback accepting an object in the wrapper chain
as its sole argument that allows the unwrapping to be terminated early if
the callback returns a true value. If the callback never returns a true
value, the last object in the chain is returned as usual. For example,
:func:`signature` uses this to stop unwrapping if any object in the
chain has a ``__signature__`` attribute defined.
:exc:`ValueError` is raised if a cycle is encountered.
"""
if stop is None:
def _is_wrapper(f):
return hasattr(f, '__wrapped__')
else:
def _is_wrapper(f):
return hasattr(f, '__wrapped__') and not stop(f)
f = func # remember the original func for error reporting
memo = {id(f)} # Memoise by id to tolerate non-hashable objects
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if id_func in memo:
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo.add(id_func)
return func
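# Illustrative example (sketch, not part of the module): unwrap() follows
# the __wrapped__ links that functools.wraps records.
#
#     import functools
#     def deco(f):
#         @functools.wraps(f)
#         def wrapper(*args, **kwargs):
#             return f(*args, **kwargs)
#         return wrapper
#     @deco
#     def greet(): pass
#     assert unwrap(greet) is greet.__wrapped__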
# -------------------------------------------------- source code extraction
def indentsize(line):
"""Return the indent size, in spaces, at the start of a line of text."""
expline = line.expandtabs()
return len(expline) - len(expline.lstrip())
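# e.g. (sketch): indentsize("    x") == 4; tabs are expanded first, so
# indentsize("\tx") == 8 with the default tab size.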
def getdoc(object):
"""Get the documentation string for an object.
All tabs are expanded to spaces. To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
uniformly removed from the second line onwards is removed."""
try:
doc = object.__doc__
except AttributeError:
return None
if not isinstance(doc, str):
return None
return cleandoc(doc)
def cleandoc(doc):
"""Clean up indentation from docstrings.
Any whitespace that can be uniformly removed from the second line
onwards is removed."""
try:
lines = doc.expandtabs().split('\n')
except UnicodeError:
return None
else:
# Find minimum indentation of any non-blank lines after first line.
margin = sys.maxsize
for line in lines[1:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation.
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxsize:
for i in range(1, len(lines)): lines[i] = lines[i][margin:]
# Remove any trailing or leading blank lines.
while lines and not lines[-1]:
lines.pop()
while lines and not lines[0]:
lines.pop(0)
return '\n'.join(lines)
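# e.g. (sketch): cleandoc("Title\n    body") == 'Title\nbody' -- the uniform
# four-space margin after the first line is removed.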
def getfile(object):
"""Work out which source or compiled file an object was defined in."""
if ismodule(object):
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('{!r} is a built-in module'.format(object))
if isclass(object):
if hasattr(object, '__module__'):
object = sys.modules.get(object.__module__)
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('{!r} is a built-in class'.format(object))
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
return object.co_filename
raise TypeError('{!r} is not a module, class, method, '
'function, traceback, frame, or code object'.format(object))
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
"""Get the module name, suffix, mode, and module type for a given file."""
warnings.warn('inspect.getmoduleinfo() is deprecated', DeprecationWarning,
2)
with warnings.catch_warnings():
warnings.simplefilter('ignore', PendingDeprecationWarning)
import imp
filename = os.path.basename(path)
suffixes = [(-len(suffix), suffix, mode, mtype)
for suffix, mode, mtype in imp.get_suffixes()]
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix, mode, mtype in suffixes:
if filename[neglen:] == suffix:
return ModuleInfo(filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
"""Return the module name for a given file, or None."""
fname = os.path.basename(path)
# Check for paths that look like an actual module file
suffixes = [(-len(suffix), suffix)
for suffix in importlib.machinery.all_suffixes()]
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix in suffixes:
if fname.endswith(suffix):
return fname[:neglen]
return None
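# e.g. (sketch): getmodulename('/usr/lib/python3.4/os.py') == 'os', while a
# path with no recognized module suffix returns None.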
def getsourcefile(object):
"""Return the filename that can be used to locate an object's source.
Return None if no way can be identified to get the source.
"""
filename = getfile(object)
all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]
all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]
if any(filename.endswith(s) for s in all_bytecode_suffixes):
filename = (os.path.splitext(filename)[0] +
importlib.machinery.SOURCE_SUFFIXES[0])
elif any(filename.endswith(s) for s in
importlib.machinery.EXTENSION_SUFFIXES):
return None
if os.path.exists(filename):
return filename
# only return a non-existent filename if the module has a PEP 302 loader
if getattr(getmodule(object, filename), '__loader__', None) is not None:
return filename
# or it is in the linecache
if filename in linecache.cache:
return filename
def getabsfile(object, _filename=None):
"""Return an absolute path to the source or compiled file for an object.
The idea is for each object to have a unique origin, so this routine
normalizes the result as much as possible."""
if _filename is None:
_filename = getsourcefile(object) or getfile(object)
return os.path.normcase(os.path.abspath(_filename))
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
"""Return the module an object was defined in, or None if not found."""
if ismodule(object):
return object
if hasattr(object, '__module__'):
return sys.modules.get(object.__module__)
# Try the filename to modulename cache
if _filename is not None and _filename in modulesbyfile:
return sys.modules.get(modulesbyfile[_filename])
# Try the cache again with the absolute file name
try:
file = getabsfile(object, _filename)
except TypeError:
return None
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Update the filename to module name cache and check yet again
# Copy sys.modules in order to cope with changes while iterating
for modname, module in list(sys.modules.items()):
if ismodule(module) and hasattr(module, '__file__'):
f = module.__file__
if f == _filesbymodname.get(modname, None):
# Have already mapped this module, so skip it
continue
_filesbymodname[modname] = f
f = getabsfile(module)
# Always map to the name the module knows itself by
modulesbyfile[f] = modulesbyfile[
os.path.realpath(f)] = module.__name__
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Check the main module
main = sys.modules['__main__']
if not hasattr(object, '__name__'):
return None
if hasattr(main, object.__name__):
mainobject = getattr(main, object.__name__)
if mainobject is object:
return main
# Check builtins
builtin = sys.modules['builtins']
if hasattr(builtin, object.__name__):
builtinobject = getattr(builtin, object.__name__)
if builtinobject is object:
return builtin
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An OSError
is raised if the source code cannot be retrieved."""
file = getfile(object)
sourcefile = getsourcefile(object)
if not sourcefile and file[:1] + file[-1:] != '<>':
raise OSError('source code not available')
file = sourcefile if sourcefile else file
module = getmodule(object, file)
if module:
lines = linecache.getlines(file, module.__dict__)
else:
lines = linecache.getlines(file)
if not lines:
raise OSError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
return lines, candidates[0][1]
else:
raise OSError('could not find class definition')
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise OSError('could not find function definition')
lnum = object.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
if pat.match(lines[lnum]): break
lnum = lnum - 1
return lines, lnum
raise OSError('could not find code object')
def getcomments(object):
"""Get lines of comments immediately preceding an object's source code.
Returns None when source can't be found.
"""
try:
lines, lnum = findsource(object)
except (OSError, TypeError):
return None
if ismodule(object):
# Look for a comment block at the top of the file.
start = 0
if lines and lines[0][:2] == '#!': start = 1
while start < len(lines) and lines[start].strip() in ('', '#'):
start = start + 1
if start < len(lines) and lines[start][:1] == '#':
comments = []
end = start
while end < len(lines) and lines[end][:1] == '#':
comments.append(lines[end].expandtabs())
end = end + 1
return ''.join(comments)
# Look for a preceding block of comments at the same indentation.
elif lnum > 0:
indent = indentsize(lines[lnum])
end = lnum - 1
if end >= 0 and lines[end].lstrip()[:1] == '#' and \
indentsize(lines[end]) == indent:
comments = [lines[end].expandtabs().lstrip()]
if end > 0:
end = end - 1
comment = lines[end].expandtabs().lstrip()
while comment[:1] == '#' and indentsize(lines[end]) == indent:
comments[:0] = [comment]
end = end - 1
if end < 0: break
comment = lines[end].expandtabs().lstrip()
while comments and comments[0].strip() == '#':
comments[:1] = []
while comments and comments[-1].strip() == '#':
comments[-1:] = []
return ''.join(comments)
class EndOfBlock(Exception): pass
class BlockFinder:
"""Provide a tokeneater() method to detect the end of a code block."""
def __init__(self):
self.indent = 0
self.islambda = False
self.started = False
self.passline = False
self.last = 1
def tokeneater(self, type, token, srowcol, erowcol, line):
if not self.started:
# look for the first "def", "class" or "lambda"
if token in ("def", "class", "lambda"):
if token == "lambda":
self.islambda = True
self.started = True
self.passline = True # skip to the end of the line
elif type == tokenize.NEWLINE:
self.passline = False # stop skipping when a NEWLINE is seen
self.last = srowcol[0]
if self.islambda: # lambdas always end at the first NEWLINE
raise EndOfBlock
elif self.passline:
pass
elif type == tokenize.INDENT:
self.indent = self.indent + 1
self.passline = True
elif type == tokenize.DEDENT:
self.indent = self.indent - 1
            # the end of matching indent/dedent pairs ends a block
# (note that this only works for "def"/"class" blocks,
# not e.g. for "if: else:" or "try: finally:" blocks)
if self.indent <= 0:
raise EndOfBlock
elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level ends the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
raise EndOfBlock
def getblock(lines):
"""Extract the block of code at the top of the given list of lines."""
blockfinder = BlockFinder()
try:
tokens = tokenize.generate_tokens(iter(lines).__next__)
for _token in tokens:
blockfinder.tokeneater(*_token)
except (EndOfBlock, IndentationError):
pass
return lines[:blockfinder.last]
def getsourcelines(object):
"""Return a list of source lines and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of the lines
corresponding to the object and the line number indicates where in the
original source file the first line of code was found. An OSError is
raised if the source code cannot be retrieved."""
lines, lnum = findsource(object)
if ismodule(object): return lines, 0
else: return getblock(lines[lnum:]), lnum + 1
def getsource(object):
"""Return the text of the source code for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a single string. An
OSError is raised if the source code cannot be retrieved."""
lines, lnum = getsourcelines(object)
return ''.join(lines)
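# Illustrative usage of getsource()/getsourcelines() (a sketch, assuming the
# target was defined in a regular .py file):
#
#     >>> src = getsource(indentsize)
#     >>> src.startswith('def indentsize(line):')
#     True
#     >>> block, lineno = getsourcelines(indentsize)  # lines + 1-based lineno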
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
"""Recursive helper function for getclasstree()."""
results = []
classes.sort(key=attrgetter('__module__', '__name__'))
for c in classes:
results.append((c, c.__bases__))
if c in children:
results.append(walktree(children[c], children, c))
return results
def getclasstree(classes, unique=False):
"""Arrange the given list of classes into a hierarchy of nested lists.
Where a nested list appears, it contains classes derived from the class
whose entry immediately precedes the list. Each entry is a 2-tuple
containing a class and a tuple of its base classes. If the 'unique'
argument is true, exactly one entry appears in the returned structure
for each class in the given list. Otherwise, classes using multiple
inheritance and their descendants will appear multiple times."""
children = {}
roots = []
for c in classes:
if c.__bases__:
for parent in c.__bases__:
if not parent in children:
children[parent] = []
if c not in children[parent]:
children[parent].append(c)
if unique and parent in classes: break
elif c not in roots:
roots.append(c)
for parent in children:
if parent not in classes:
roots.append(parent)
return walktree(roots, children, None)
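# Illustrative usage of getclasstree() (a sketch; class reprs abbreviated):
#
#     >>> class A: pass
#     >>> class B(A): pass
#     >>> getclasstree([A, B])        # doctest: +SKIP
#     [(object, ()), [(A, (object,)), [(B, (A,))]]]
#
# 'object' shows up as a root because it is a parent of A that does not
# itself appear in the given list.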
# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args, varargs, varkw')
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where
'args' is the list of argument names. Keyword-only arguments are
appended. 'varargs' and 'varkw' are the names of the * and **
arguments or None."""
args, varargs, kwonlyargs, varkw = _getfullargs(co)
return Arguments(args + kwonlyargs, varargs, varkw)
def _getfullargs(co):
"""Get information about the arguments accepted by a code object.
Four things are returned: (args, varargs, kwonlyargs, varkw), where
'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
and 'varkw' are the names of the * and ** arguments or None."""
if not iscode(co):
raise TypeError('{!r} is not a code object'.format(co))
nargs = co.co_argcount
names = co.co_varnames
nkwargs = co.co_kwonlyargcount
args = list(names[:nargs])
kwonlyargs = list(names[nargs:nargs+nkwargs])
nargs += nkwargs
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return args, varargs, kwonlyargs, varkw
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names.
'args' will include keyword-only argument names.
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
    Use the getfullargspec() API for Python 3 code, as annotations
    and keyword-only arguments are supported. getargspec() will raise
    ValueError if the func has either annotations or keyword-only arguments.
"""
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \
getfullargspec(func)
if kwonlyargs or ann:
raise ValueError("Function has keyword-only arguments or annotations"
", use getfullargspec() API which can support them")
return ArgSpec(args, varargs, varkw, defaults)
FullArgSpec = namedtuple('FullArgSpec',
'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(func):
"""Get the names and default values of a callable object's arguments.
A tuple of seven things is returned:
    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
'args' is a list of the argument names.
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
'kwonlyargs' is a list of keyword-only argument names.
'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
'annotations' is a dictionary mapping argument names to annotations.
The first four items in the tuple correspond to getargspec().
"""
try:
# Re: `skip_bound_arg=False`
#
# There is a notable difference in behaviour between getfullargspec
# and Signature: the former always returns 'self' parameter for bound
# methods, whereas the Signature always shows the actual calling
# signature of the passed object.
#
# To simulate this behaviour, we "unbind" bound methods, to trick
# inspect.signature to always return their first parameter ("self",
# usually)
# Re: `follow_wrapper_chains=False`
#
# getfullargspec() historically ignored __wrapped__ attributes,
# so we ensure that remains the case in 3.3+
sig = _signature_internal(func,
follow_wrapper_chains=False,
skip_bound_arg=False)
except Exception as ex:
        # Most of the time 'signature' will raise ValueError.
        # But it can also raise AttributeError, and perhaps something
        # else. So to be fully backwards compatible, we catch all
        # possible exceptions here, and reraise a TypeError.
raise TypeError('unsupported callable') from ex
args = []
varargs = None
varkw = None
kwonlyargs = []
    defaults = ()
    annotations = {}
kwdefaults = {}
if sig.return_annotation is not sig.empty:
annotations['return'] = sig.return_annotation
for param in sig.parameters.values():
kind = param.kind
name = param.name
if kind is _POSITIONAL_ONLY:
args.append(name)
elif kind is _POSITIONAL_OR_KEYWORD:
args.append(name)
if param.default is not param.empty:
defaults += (param.default,)
elif kind is _VAR_POSITIONAL:
varargs = name
elif kind is _KEYWORD_ONLY:
kwonlyargs.append(name)
if param.default is not param.empty:
kwdefaults[name] = param.default
elif kind is _VAR_KEYWORD:
varkw = name
if param.annotation is not param.empty:
annotations[name] = param.annotation
if not kwdefaults:
# compatibility with 'func.__kwdefaults__'
kwdefaults = None
if not defaults:
# compatibility with 'func.__defaults__'
defaults = None
return FullArgSpec(args, varargs, varkw, defaults,
kwonlyargs, kwdefaults, annotations)
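# Illustrative usage of getfullargspec() (a doctest-style sketch; the repr
# is wrapped here for line width):
#
#     >>> getfullargspec(lambda a, b=1, *c, d, **e: None)
#     FullArgSpec(args=['a', 'b'], varargs='c', varkw='e', defaults=(1,),
#                 kwonlyargs=['d'], kwonlydefaults=None, annotations={})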
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names.
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame."""
args, varargs, varkw = getargs(frame.f_code)
return ArgInfo(args, varargs, varkw, frame.f_locals)
def formatannotation(annotation, base_module=None):
if isinstance(annotation, type):
if annotation.__module__ in ('builtins', base_module):
return annotation.__name__
return annotation.__module__+'.'+annotation.__name__
return repr(annotation)
def formatannotationrelativeto(object):
module = getattr(object, '__module__', None)
def _formatannotation(annotation):
return formatannotation(annotation, module)
return _formatannotation
def formatargspec(args, varargs=None, varkw=None, defaults=None,
kwonlyargs=(), kwonlydefaults={}, annotations={},
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
formatreturns=lambda text: ' -> ' + text,
formatannotation=formatannotation):
"""Format an argument spec from the values returned by getargspec
or getfullargspec.
The first seven arguments are (args, varargs, varkw, defaults,
    kwonlyargs, kwonlydefaults, annotations). The remaining six arguments
    are the corresponding optional formatting functions that are called to
    turn names and values into strings."""
def formatargandannotation(arg):
result = formatarg(arg)
if arg in annotations:
result += ': ' + formatannotation(annotations[arg])
return result
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i, arg in enumerate(args):
spec = formatargandannotation(arg)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(formatargandannotation(varargs)))
else:
if kwonlyargs:
specs.append('*')
if kwonlyargs:
for kwonlyarg in kwonlyargs:
spec = formatargandannotation(kwonlyarg)
if kwonlydefaults and kwonlyarg in kwonlydefaults:
spec += formatvalue(kwonlydefaults[kwonlyarg])
specs.append(spec)
if varkw is not None:
specs.append(formatvarkw(formatargandannotation(varkw)))
result = '(' + ', '.join(specs) + ')'
if 'return' in annotations:
result += formatreturns(formatannotation(annotations['return']))
return result
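# Illustrative usage of formatargspec() (a doctest-style sketch):
#
#     >>> formatargspec(['a', 'b'], 'args', None, (1,))
#     '(a, b=1, *args)'
#     >>> formatargspec(['a'], None, None, None, ['k'], {'k': 0})
#     '(a, *, k=0)'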
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value)):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings."""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(convert(args[i]))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + ', '.join(specs) + ')'
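# Illustrative usage of formatargvalues() (a sketch; typically fed from
# getargvalues(frame)):
#
#     >>> formatargvalues(['x', 'y'], None, None, {'x': 3, 'y': 'hi'})
#     "(x=3, y='hi')"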
def _missing_arguments(f_name, argnames, pos, values):
names = [repr(name) for name in argnames if name not in values]
missing = len(names)
if missing == 1:
s = names[0]
elif missing == 2:
s = "{} and {}".format(*names)
else:
tail = ", {} and {}".format(*names[-2:])
del names[-2:]
s = ", ".join(names) + tail
raise TypeError("%s() missing %i required %s argument%s: %s" %
(f_name, missing,
"positional" if pos else "keyword-only",
"" if missing == 1 else "s", s))
def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
atleast = len(args) - defcount
kwonly_given = len([arg for arg in kwonly if arg in values])
if varargs:
plural = atleast != 1
sig = "at least %d" % (atleast,)
elif defcount:
plural = True
sig = "from %d to %d" % (atleast, len(args))
else:
plural = len(args) != 1
sig = str(len(args))
kwonly_sig = ""
if kwonly_given:
msg = " positional argument%s (and %d keyword-only argument%s)"
kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
"s" if kwonly_given != 1 else ""))
raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
(f_name, sig, "s" if plural else "", given, kwonly_sig,
"was" if given == 1 and not kwonly_given else "were"))
def getcallargs(*func_and_positional, **named):
"""Get the mapping of arguments to values.
A dict is returned, with keys the function argument names (including the
names of the * and ** arguments, if any), and values the respective bound
values from 'positional' and 'named'."""
func = func_and_positional[0]
positional = func_and_positional[1:]
spec = getfullargspec(func)
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
f_name = func.__name__
arg2value = {}
if ismethod(func) and func.__self__ is not None:
# implicit 'self' (or 'cls' for classmethods) argument
positional = (func.__self__,) + positional
num_pos = len(positional)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
n = min(num_pos, num_args)
for i in range(n):
arg2value[args[i]] = positional[i]
if varargs:
arg2value[varargs] = tuple(positional[n:])
possible_kwargs = set(args + kwonlyargs)
if varkw:
arg2value[varkw] = {}
for kw, value in named.items():
if kw not in possible_kwargs:
if not varkw:
raise TypeError("%s() got an unexpected keyword argument %r" %
(f_name, kw))
arg2value[varkw][kw] = value
continue
if kw in arg2value:
raise TypeError("%s() got multiple values for argument %r" %
(f_name, kw))
arg2value[kw] = value
if num_pos > num_args and not varargs:
_too_many(f_name, args, kwonlyargs, varargs, num_defaults,
num_pos, arg2value)
if num_pos < num_args:
req = args[:num_args - num_defaults]
for arg in req:
if arg not in arg2value:
_missing_arguments(f_name, req, True, arg2value)
for i, arg in enumerate(args[num_args - num_defaults:]):
if arg not in arg2value:
arg2value[arg] = defaults[i]
missing = 0
for kwarg in kwonlyargs:
if kwarg not in arg2value:
if kwonlydefaults and kwarg in kwonlydefaults:
arg2value[kwarg] = kwonlydefaults[kwarg]
else:
missing += 1
if missing:
_missing_arguments(f_name, kwonlyargs, False, arg2value)
return arg2value
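# Illustrative usage of getcallargs() (a doctest-style sketch):
#
#     >>> def f(a, b=1, *pos, **named):
#     ...     pass
#     >>> getcallargs(f, 1, 2, 3) == {'a': 1, 'b': 2, 'pos': (3,), 'named': {}}
#     True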
ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')
def getclosurevars(func):
"""
Get the mapping of free variables to their current values.
Returns a named tuple of dicts mapping the current nonlocal, global
and builtin references as seen by the body of the function. A final
set of unbound names that could not be resolved is also provided.
"""
if ismethod(func):
func = func.__func__
if not isfunction(func):
raise TypeError("'{!r}' is not a Python function".format(func))
code = func.__code__
# Nonlocal references are named in co_freevars and resolved
# by looking them up in __closure__ by positional index
if func.__closure__ is None:
nonlocal_vars = {}
else:
nonlocal_vars = {
var : cell.cell_contents
for var, cell in zip(code.co_freevars, func.__closure__)
}
# Global and builtin references are named in co_names and resolved
# by looking them up in __globals__ or __builtins__
global_ns = func.__globals__
builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
if ismodule(builtin_ns):
builtin_ns = builtin_ns.__dict__
global_vars = {}
builtin_vars = {}
unbound_names = set()
for name in code.co_names:
if name in ("None", "True", "False"):
# Because these used to be builtins instead of keywords, they
# may still show up as name references. We ignore them.
continue
try:
global_vars[name] = global_ns[name]
except KeyError:
try:
builtin_vars[name] = builtin_ns[name]
except KeyError:
unbound_names.add(name)
return ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
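# Illustrative usage of getclosurevars() (a sketch, assuming 'len' is not
# shadowed in the enclosing module's globals):
#
#     >>> def outer():
#     ...     x = 1
#     ...     def inner():
#     ...         return x + len('abc')
#     ...     return inner
#     >>> cv = getclosurevars(outer())
#     >>> cv.nonlocals, cv.unbound
#     ({'x': 1}, set())
#     >>> 'len' in cv.builtins    # resolved via the builtin namespace
#     True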
# -------------------------------------------------- stack frame extraction
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
"""Get information about a frame or traceback object.
A tuple of five things is returned: the filename, the line number of
the current line, the function name, a list of lines of context from
the source code, and the index of the current line within that list.
The optional second argument specifies the number of lines of context
to return, which are centered around the current line."""
if istraceback(frame):
lineno = frame.tb_lineno
frame = frame.tb_frame
else:
lineno = frame.f_lineno
if not isframe(frame):
raise TypeError('{!r} is not a frame or traceback object'.format(frame))
filename = getsourcefile(frame) or getfile(frame)
if context > 0:
start = lineno - 1 - context//2
try:
lines, lnum = findsource(frame)
except OSError:
lines = index = None
else:
start = max(start, 1)
start = max(0, min(start, len(lines) - context))
lines = lines[start:start+context]
index = lineno - 1 - start
else:
lines = index = None
return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
"""Get the line number from a frame object, allowing for optimization."""
# FrameType.f_lineno is now a descriptor that grovels co_lnotab
return frame.f_lineno
def getouterframes(frame, context=1):
"""Get a list of records for a frame and all higher (calling) frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while frame:
framelist.append((frame,) + getframeinfo(frame, context))
frame = frame.f_back
return framelist
def getinnerframes(tb, context=1):
"""Get a list of records for a traceback's frame and all lower frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while tb:
framelist.append((tb.tb_frame,) + getframeinfo(tb, context))
tb = tb.tb_next
return framelist
def currentframe():
"""Return the frame of the caller or None if this is not possible."""
return sys._getframe(1) if hasattr(sys, "_getframe") else None
def stack(context=1):
"""Return a list of records for the stack above the caller's frame."""
return getouterframes(sys._getframe(1), context)
def trace(context=1):
"""Return a list of records for the stack below the current exception."""
return getinnerframes(sys.exc_info()[2], context)
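# Illustrative usage (a sketch): each record returned by stack(), trace(),
# getouterframes() and getinnerframes() is a plain tuple, so it unpacks as
#
#     frame, filename, lineno, function, code_context, index = stack()[0]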
# ------------------------------------------------ static version of getattr
_sentinel = object()
def _static_getmro(klass):
return type.__dict__['__mro__'].__get__(klass)
def _check_instance(obj, attr):
instance_dict = {}
try:
instance_dict = object.__getattribute__(obj, "__dict__")
except AttributeError:
pass
return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
for entry in _static_getmro(klass):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
return _sentinel
def _is_type(obj):
try:
_static_getmro(obj)
except TypeError:
return False
return True
def _shadowed_dict(klass):
dict_attr = type.__dict__["__dict__"]
for entry in _static_getmro(klass):
try:
class_dict = dict_attr.__get__(entry)["__dict__"]
except KeyError:
pass
else:
if not (type(class_dict) is types.GetSetDescriptorType and
class_dict.__name__ == "__dict__" and
class_dict.__objclass__ is entry):
return class_dict
return _sentinel
def getattr_static(obj, attr, default=_sentinel):
"""Retrieve attributes without triggering dynamic lookup via the
descriptor protocol, __getattr__ or __getattribute__.
Note: this function may not be able to retrieve all attributes
that getattr can fetch (like dynamically created attributes)
and may find attributes that getattr can't (like descriptors
that raise AttributeError). It can also return descriptor objects
instead of instance members in some cases. See the
documentation for details.
"""
instance_result = _sentinel
if not _is_type(obj):
klass = type(obj)
dict_attr = _shadowed_dict(klass)
if (dict_attr is _sentinel or
type(dict_attr) is types.MemberDescriptorType):
instance_result = _check_instance(obj, attr)
else:
klass = obj
klass_result = _check_class(klass, attr)
if instance_result is not _sentinel and klass_result is not _sentinel:
if (_check_class(type(klass_result), '__get__') is not _sentinel and
_check_class(type(klass_result), '__set__') is not _sentinel):
return klass_result
if instance_result is not _sentinel:
return instance_result
if klass_result is not _sentinel:
return klass_result
if obj is klass:
# for types we check the metaclass too
for entry in _static_getmro(type(klass)):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
if default is not _sentinel:
return default
raise AttributeError(attr)
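# Illustrative usage of getattr_static() (a sketch): descriptors are not
# invoked, so a property comes back as the property object itself.
#
#     >>> class C:
#     ...     @property
#     ...     def p(self):
#     ...         return 1
#     >>> getattr(C(), 'p')
#     1
#     >>> getattr_static(C(), 'p')            # doctest: +ELLIPSIS
#     <property object at 0x...>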
# ------------------------------------------------ generator introspection
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'
def getgeneratorstate(generator):
"""Get current state of a generator-iterator.
Possible states are:
GEN_CREATED: Waiting to start execution.
GEN_RUNNING: Currently being executed by the interpreter.
GEN_SUSPENDED: Currently suspended at a yield expression.
GEN_CLOSED: Execution has completed.
"""
if generator.gi_running:
return GEN_RUNNING
if generator.gi_frame is None:
return GEN_CLOSED
if generator.gi_frame.f_lasti == -1:
return GEN_CREATED
return GEN_SUSPENDED
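# Illustrative usage of getgeneratorstate() (a doctest-style sketch):
#
#     >>> def gen():
#     ...     yield 1
#     >>> g = gen()
#     >>> getgeneratorstate(g)
#     'GEN_CREATED'
#     >>> next(g)
#     1
#     >>> getgeneratorstate(g)
#     'GEN_SUSPENDED'
#     >>> g.close()
#     >>> getgeneratorstate(g)
#     'GEN_CLOSED'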
def getgeneratorlocals(generator):
"""
Get the mapping of generator local variables to their current values.
A dict is returned, with the keys the local variable names and values the
bound values."""
if not isgenerator(generator):
raise TypeError("'{!r}' is not a Python generator".format(generator))
frame = getattr(generator, "gi_frame", None)
if frame is not None:
return generator.gi_frame.f_locals
else:
return {}
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_ClassMethodWrapper = type(int.__dict__['from_bytes'])
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
_ClassMethodWrapper,
types.BuiltinFunctionType)
def _signature_get_user_defined_method(cls, method_name):
try:
meth = getattr(cls, method_name)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
            # Once '__signature__' is added to C-level callables,
            # this check won't be necessary
return meth
def _signature_get_partial(wrapped_sig, partial, extra_args=()):
    # Internal helper to calculate what the 'wrapped_sig' signature
    # will look like after applying a 'functools.partial' object
    # (or similar) to it.
old_params = wrapped_sig.parameters
new_params = OrderedDict(old_params.items())
partial_args = partial.args or ()
partial_keywords = partial.keywords or {}
if extra_args:
partial_args = extra_args + partial_args
try:
ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)
except TypeError as ex:
msg = 'partial object {!r} has incorrect arguments'.format(partial)
raise ValueError(msg) from ex
transform_to_kwonly = False
for param_name, param in old_params.items():
try:
arg_value = ba.arguments[param_name]
except KeyError:
pass
else:
if param.kind is _POSITIONAL_ONLY:
# If positional-only parameter is bound by partial,
# it effectively disappears from the signature
new_params.pop(param_name)
continue
if param.kind is _POSITIONAL_OR_KEYWORD:
if param_name in partial_keywords:
# This means that this parameter, and all parameters
# after it should be keyword-only (and var-positional
# should be removed). Here's why. Consider the following
# function:
# foo(a, b, *args, c):
# pass
#
# "partial(foo, a='spam')" will have the following
# signature: "(*, a='spam', b, c)". Because attempting
# to call that partial with "(10, 20)" arguments will
# raise a TypeError, saying that "a" argument received
# multiple values.
transform_to_kwonly = True
# Set the new default value
new_params[param_name] = param.replace(default=arg_value)
else:
# was passed as a positional argument
new_params.pop(param.name)
continue
if param.kind is _KEYWORD_ONLY:
# Set the new default value
new_params[param_name] = param.replace(default=arg_value)
if transform_to_kwonly:
assert param.kind is not _POSITIONAL_ONLY
if param.kind is _POSITIONAL_OR_KEYWORD:
new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)
new_params[param_name] = new_param
new_params.move_to_end(param_name)
elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):
new_params.move_to_end(param_name)
elif param.kind is _VAR_POSITIONAL:
new_params.pop(param.name)
return wrapped_sig.replace(parameters=new_params.values())
def _signature_bound_method(sig):
# Internal helper to transform signatures for unbound
# functions to bound methods
params = tuple(sig.parameters.values())
if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
raise ValueError('invalid method signature')
kind = params[0].kind
if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
# Drop first parameter:
# '(p1, p2[, ...])' -> '(p2[, ...])'
params = params[1:]
else:
if kind is not _VAR_POSITIONAL:
# Unless we add a new parameter type we never
# get here
raise ValueError('invalid argument type')
# It's a var-positional parameter.
# Do nothing. '(*args[, ...])' -> '(*args[, ...])'
return sig.replace(parameters=params)
def _signature_is_builtin(obj):
# Internal helper to test if `obj` is a callable that might
# support Argument Clinic's __text_signature__ protocol.
return (isbuiltin(obj) or
ismethoddescriptor(obj) or
isinstance(obj, _NonUserDefinedCallables) or
# Can't test 'isinstance(type)' here, as it would
# also be True for regular python classes
obj in (type, object))
def _signature_is_functionlike(obj):
# Internal helper to test if `obj` is a duck type of FunctionType.
# A good example of such objects are functions compiled with
# Cython, which have all attributes that a pure Python function
# would have, but have their code statically compiled.
if not callable(obj) or isclass(obj):
# All function-like objects are obviously callables,
# and not classes.
return False
name = getattr(obj, '__name__', None)
code = getattr(obj, '__code__', None)
defaults = getattr(obj, '__defaults__', _void) # Important to use _void ...
kwdefaults = getattr(obj, '__kwdefaults__', _void) # ... and not None here
annotations = getattr(obj, '__annotations__', None)
return (isinstance(code, types.CodeType) and
isinstance(name, str) and
(defaults is None or isinstance(defaults, tuple)) and
(kwdefaults is None or isinstance(kwdefaults, dict)) and
isinstance(annotations, dict))
def _signature_get_bound_param(spec):
# Internal helper to get first parameter name from a
# __text_signature__ of a builtin method, which should
# be in the following format: '($param1, ...)'.
# Assumptions are that the first argument won't have
# a default value or an annotation.
assert spec.startswith('($')
pos = spec.find(',')
if pos == -1:
pos = spec.find(')')
cpos = spec.find(':')
assert cpos == -1 or cpos > pos
cpos = spec.find('=')
assert cpos == -1 or cpos > pos
return spec[2:pos]
def _signature_strip_non_python_syntax(signature):
"""
Takes a signature in Argument Clinic's extended signature format.
Returns a tuple of three things:
* that signature re-rendered in standard Python syntax,
* the index of the "self" parameter (generally 0), or None if
the function does not have a "self" parameter, and
* the index of the last "positional only" parameter,
or None if the signature has no positional-only parameters.
"""
if not signature:
return signature, None, None
self_parameter = None
last_positional_only = None
lines = [l.encode('ascii') for l in signature.split('\n')]
generator = iter(lines).__next__
token_stream = tokenize.tokenize(generator)
delayed_comma = False
skip_next_comma = False
text = []
add = text.append
current_parameter = 0
OP = token.OP
ERRORTOKEN = token.ERRORTOKEN
# token stream always starts with ENCODING token, skip it
t = next(token_stream)
assert t.type == tokenize.ENCODING
for t in token_stream:
type, string = t.type, t.string
if type == OP:
if string == ',':
if skip_next_comma:
skip_next_comma = False
else:
assert not delayed_comma
delayed_comma = True
current_parameter += 1
continue
if string == '/':
assert not skip_next_comma
assert last_positional_only is None
skip_next_comma = True
last_positional_only = current_parameter - 1
continue
if (type == ERRORTOKEN) and (string == '$'):
assert self_parameter is None
self_parameter = current_parameter
continue
if delayed_comma:
delayed_comma = False
if not ((type == OP) and (string == ')')):
add(', ')
add(string)
if (string == ','):
add(' ')
clean_signature = ''.join(text)
return clean_signature, self_parameter, last_positional_only
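# Illustrative behaviour (a sketch; the input mimics an Argument Clinic
# text signature with a '$' self marker and a '/' positional-only marker):
#
#     >>> _signature_strip_non_python_syntax('($self, a, b=1, /)')
#     ('(self, a, b=1)', 0, 2)
#
# i.e. the '$' marks parameter 0 as 'self', and the '/' makes parameters
# up to index 2 positional-only.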
def _signature_fromstr(cls, obj, s, skip_bound_arg=True):
# Internal helper to parse content of '__text_signature__'
# and return a Signature based on it
Parameter = cls._parameter_cls
clean_signature, self_parameter, last_positional_only = \
_signature_strip_non_python_syntax(s)
program = "def foo" + clean_signature + ": pass"
try:
module = ast.parse(program)
except SyntaxError:
module = None
if not isinstance(module, ast.Module):
raise ValueError("{!r} builtin has invalid signature".format(obj))
f = module.body[0]
parameters = []
empty = Parameter.empty
invalid = object()
module = None
module_dict = {}
module_name = getattr(obj, '__module__', None)
if module_name:
module = sys.modules.get(module_name, None)
if module:
module_dict = module.__dict__
sys_module_dict = sys.modules
def parse_name(node):
assert isinstance(node, ast.arg)
            if node.annotation is not None:
raise ValueError("Annotations are not currently supported")
return node.arg
def wrap_value(s):
try:
value = eval(s, module_dict)
except NameError:
try:
value = eval(s, sys_module_dict)
except NameError:
raise RuntimeError()
if isinstance(value, str):
return ast.Str(value)
if isinstance(value, (int, float)):
return ast.Num(value)
if isinstance(value, bytes):
return ast.Bytes(value)
if value in (True, False, None):
return ast.NameConstant(value)
raise RuntimeError()
class RewriteSymbolics(ast.NodeTransformer):
def visit_Attribute(self, node):
a = []
n = node
while isinstance(n, ast.Attribute):
a.append(n.attr)
n = n.value
if not isinstance(n, ast.Name):
raise RuntimeError()
a.append(n.id)
value = ".".join(reversed(a))
return wrap_value(value)
def visit_Name(self, node):
if not isinstance(node.ctx, ast.Load):
raise ValueError()
return wrap_value(node.id)
def p(name_node, default_node, default=empty):
name = parse_name(name_node)
if name is invalid:
return None
if default_node and default_node is not _empty:
try:
default_node = RewriteSymbolics().visit(default_node)
o = ast.literal_eval(default_node)
except ValueError:
o = invalid
if o is invalid:
return None
default = o if o is not invalid else default
parameters.append(Parameter(name, kind, default=default, annotation=empty))
# non-keyword-only parameters
args = reversed(f.args.args)
defaults = reversed(f.args.defaults)
    args_and_defaults = itertools.zip_longest(args, defaults, fillvalue=None)
if last_positional_only is not None:
kind = Parameter.POSITIONAL_ONLY
else:
kind = Parameter.POSITIONAL_OR_KEYWORD
    for i, (name, default) in enumerate(reversed(list(args_and_defaults))):
p(name, default)
if i == last_positional_only:
kind = Parameter.POSITIONAL_OR_KEYWORD
# *args
if f.args.vararg:
kind = Parameter.VAR_POSITIONAL
p(f.args.vararg, empty)
# keyword-only arguments
kind = Parameter.KEYWORD_ONLY
for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults):
p(name, default)
# **kwargs
if f.args.kwarg:
kind = Parameter.VAR_KEYWORD
p(f.args.kwarg, empty)
if self_parameter is not None:
# Possibly strip the bound argument:
# - We *always* strip first bound argument if
# it is a module.
# - We don't strip first bound argument if
# skip_bound_arg is False.
assert parameters
_self = getattr(obj, '__self__', None)
self_isbound = _self is not None
self_ismodule = ismodule(_self)
if self_isbound and (self_ismodule or skip_bound_arg):
parameters.pop(0)
else:
# for builtins, self parameter is always positional-only!
p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY)
parameters[0] = p
return cls(parameters, return_annotation=cls.empty)
def _signature_from_builtin(cls, func, skip_bound_arg=True):
# Internal helper function to get signature for
# builtin callables
if not _signature_is_builtin(func):
raise TypeError("{!r} is not a Python builtin "
"function".format(func))
s = getattr(func, "__text_signature__", None)
if not s:
raise ValueError("no signature found for builtin {!r}".format(func))
return _signature_fromstr(cls, func, s, skip_bound_arg)
def _signature_internal(obj, follow_wrapper_chains=True, skip_bound_arg=True):
if not callable(obj):
raise TypeError('{!r} is not a callable object'.format(obj))
if isinstance(obj, types.MethodType):
# In this case we skip the first parameter of the underlying
# function (usually `self` or `cls`).
sig = _signature_internal(obj.__func__,
follow_wrapper_chains,
skip_bound_arg)
if skip_bound_arg:
return _signature_bound_method(sig)
else:
return sig
# Was this function wrapped by a decorator?
if follow_wrapper_chains:
obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__")))
try:
sig = obj.__signature__
except AttributeError:
pass
else:
if sig is not None:
return sig
try:
partialmethod = obj._partialmethod
except AttributeError:
pass
else:
if isinstance(partialmethod, functools.partialmethod):
# Unbound partialmethod (see functools.partialmethod)
            # This means that we need to calculate the signature
            # as if it were a regular partial object, but taking into
            # account that the first positional argument
            # (usually `self` or `cls`) will not be passed
            # automatically (as it is for bound methods)
wrapped_sig = _signature_internal(partialmethod.func,
follow_wrapper_chains,
skip_bound_arg)
sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
new_params = (first_wrapped_param,) + tuple(sig.parameters.values())
return sig.replace(parameters=new_params)
if isfunction(obj) or _signature_is_functionlike(obj):
# If it's a pure Python function, or an object that is duck type
# of a Python function (Cython functions, for instance), then:
return Signature.from_function(obj)
if _signature_is_builtin(obj):
return _signature_from_builtin(Signature, obj,
skip_bound_arg=skip_bound_arg)
if isinstance(obj, functools.partial):
wrapped_sig = _signature_internal(obj.func,
follow_wrapper_chains,
skip_bound_arg)
return _signature_get_partial(wrapped_sig, obj)
sig = None
if isinstance(obj, type):
# obj is a class or a metaclass
# First, let's see if it has an overloaded __call__ defined
# in its metaclass
call = _signature_get_user_defined_method(type(obj), '__call__')
if call is not None:
sig = _signature_internal(call,
follow_wrapper_chains,
skip_bound_arg)
else:
# Now we check if the 'obj' class has a '__new__' method
new = _signature_get_user_defined_method(obj, '__new__')
if new is not None:
sig = _signature_internal(new,
follow_wrapper_chains,
skip_bound_arg)
else:
# Finally, we should have at least __init__ implemented
init = _signature_get_user_defined_method(obj, '__init__')
if init is not None:
sig = _signature_internal(init,
follow_wrapper_chains,
skip_bound_arg)
if sig is None:
            # At this point we know that `obj` is a class with no user-
            # defined '__init__', '__new__', or class-level '__call__'
for base in obj.__mro__[:-1]:
# Since '__text_signature__' is implemented as a
# descriptor that extracts text signature from the
# class docstring, if 'obj' is derived from a builtin
# class, its own '__text_signature__' may be 'None'.
# Therefore, we go through the MRO (except the last
# class in there, which is 'object') to find the first
# class with non-empty text signature.
try:
text_sig = base.__text_signature__
except AttributeError:
pass
else:
if text_sig:
# If 'obj' class has a __text_signature__ attribute:
# return a signature based on it
return _signature_fromstr(Signature, obj, text_sig)
# No '__text_signature__' was found for the 'obj' class.
# Last option is to check if its '__init__' is
# object.__init__ or type.__init__.
if type not in obj.__mro__:
# We have a class (not metaclass), but no user-defined
# __init__ or __new__ for it
if obj.__init__ is object.__init__:
# Return a signature of 'object' builtin.
return signature(object)
elif not isinstance(obj, _NonUserDefinedCallables):
# An object with __call__
# We also check that the 'obj' is not an instance of
# _WrapperDescriptor or _MethodWrapper to avoid
# infinite recursion (and even potential segfault)
call = _signature_get_user_defined_method(type(obj), '__call__')
if call is not None:
try:
sig = _signature_internal(call,
follow_wrapper_chains,
skip_bound_arg)
except ValueError as ex:
msg = 'no signature found for {!r}'.format(obj)
raise ValueError(msg) from ex
if sig is not None:
# For classes and objects we skip the first parameter of their
# __call__, __new__, or __init__ methods
if skip_bound_arg:
return _signature_bound_method(sig)
else:
return sig
if isinstance(obj, types.BuiltinFunctionType):
# Raise a nicer error message for builtins
msg = 'no signature found for builtin function {!r}'.format(obj)
raise ValueError(msg)
raise ValueError('callable {!r} is not supported by signature'.format(obj))
def signature(obj):
'''Get a signature object for the passed callable.'''
return _signature_internal(obj)
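# Illustrative usage of signature() (a doctest-style sketch):
#
#     >>> sig = signature(lambda a, *, b=10: None)
#     >>> str(sig)
#     '(a, *, b=10)'
#     >>> sig.parameters['b'].default
#     10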
class _void:
'''A private marker - used in Parameter & Signature'''
class _empty:
pass
class _ParameterKind(int):
    def __new__(cls, *args, name):
        obj = int.__new__(cls, *args)
obj._name = name
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {!r}>'.format(self._name)
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter:
'''Represents a parameter in a function signature.
Has the following public attributes:
* name : str
The name of the parameter as a string.
* default : object
The default value for the parameter if specified. If the
parameter has no default value, this attribute is set to
`Parameter.empty`.
* annotation
The annotation for the parameter if specified. If the
parameter has no annotation, this attribute is set to
`Parameter.empty`.
* kind : str
Describes how argument values are bound to the parameter.
Possible values: `Parameter.POSITIONAL_ONLY`,
`Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
`Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
'''
__slots__ = ('_name', '_kind', '_default', '_annotation')
POSITIONAL_ONLY = _POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
VAR_POSITIONAL = _VAR_POSITIONAL
KEYWORD_ONLY = _KEYWORD_ONLY
VAR_KEYWORD = _VAR_KEYWORD
empty = _empty
def __init__(self, name, kind, *, default=_empty, annotation=_empty):
if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
_VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
raise ValueError("invalid value for 'Parameter.kind' attribute")
self._kind = kind
if default is not _empty:
if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
msg = '{} parameters cannot have default values'.format(kind)
raise ValueError(msg)
self._default = default
self._annotation = annotation
if name is _empty:
raise ValueError('name is a required attribute for Parameter')
if not isinstance(name, str):
raise TypeError("name must be a str, not a {!r}".format(name))
if not name.isidentifier():
raise ValueError('{!r} is not a valid parameter name'.format(name))
self._name = name
@property
def name(self):
return self._name
@property
def default(self):
return self._default
@property
def annotation(self):
return self._annotation
@property
def kind(self):
return self._kind
def replace(self, *, name=_void, kind=_void,
annotation=_void, default=_void):
'''Creates a customized copy of the Parameter.'''
if name is _void:
name = self._name
if kind is _void:
kind = self._kind
if annotation is _void:
annotation = self._annotation
if default is _void:
default = self._default
return type(self)(name, kind, default=default, annotation=annotation)
def __str__(self):
kind = self.kind
formatted = self._name
# Add annotation and default value
if self._annotation is not _empty:
formatted = '{}:{}'.format(formatted,
formatannotation(self._annotation))
if self._default is not _empty:
formatted = '{}={}'.format(formatted, repr(self._default))
if kind == _VAR_POSITIONAL:
formatted = '*' + formatted
elif kind == _VAR_KEYWORD:
formatted = '**' + formatted
return formatted
def __repr__(self):
return '<{} at {:#x} {!r}>'.format(self.__class__.__name__,
id(self), self.name)
def __eq__(self, other):
return (issubclass(other.__class__, Parameter) and
self._name == other._name and
self._kind == other._kind and
self._default == other._default and
self._annotation == other._annotation)
def __ne__(self, other):
return not self.__eq__(other)
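# Illustrative behaviour of Parameter.__str__ (a sketch):
#
#     >>> str(Parameter('x', Parameter.POSITIONAL_OR_KEYWORD, default=0))
#     'x=0'
#     >>> str(Parameter('args', Parameter.VAR_POSITIONAL))
#     '*args'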
class BoundArguments:
'''Result of `Signature.bind` call. Holds the mapping of arguments
to the function's parameters.
Has the following public attributes:
* arguments : OrderedDict
An ordered mutable mapping of parameters' names to arguments' values.
Does not contain arguments' default values.
* signature : Signature
The Signature object that created this instance.
* args : tuple
Tuple of positional arguments values.
* kwargs : dict
Dict of keyword arguments values.
'''
def __init__(self, signature, arguments):
self.arguments = arguments
self._signature = signature
@property
def signature(self):
return self._signature
@property
def args(self):
args = []
for param_name, param in self._signature.parameters.items():
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
break
try:
arg = self.arguments[param_name]
except KeyError:
# We're done here. Other arguments
# will be mapped in 'BoundArguments.kwargs'
break
else:
if param.kind == _VAR_POSITIONAL:
# *args
args.extend(arg)
else:
# plain argument
args.append(arg)
return tuple(args)
@property
def kwargs(self):
kwargs = {}
kwargs_started = False
for param_name, param in self._signature.parameters.items():
if not kwargs_started:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
kwargs_started = True
else:
if param_name not in self.arguments:
kwargs_started = True
continue
if not kwargs_started:
continue
try:
arg = self.arguments[param_name]
except KeyError:
pass
else:
if param.kind == _VAR_KEYWORD:
# **kwargs
kwargs.update(arg)
else:
# plain keyword argument
kwargs[param_name] = arg
return kwargs
def __eq__(self, other):
return (issubclass(other.__class__, BoundArguments) and
self.signature == other.signature and
self.arguments == other.arguments)
def __ne__(self, other):
return not self.__eq__(other)
class Signature:
'''A Signature object represents the overall signature of a function.
It stores a Parameter object for each parameter accepted by the
function, as well as information specific to the function itself.
A Signature object has the following public attributes and methods:
* parameters : OrderedDict
An ordered mapping of parameters' names to the corresponding
Parameter objects (keyword-only arguments are in the same order
as listed in `code.co_varnames`).
* return_annotation : object
The annotation for the return type of the function if specified.
If the function has no annotation for its return type, this
attribute is set to `Signature.empty`.
* bind(*args, **kwargs) -> BoundArguments
Creates a mapping from positional and keyword arguments to
parameters.
* bind_partial(*args, **kwargs) -> BoundArguments
Creates a partial mapping from positional and keyword arguments
to parameters (simulating 'functools.partial' behavior.)
'''
__slots__ = ('_return_annotation', '_parameters')
_parameter_cls = Parameter
_bound_arguments_cls = BoundArguments
empty = _empty
def __init__(self, parameters=None, *, return_annotation=_empty,
__validate_parameters__=True):
'''Constructs Signature from the given list of Parameter
objects and 'return_annotation'. All arguments are optional.
'''
if parameters is None:
params = OrderedDict()
else:
if __validate_parameters__:
params = OrderedDict()
top_kind = _POSITIONAL_ONLY
kind_defaults = False
for idx, param in enumerate(parameters):
kind = param.kind
name = param.name
if kind < top_kind:
msg = 'wrong parameter order: {!r} before {!r}'
msg = msg.format(top_kind, kind)
raise ValueError(msg)
elif kind > top_kind:
kind_defaults = False
top_kind = kind
if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):
if param.default is _empty:
if kind_defaults:
# No default for this parameter, but the
# previous parameter of the same kind had
# a default
msg = 'non-default argument follows default ' \
'argument'
raise ValueError(msg)
else:
# There is a default for this parameter.
kind_defaults = True
if name in params:
msg = 'duplicate parameter name: {!r}'.format(name)
raise ValueError(msg)
params[name] = param
else:
params = OrderedDict(((param.name, param)
for param in parameters))
self._parameters = types.MappingProxyType(params)
self._return_annotation = return_annotation
@classmethod
def from_function(cls, func):
'''Constructs Signature for the given python function'''
is_duck_function = False
if not isfunction(func):
if _signature_is_functionlike(func):
is_duck_function = True
else:
# If it's not a pure Python function, and not a duck type
# of pure function:
raise TypeError('{!r} is not a Python function'.format(func))
Parameter = cls._parameter_cls
# Parameter information.
func_code = func.__code__
pos_count = func_code.co_argcount
arg_names = func_code.co_varnames
positional = tuple(arg_names[:pos_count])
keyword_only_count = func_code.co_kwonlyargcount
keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
annotations = func.__annotations__
defaults = func.__defaults__
kwdefaults = func.__kwdefaults__
if defaults:
pos_default_count = len(defaults)
else:
pos_default_count = 0
parameters = []
# Non-keyword-only parameters w/o defaults.
non_default_count = pos_count - pos_default_count
for name in positional[:non_default_count]:
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD))
# ... w/ defaults.
for offset, name in enumerate(positional[non_default_count:]):
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD,
default=defaults[offset]))
# *args
if func_code.co_flags & CO_VARARGS:
name = arg_names[pos_count + keyword_only_count]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_POSITIONAL))
# Keyword-only parameters.
for name in keyword_only:
default = _empty
if kwdefaults is not None:
default = kwdefaults.get(name, _empty)
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_KEYWORD_ONLY,
default=default))
# **kwargs
if func_code.co_flags & CO_VARKEYWORDS:
index = pos_count + keyword_only_count
if func_code.co_flags & CO_VARARGS:
index += 1
name = arg_names[index]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_KEYWORD))
        # If 'func' is a pure Python function, don't validate the
        # parameter list (for correct order and defaults); it should be OK.
return cls(parameters,
return_annotation=annotations.get('return', _empty),
__validate_parameters__=is_duck_function)
@classmethod
def from_builtin(cls, func):
return _signature_from_builtin(cls, func)
@property
def parameters(self):
return self._parameters
@property
def return_annotation(self):
return self._return_annotation
def replace(self, *, parameters=_void, return_annotation=_void):
'''Creates a customized copy of the Signature.
Pass 'parameters' and/or 'return_annotation' arguments
to override them in the new copy.
'''
if parameters is _void:
parameters = self.parameters.values()
if return_annotation is _void:
return_annotation = self._return_annotation
return type(self)(parameters,
return_annotation=return_annotation)
def __eq__(self, other):
if (not issubclass(type(other), Signature) or
self.return_annotation != other.return_annotation or
len(self.parameters) != len(other.parameters)):
return False
other_positions = {param: idx
for idx, param in enumerate(other.parameters.keys())}
for idx, (param_name, param) in enumerate(self.parameters.items()):
if param.kind == _KEYWORD_ONLY:
try:
other_param = other.parameters[param_name]
except KeyError:
return False
else:
if param != other_param:
return False
else:
try:
other_idx = other_positions[param_name]
except KeyError:
return False
else:
if (idx != other_idx or
param != other.parameters[param_name]):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def _bind(self, args, kwargs, *, partial=False):
'''Private method. Don't use directly.'''
arguments = OrderedDict()
parameters = iter(self.parameters.values())
parameters_ex = ()
arg_vals = iter(args)
while True:
# Let's iterate through the positional arguments and corresponding
# parameters
try:
arg_val = next(arg_vals)
except StopIteration:
# No more positional arguments
try:
param = next(parameters)
except StopIteration:
# No more parameters. That's it. Just need to check that
# we have no `kwargs` after this while loop
break
else:
if param.kind == _VAR_POSITIONAL:
# That's OK, just empty *args. Let's start parsing
# kwargs
break
elif param.name in kwargs:
if param.kind == _POSITIONAL_ONLY:
msg = '{arg!r} parameter is positional only, ' \
'but was passed as a keyword'
msg = msg.format(arg=param.name)
raise TypeError(msg) from None
parameters_ex = (param,)
break
elif (param.kind == _VAR_KEYWORD or
param.default is not _empty):
# That's fine too - we have a default value for this
                    # parameter. So, let's start parsing `kwargs`, starting
# with the current parameter
parameters_ex = (param,)
break
else:
# No default, not VAR_KEYWORD, not VAR_POSITIONAL,
# not in `kwargs`
if partial:
parameters_ex = (param,)
break
else:
msg = '{arg!r} parameter lacking default value'
msg = msg.format(arg=param.name)
raise TypeError(msg) from None
else:
# We have a positional argument to process
try:
param = next(parameters)
except StopIteration:
raise TypeError('too many positional arguments') from None
else:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
# Looks like we have no parameter for this positional
# argument
raise TypeError('too many positional arguments')
if param.kind == _VAR_POSITIONAL:
# We have an '*args'-like argument, let's fill it with
# all positional arguments we have left and move on to
# the next phase
values = [arg_val]
values.extend(arg_vals)
arguments[param.name] = tuple(values)
break
if param.name in kwargs:
raise TypeError('multiple values for argument '
'{arg!r}'.format(arg=param.name))
arguments[param.name] = arg_val
# Now, we iterate through the remaining parameters to process
# keyword arguments
kwargs_param = None
for param in itertools.chain(parameters_ex, parameters):
if param.kind == _VAR_KEYWORD:
# Memorize that we have a '**kwargs'-like parameter
kwargs_param = param
continue
if param.kind == _VAR_POSITIONAL:
# Named arguments don't refer to '*args'-like parameters.
# We only arrive here if the positional arguments ended
# before reaching the last parameter before *args.
continue
param_name = param.name
try:
arg_val = kwargs.pop(param_name)
except KeyError:
# We have no value for this parameter. It's fine though,
# if it has a default value, or it is an '*args'-like
# parameter, left alone by the processing of positional
# arguments.
if (not partial and param.kind != _VAR_POSITIONAL and
param.default is _empty):
raise TypeError('{arg!r} parameter lacking default value'. \
format(arg=param_name)) from None
else:
if param.kind == _POSITIONAL_ONLY:
# This should never happen in case of a properly built
# Signature object (but let's have this check here
# to ensure correct behaviour just in case)
raise TypeError('{arg!r} parameter is positional only, '
'but was passed as a keyword'. \
format(arg=param.name))
arguments[param_name] = arg_val
if kwargs:
if kwargs_param is not None:
# Process our '**kwargs'-like parameter
arguments[kwargs_param.name] = kwargs
else:
raise TypeError('too many keyword arguments')
return self._bound_arguments_cls(self, arguments)
def bind(*args, **kwargs):
'''Get a BoundArguments object, that maps the passed `args`
and `kwargs` to the function's signature. Raises `TypeError`
if the passed arguments can not be bound.
'''
return args[0]._bind(args[1:], kwargs)
def bind_partial(*args, **kwargs):
'''Get a BoundArguments object, that partially maps the
passed `args` and `kwargs` to the function's signature.
Raises `TypeError` if the passed arguments can not be bound.
'''
return args[0]._bind(args[1:], kwargs, partial=True)
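    # Usage sketch (hypothetical function, not part of the original module):
    # binding mirrors an actual call without invoking the callable.
    #   def connect(host, port=389, *, timeout=None): pass
    #   sig = Signature.from_function(connect)
    #   sig.bind('srv1').arguments      # OrderedDict([('host', 'srv1')])
    #   sig.bind_partial(timeout=5)     # OK: partial binding may omit 'host'
    #   sig.bind(port=636)              # TypeError: 'host' parameter lacking default value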
def __str__(self):
result = []
render_pos_only_separator = False
render_kw_only_separator = True
for param in self.parameters.values():
formatted = str(param)
kind = param.kind
if kind == _POSITIONAL_ONLY:
render_pos_only_separator = True
elif render_pos_only_separator:
# It's not a positional-only parameter, and the flag
# is set to 'True' (there were pos-only params before.)
result.append('/')
render_pos_only_separator = False
if kind == _VAR_POSITIONAL:
# OK, we have an '*args'-like parameter, so we won't need
# a '*' to separate keyword-only arguments
render_kw_only_separator = False
elif kind == _KEYWORD_ONLY and render_kw_only_separator:
# We have a keyword-only parameter to render and we haven't
# rendered an '*args'-like parameter before, so add a '*'
# separator to the parameters list ("foo(arg1, *, arg2)" case)
result.append('*')
# This condition should be only triggered once, so
# reset the flag
render_kw_only_separator = False
result.append(formatted)
if render_pos_only_separator:
# There were only positional-only parameters, hence the
# flag was not reset to 'False'
result.append('/')
rendered = '({})'.format(', '.join(result))
if self.return_annotation is not _empty:
anno = formatannotation(self.return_annotation)
rendered += ' -> {}'.format(anno)
return rendered
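# Rendering sketch (hypothetical callables): '/' follows positional-only
# parameters, and '*' precedes keyword-only ones when no *args is present.
#   str(Signature.from_function(lambda a, *, b: None))      # '(a, *, b)'
#   str(Signature.from_function(lambda a, *args, b: None))  # '(a, *args, b)'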
def _main():
""" Logic for inspecting an object given at command line """
import argparse
import importlib
parser = argparse.ArgumentParser()
parser.add_argument(
'object',
help="The object to be analysed. "
"It supports the 'module:qualname' syntax")
parser.add_argument(
'-d', '--details', action='store_true',
help='Display info about the module rather than its source code')
args = parser.parse_args()
target = args.object
mod_name, has_attrs, attrs = target.partition(":")
try:
obj = module = importlib.import_module(mod_name)
except Exception as exc:
msg = "Failed to import {} ({}: {})".format(mod_name,
type(exc).__name__,
exc)
print(msg, file=sys.stderr)
exit(2)
if has_attrs:
parts = attrs.split(".")
obj = module
for part in parts:
obj = getattr(obj, part)
if module.__name__ in sys.builtin_module_names:
print("Can't get info for builtin modules.", file=sys.stderr)
exit(1)
if args.details:
print('Target: {}'.format(target))
print('Origin: {}'.format(getsourcefile(module)))
print('Cached: {}'.format(module.__cached__))
if obj is module:
print('Loader: {}'.format(repr(module.__loader__)))
if hasattr(module, '__path__'):
print('Submodule search path: {}'.format(module.__path__))
else:
try:
__, lineno = findsource(obj)
except Exception:
pass
else:
print('Line: {}'.format(lineno))
print('\n')
else:
print(getsource(obj))
if __name__ == "__main__":
_main()
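# Command-line sketch: the _main() hook above supports invocations such as
#   python -m inspect os.path:join    (print the source of os.path.join)
#   python -m inspect -d json         (print details about the json module)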
|
PennartLoettring/Poettrix
|
rootfs/usr/lib/python3.4/inspect.py
|
Python
|
gpl-2.0
| 103,929
|
[
"VisIt"
] |
a65505f17a72f8d07f85a68e7a90f2ba9962b77670593f8f7b22ff4e2fdcc05b
|
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
"""
Holds global stuff for tldap.
Q
Shortcut to :py:class:`tldap.query_utils.Q`, allows combining query terms.
DEFAULT_LDAP_ALIAS
Alias for default LDAP connection.
"""
from tldap.query_utils import Q # noqa: F401
__author__ = """Brian May"""
__email__ = 'brian@linuxpenguins.xyz'
__version__ = '1.0.6'
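# Hypothetical usage sketch (assuming Django-style Q semantics; the exact
# lookup syntax lives in tldap.query_utils, not here):
#   from tldap import Q
#   query = Q(uid='alice') | (Q(sn='Smith') & Q(givenName='Alice'))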
|
Karaage-Cluster/python-tldap
|
tldap/__init__.py
|
Python
|
gpl-3.0
| 1,024
|
[
"Brian"
] |
38e84da53d0814ad8ee09c92c83623bd3279a1f9db3972e19291043fb72b376c
|
# data modules
import glob
import sys
import astropy.io.fits as fits
import os
from os.path import join
import pickle
import time
# numerical modules
import numpy as n
from scipy.interpolate import interp1d
from scipy.misc import derivative
from scipy.optimize import minimize
from scipy.optimize import curve_fit
# plotting modules
import matplotlib
#matplotlib.use('pdf')
matplotlib.rcParams['font.size']=14
import matplotlib.pyplot as p
# mass function theory
from hmf import MassFunction
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
cosmoDS = FlatLambdaCDM(H0=68.46*u.km/u.s/u.Mpc, Om0=0.298734, Ob0=0.046961)
from scipy.interpolate import interp1d
from scipy.integrate import quad
# Fitting functions
# velocity function
vf = lambda v, A, v0, alpha, beta : n.log10( 10**A * ( 1.+(10**v/10**v0)**(-beta)) * n.e**(- (10**v/10**v0)**(alpha) ) )
vf_ps = lambda v, ps : vf( v, ps[0], ps[1], ps[2], ps[3])
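# Reading of the expression above: vf returns log10(dn/dlnV) for a
# Schechter-like form with amplitude 10**A, knee velocity 10**v0, a power law
# of index beta below the knee and an exponential cut-off of index alpha above
# it; the parameter values are fitted below.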
"""
# sheth and tormen function
fnu_ST = lambda nu, A, a, p: A * (2./n.pi)**(0.5) * (a*nu) * ( 1 + (a * nu) **(-2.*p) ) * n.e**( -( a * nu )**2. / 2.)
log_fnu_ST = lambda logNu, Anorm, a, q : n.log10( fnu_ST(10.**logNu, Anorm, a, q) )
log_fnu_ST_ps = lambda logNu, ps : n.log10( fnu_ST(10.**logNu, ps[0], ps[1], ps[2]) )
p_ST_despali = [0.333, 0.794, 0.247]
p_ST_sheth = [0.3222, 0.707, 0.3]
p_T08_klypin = [0.224, 1.67, 1.80, 1.48]
p_T08_RP = [0.144, 1.351, 3.113, 1.187]
# tinker function
fsigma_T08 = lambda sigma, A, a, b, c : A *(1+ (b/sigma)**a) * n.e**(-c/sigma**2.)
log_fsigma_T08 = lambda logsigma, A, a, b, c : n.log10(A *(1+ (b/(10**logsigma))**a) * n.e**(-c/(10**logsigma)**2.))
log_fsigma_T08_ps = lambda logsigma, ps : log_fsigma_T08(logsigma, ps[0], ps[1], ps[2], ps[3])
#fnu_SMT = lambda nu, Anorm, a, q : Anorm * (2./n.pi)**(0.5) * (a*nu) * ( 1 + (a*nu) **(-2*q) ) * n.e**( - (a*nu)**2. / 2.) #
#fnu_SMT = lambda nu, Anorm, a, q : Anorm *a * (2.*n.pi)**(-0.5) * ( 1 + (a**2*nu) **(-q) ) * n.e**( - a**2*nu / 2.)
#log_fnu_ST01 = lambda logNu, Anorm, a, q : n.log10( fnu_SMT(10.**logNu, Anorm, a, q) )
#log_fnu_ST01_ps = lambda logNu, ps : n.log10( fnu_SMT(10.**logNu, ps[0], ps[1], ps[2]) )
"""
delta_c = 1.686
sigma = n.arange(0.05,10,0.05)
X = n.arange(-0.6, 0.5, 0.01) #n.log10(1./sigma)
sigma = 10**-X
#f_dsp_nu = lambda nu, A, a, p: A* (a / (nu * 2 * n.pi))**(0.5) * ( 1 + 1. / (a*nu) **p ) * n.e**( - a * nu / 2.)
#nufnu_dsp = lambda nu, A, a, p: A* ((2 * a * nu) / ( n.pi))**(0.5) * ( 1 + (a*nu) **(-p) ) * n.e**( - a * nu / 2.)
# nu = (delta_c/sigma)**2.
f_T08 = lambda sigma, A, a, b, c : A*( (sigma/b)**(-a) + 1 )*n.e**(-c/sigma**2.)
f_ST = lambda sigma, A, a, p: A* (2./n.pi)**(0.5) * ( 1 + (sigma**2./(a*delta_c**2.))**(p) )*(delta_c*a**0.5/sigma)*n.e**(-a*delta_c**2./(2.*sigma**2.))
f_BH = lambda sigma, A, a, p, q: A* (2./n.pi)**(0.5) * ( 1 + (sigma**2./(a*delta_c**2.))**(p) )*(delta_c*a**0.5/sigma)**(q)*n.e**(-a*delta_c**2./(2.*sigma**2.))
b_BH = lambda sigma, a, p, q: 1 + (a*(delta_c/sigma)**2. - q) / delta_c + (2*p/delta_c)/(1 + (a*(delta_c/sigma)**2.)**p)
"""
M200c
ftT08 = f_T08(sigma, 0.186, 1.47, 2.57, 1.19)
ftSk14 = f_T08(sigma, 0.18587, 1.46690, 2.57110, 1.19396)
ftK16 = f_T08(sigma, 0.224, 1.67, 1.80, 1.48)
ftA12 = f_T08 (sigma, 0.201, 1.7, 2.08, 1.172)
ftW13 = f_T08 (sigma, 0.282, 2.163, 1.406, 1.21)
ftST01 = f_ST(sigma, 0.3222, 0.707, 0.3)
#ftD16 = f_dsp(sigma, 0.287, 0.903, 0.322 )
ftC16 = f_T08(sigma, 0.12, 1.19, 3.98, 1.35)
ftC16st = f_dsp(sigma, 0.2906, 0.8962, 0.1935 )
"""
ftC16 = f_BH(sigma, 0.28074, 0.90343, 0.64031, 1.69561)
ftC16st = f_ST(sigma, 0.31704, 0.81869, 0.11821)
ftC16st_sat = f_ST(sigma, 0.04235, 1.70219, 0.83118)
ftD16 = f_ST(sigma, 0.333, 0.794, 0.247 )
ftRP16 = f_T08 (sigma, 0.144, 1.351, 3.113, 1.187)
ftT08 = f_T08(sigma, 0.200, 1.47, 2.57, 1.19) # 300 at z=0
#ftST01 = f_ST(sigma, 0.3222, 0.707, 0.3)
ftST02 = f_ST(sigma, 0.3222, 0.75, 0.3)
ftBH11 = f_BH(sigma, 0.333, 0.788, 0.807, 1.795)
# MULTIDARK TABLE GENERIC FUNCTIONS
mSelection = lambda data, qty, logNpmin : (data["log_"+qty]>data["logMpart"]+logNpmin)
def vSelection(data, qty, limits_04, limits_10, limits_25, limits_40):
    """Per-box selection: keeps rows whose [qty_min, qty_max] interval lies within the limits set for that box size."""
    inBox = lambda L, lims : ((data["boxLengthComoving"]==L) & (data[qty+"_min"]>lims[0]) & (data[qty+"_max"]<lims[1]))
    return inBox(400., limits_04) | inBox(1000., limits_10) | inBox(2500., limits_25) | inBox(4000., limits_40)
zSelection = lambda data, zmin, zmax : (data["redshift"]>zmin)&(data["redshift"]<zmax)
nSelection = lambda data, NminCount, cos : (data['dN_counts_'+cos]>NminCount)
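# Illustrative composition (hypothetical thresholds): a fit sample combines the
# selections above, e.g.
#   ok = zSelection(data, 0., 2.5) & mSelection(data, 'mvir', 3.0) & nSelection(data, 1000, 'cen')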
# MVIR 1point FUNCTION
def plot_mvir_function_data(log_mvir, logsigM1, logNu, log_MF, log_MF_c, redshift, zmin, zmax, cos = "cen", figName="", dir=join(os.environ['MVIR_DIR'])):
"""
:param log_mvir: x coordinates
:param log_VF: y coordinates
:param redshift: color coordinate
:param zmin: minimum redshift
:param zmax: maximum redshift
:param cos: centra or satelitte. Default: "cen"
:param figName: string to be added to the figure name. Default:=""
:param dir: working directory. :param qty: quantity studied. Default: 'mvir'
"""
# now the plots
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(logsigM1, log_MF, c=redshift, s=5, marker='o',label="MD "+cos+" data", rasterized=True, vmin=zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
sigs = n.arange(-0.5,.6, 0.01)
#p.plot(X, n.log10(ftT08), 'k--', label='T08', lw=2)
p.plot(X, n.log10(ftC16), 'k--', label='fit', lw=2)
#p.plot(X, n.log10(ftBH11), 'g--', label='B11', lw=2)
#p.plot(X, n.log10(ftC16), 'r--', label='this work', lw=2)
p.xlabel(r'$log_{10}(\sigma^{-1})$')
p.ylabel(r'$\log_{10}\left[ \frac{M}{\rho_m} \frac{dn}{d\ln M} \left|\frac{d\ln M }{d\ln \sigma}\right|\right] $')
# log$_{10}[ n(>M)]')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
p.ylim((-3.5,0))
p.grid()
p.savefig(join(dir,"mvir-"+figName+cos+"-differential-function-data-xSigma.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(logNu, log_MF, c=redshift, s=5, marker='o',label="MD "+cos+" data", rasterized=True, vmin=zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
nus = n.arange(-0.3,1., 0.05)
#p.plot(nus, log_fnu_ST_ps(nus, p_ST_despali), 'k--', label='Despali 16')
#p.plot(nus, log_fnu_ST_ps(nus, p_ST_sheth), 'b--', label='Sheth 01')
p.xlabel(r'$ln(\nu)$')
p.ylabel(r'$\log_{10}\left[ \frac{M}{\rho_m} \frac{dn}{d\ln M} \left|\frac{d\ln M }{d\ln \sigma}\right|\right] $')
# log$_{10}[ n(>M)]')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
p.ylim((-3.5,0))
p.xlim((-0.3, 0.8))
p.grid()
p.savefig(join(dir,"mvir-"+figName+cos+"-differential-function-data-xNu.png"))
p.clf()
"""
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(log_mvir, log_MF, c=redshift, s=5, marker='o',label="MD "+cos+" data", rasterized=True, vmin=zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'log$_{10}[M_{vir}/(h^{-1}M_\odot)]$')
p.ylabel(r'log$_{10} (M^2/\rho_m) dn(M)/dM$')
# log$_{10}[ n(>M)]')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
p.ylim((-4.5,-1))
p.xlim((9.5,16))
p.grid()
p.savefig(join(dir,"mvir-"+figName+cos+"-differential-function-data-xMass.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(logsigM1, log_MF, c=redshift, s=5, marker='o',label="MD "+cos+" data", rasterized=True, vmin=zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'$ln(\sigma^{-1})$')
p.ylabel(r'log$_{10} (M^2/\rho_m) dn(M)/dM$')
# log$_{10}[ n(>M)]')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
p.ylim((-4.5,-1))
p.grid()
p.savefig(join(dir,"mvir-"+figName+cos+"-differential-function-data-xSigma.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(log_mvir, log_MF_c, c=redshift, s=5, marker='o',label="MD "+cos+" data", rasterized=True, vmin=zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'log$_{10}[M_{vir}/(h^{-1}M_\odot)]$')
p.ylabel(r'log$_{10} (M^2/\rho_m) n(>M)$')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
p.ylim((-4.5,-1))
p.xlim((9.5,16))
p.grid()
p.savefig(join(dir,"mvir-"+figName+cos+"-cumulative-function-data-xMass.png"))
p.clf()
"""
def plot_mvir_function_data_perBox(log_mvir, log_VF, MD04, MD10, MD25, MD25NW, MD40, MD40NW, cos = "cen", figName="", dir=join(os.environ['MVIR_DIR'])):
"""
:param log_mvir: x coordinates
:param log_VF: y coordinates
:param cos: centra or satelitte. Default: "cen"
:param figName: string to be added to the figure name. Default:=""
:param dir: working directory.
:param qty: quantity studied. Default: 'mvir'
"""
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
p.plot(log_mvir[MD04], log_VF[MD04],marker='1',label="MD04",ls='')
p.plot(log_mvir[MD10], log_VF[MD10],marker='2',label="MD10",ls='')
p.plot(log_mvir[MD25], log_VF[MD25],marker='|',label="MD25",ls='')
p.plot(log_mvir[MD40], log_VF[MD40],marker='_',label="MD40",ls='')
p.plot(log_mvir[MD25NW], log_VF[MD25NW],marker='+',label="MD25NW",ls='')
p.plot(log_mvir[MD40NW], log_VF[MD40NW],marker='x',label="MD40NW",ls='')
p.xlabel(r'log$_{10}[M_{vir}/(M_\odot)]$')
p.ylabel(r'$\log_{10}\left[ \frac{M}{\rho_m} \frac{dn}{d\ln M} \left|\frac{d\ln M }{d\ln \sigma}\right|\right] $')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
p.ylim((-4.5,0))
p.xlim((9.5,16))
p.grid()
p.savefig(join(dir,"mvir-"+figName+cos+"-differential-function-data-perBox.png"))
p.clf()
def plot_mvir_function_jackknife_poisson_error(x, y, MD04, MD10, MD25, MD25NW, MD40, MD40NW, cos = "cen", dir=join(os.environ['MVIR_DIR'])):
"""
:param x: x coordinates
:param y: y coordinates
:param cos: centra or satelitte. Default: "cen"
:param dir: working directory.
:param qty: quantity studied. Default: 'mvir'
"""
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
p.plot(x, y, 'ko', label='all', alpha=0.01)
p.plot(x[MD04], y[MD04],marker='x',label="MD04",ls='')
p.plot(x[MD10], y[MD10],marker='+',label="MD10",ls='')
p.plot(x[MD25], y[MD25],marker='^',label="MD25",ls='')
p.plot(x[MD40], y[MD40],marker='v',label="MD40",ls='')
p.plot(x[MD25NW], y[MD25NW],marker='^',label="MD25NW",ls='')
p.plot(x[MD40NW], y[MD40NW],marker='v',label="MD40NW",ls='')
xx = n.logspace(-4,0,20)
p.plot(xx, xx*3., ls='--', label='y=3x')
#p.axhline(Npmin**-0.5, c='r', ls='--', label='min counts cut')#r'$1/\sqrt{10^3}$')
#p.axhline((10**6.87)**-0.5, c='k', ls='--', label='min mvir cut')#r'$1/\sqrt{10^{4.87}}$')
#p.xlim((2e-4,4e-1))
#p.ylim((2e-4,4e-1))
p.ylabel(r'$1/\sqrt{count} \; [\%]$')
p.xlabel(r'Jackknife Resampling Error [%]')
p.yscale('log')
p.xscale('log')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join(dir,"mvir-"+cos+"-jackknife-countsSqrt.png"))
p.clf()
def plot_mvir_function_data_error(log_mvir, error, redshift, label, zmin, zmax, cos = "cen", figName="mvir-cen-data04-uncertainty.png", dir=join(os.environ['MVIR_DIR'])):
"""
:param log_mvir: x coordinates
:param error: y coordinates
:param redshift: color coordinate
:param label: label in the caption
:param zmin: minimum redshift
:param zmax: maximum redshift
:param cos: centra or satelitte. Default: "cen"
:param figName: string to be added to the figure name. Default:=""
:param dir: working directory. :param qty: quantity studied. Default: 'mvir'
"""
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(log_mvir, 100*error, c=redshift, s=5, marker='o',label=label, rasterized=True, vmin=zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
    p.xlabel(r'log$_{10}[M_{vir}/(h^{-1}M_\odot)]$')
p.ylabel(r'JK relative error [%]')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
p.ylim((2e-2,30))
#p.xlim((1.5, 3.5))
p.yscale('log')
p.grid()
p.savefig(join(dir,figName))
p.clf()
def fit_mvir_function_z0(data, x_data, y_data , y_err, p0, tolerance = 0.03, cos = "cen", dir=join(os.environ['MVIR_DIR'])):
"""
Fits a function to the mvir data
:param data: data table of the selected points for the fit
:param x_data: x coordinate
:param y_data: y coordinate
:param y_err: error
:param p0: first guess
:param tolerance: percentage error tolerance to compute how many points are outside of the fit
:param cos: central or satelitte
:param mode: fitting mode, "curve_fit" or "minimize"
:param dir: working dir
:param qty: mvir here
:return: result of the fit: best parameter array and covariance matrix
produces a plot of the residuals
"""
pOpt, pCov=curve_fit(log_fnu_ST, x_data, y_data, p0, y_err, maxfev=500000)#, bounds=boundaries)
print( "best params=", pOpt )
print( "err=", pCov.diagonal()**0.5 )
x_model = n.arange(n.min(x_data),n.max(x_data),0.005)
y_model = log_fnu_ST(x_model, pOpt[0], pOpt[1], pOpt[2])
n.savetxt(join(dir,"mvir-"+cos+"-differential-function-z0-model-pts.txt"),n.transpose([x_model, y_model]) )
    outfile=open(join(dir,"mvir-"+cos+"-diff-function-z0-params.pkl"), 'wb')
pickle.dump([pOpt, pCov], outfile)
outfile.close()
f_diff = y_data - log_fnu_ST(x_data, pOpt[0], pOpt[1], pOpt[2])
MD_sel_fun=lambda name : (data["boxName"]==name)
MDnames= n.array(['M04', 'M10', 'M25','M40','M25n','M40n'])
MDsels=n.array([MD_sel_fun(name) for name in MDnames])
f_diff_fun = lambda MDs: y_data[MDs] - log_fnu_ST(x_data[MDs], pOpt[0], pOpt[1], pOpt[2])
f_diffs = n.array([f_diff_fun(MD) for MD in MDsels])
print( "================================" )
# now the plots
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
for index, fd in enumerate(f_diffs):
inTol = (abs(10**fd-1)<tolerance)
print( index )
if len(fd)>0:
p.errorbar(x_data[MDsels[index]], 10**fd, yerr = y_err[MDsels[index]] , rasterized=True, fmt='none', label=MDnames[index])
print( len(inTol.nonzero()[0]), len(fd), 100.*len(inTol.nonzero()[0])/ len(fd) )
p.axhline(1.01,c='k',ls='--',label=r'syst $\pm1\%$')
p.axhline(0.99,c='k',ls='--')
p.xlabel(r'$log_{10}(\nu)$')
p.ylabel(r'data/model')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
#p.xlim((-0.7,0.6))
p.ylim((0.92,1.08))
#p.yscale('log')
p.grid()
p.savefig(join(dir,"fit-"+cos+"-differential-function-residual-log.png"))
p.clf()
return pOpt, pCov
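# Usage sketch (hypothetical data selection): x_data holds log10(nu) and y_data
# the measured log10(f(nu)); the Despali et al. parameters give a first guess:
#   pOpt, pCov = fit_mvir_function_z0(data, x_data, y_data, y_err, p0=p_ST_despali)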
# VMAX 1point FUNCTION
def plot_jackknife_poisson_error(x, y, MD04, MD10, MD25, MD25NW, MD40, MD40NW, DS80, cos = "cen", dir=join(os.environ['MVIR_DIR'])):
"""
:param x: x coordinates
:param y: y coordinates
:param cos: centra or satelitte. Default: "cen"
:param dir: working directory. :param qty: quantity studied. Default: 'vmax'
"""
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
p.plot(x, y, 'k,', label='all', alpha=0.5)
p.plot(x[MD04], y[MD04],marker='x',label="MD04",ls='')
p.plot(x[MD10], y[MD10],marker='+',label="MD10",ls='')
p.plot(x[MD25], y[MD25],marker='^',label="MD25",ls='')
p.plot(x[MD40], y[MD40],marker='v',label="MD40",ls='')
p.plot(x[MD25NW], y[MD25NW],marker='1',label="MD25NW",ls='')
p.plot(x[MD40NW], y[MD40NW],marker='2',label="MD40NW",ls='')
p.plot(x[DS80], y[DS80],marker='3',label="DS80",ls='')
xx = n.logspace(-4,0,20)
p.plot(xx, xx*3., ls='--', label='y=3x')
#p.axhline(Npmin**-0.5, c='r', ls='--', label='min counts cut')#r'$1/\sqrt{10^3}$')
#p.axhline((10**6.87)**-0.5, c='k', ls='--', label='min vmax cut')#r'$1/\sqrt{10^{4.87}}$')
p.xlim((1e-4,1))
p.ylim((1e-4,1))
p.ylabel(r'$1/\sqrt{count} \; [\%]$')
p.xlabel(r'Jackknife Resampling Error [%]')
p.yscale('log')
p.xscale('log')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join(dir,"jackknife-countsSqrt-"+cos+".png"))
p.clf()
def fit_mvir_function_zTrend(data, x_data, y_data, z_data , y_err, ps0=[0., 0., 0.], tolerance = 0.03, cos = "cen", mode = "curve_fit", dir=join(os.environ['MVIR_DIR']), zmin=0, zmax=2.5):
"""
Fits a function to the mvir data
:param data: data table of the selected points for the fit
:param x_data: x coordinate
:param y_data: y coordinate
:param z_data: redshift coordinate
:param y_err: error
:param ps0: first guess
:param tolerance: percentage error tolerance to compute how many points are outside of the fit
:param cos: central or satelitte
:param mode: fitting mode, "curve_fit" or "minimize"
:param dir: working dir
:param qty: mvir here
:return: result of the fit: best parameter array and covariance matrix
produces a plot of the residuals
"""
    outfile=open(join(dir,"mvir-"+cos+"-diff-function-z0-params.pkl"), 'rb')
pOpt, pCov = pickle.load(outfile)
outfile.close()
A0, a0, p0 = pOpt
Az = lambda z, A1 : A0+z*A1
az = lambda z, a1 : a0+z*a1
pz = lambda z, p1 : p0+z*p1
delta_c=1.686
Anorm0=0.333
fnu_SMT = lambda nu, Anorm, a, q : Anorm * (2./n.pi)**(0.5) * (a*nu) * ( 1 + (a*nu) **(-2*q) ) * n.e**( - (a*nu)**2. / 2.)
log_fnu_ST01 = lambda logNu, Anorm, a, q : n.log10( fnu_SMT(10.**logNu, Anorm, a, q) )
    log_fnu_ST01_ps = lambda logNu, ps : n.log10( fnu_SMT(10.**logNu, ps[0], ps[1], ps[2]) )
f_SMT_z = lambda sigma, z, A1, a1, p1: Az(z, A1) * (2.*az(z, a1)/n.pi)**(0.5) * ( 1 + ((delta_c/sigma)**2./az(z, a1)) **(pz(z, p1)) ) * n.e**( - az(z, a1) * (delta_c/sigma)**2. / 2.) * (delta_c/sigma)
log_f_ST01_zt_ps = lambda logSigma, z, ps : n.log10( f_SMT_z(10.**logSigma, z, ps[0], ps[1], ps[2]) )
print( "mode: minimize" )
chi2fun = lambda ps : n.sum( (log_f_ST01_zt_ps(x_data, z_data, ps) - y_data)**2. / (y_err)**2. )/(len(y_data) - len(ps0))
res = minimize(chi2fun, ps0, method='Powell',options={'xtol': 1e-8, 'disp': True, 'maxiter' : 5000000000000})
pOpt = res.x
pCov = res.direc
print( "best params=",pOpt )
print( "err=",pCov.diagonal()**0.5 )
#x_model = n.arange(n.min(x_data),n.max(x_data),0.005)
#y_model = log_f_ST01_zt_ps(x_model, pOpt)
#n.savetxt(join(dir,"mvir-"+cos+"-differential-function-zTrend-model-pts.txt"),n.transpose([x_model, y_model]) )
    outfile=open(join(dir,"mvir-"+cos+"-diff-function-zTrend-params.pkl"), 'wb')
pickle.dump([pOpt, pCov], outfile)
outfile.close()
f_diff = y_data - log_f_ST01_zt_ps(x_data, z_data, pOpt)
MD_sel_fun=lambda name : (data["boxName"]==name)
MDnames= n.array(['M04', 'M10', 'M25','M40','M25n','M40n'])
MDsels=n.array([MD_sel_fun(name) for name in MDnames])
f_diff_fun = lambda MDs: y_data[MDs] - log_f_ST01_zt_ps(x_data[MDs], z_data[MDs], pOpt)
f_diffs = n.array([f_diff_fun(MD) for MD in MDsels])
print("================================" )
# now the plots
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
for index, fd in enumerate(f_diffs):
inTol = (abs(10**fd-1)<tolerance)
print( index )
if len(fd)>0:
p.errorbar(x_data[MDsels[index]], 10**fd, yerr = y_err[MDsels[index]] , rasterized=True, fmt='none', label=MDnames[index])
print( len(inTol.nonzero()[0]), len(fd), 100.*len(inTol.nonzero()[0])/ len(fd) )
p.axhline(1.01,c='k',ls='--',label=r'syst $\pm1\%$')
p.axhline(0.99,c='k',ls='--')
p.xlabel(r'$log_{10}(\sigma)$')
p.ylabel(r'data/model')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
#p.xlim((-0.7,0.6))
p.ylim((0.9,1.1))
#p.yscale('log')
p.grid()
p.savefig(join(dir,"mvir-"+cos+"-differential-function-fit-ztrend-residual-log.png"))
p.clf()
p.figure(1,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(x_data, log_f_ST01_zt_ps(x_data, z_data, pOpt), c=z_data, s=5, marker='o',label="MD "+cos+" model", rasterized=True, vmin=zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'$log_{10}(\sigma)$')
p.ylabel(r'model Mvir Function')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join(dir,"mvir-"+cos+"-differential-function-fit-ztrend-model.png"))
p.clf()
return pOpt, pCov
def plot_vmax_function_data_error(log_vmax, error, redshift, label, zmin, zmax, cos = "cen", figName="vmax-cen-data04-uncertainty.png", dir=join(os.environ['VMAX_DIR'])):
"""
:param log_vmax: x coordinates
:param error: y coordinates
:param redshift: color coordinate
:param label: label in the caption
:param zmin: minimum redshift
:param zmax: maximum redshift
:param cos: centra or satelitte. Default: "cen"
:param figName: string to be added to the figure name. Default:=""
:param dir: working directory. :param qty: quantity studied. Default: 'vmax'
"""
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(log_vmax, 100*error, c=redshift, s=5, marker='o',label=label, rasterized=True, vmin=zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'log$_{10}[V_{max}/(km \; s^{-1})]$')
p.ylabel(r'JK relative error [%]')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
p.ylim((2e-2,30))
#p.xlim((1.5, 3.5))
p.yscale('log')
p.grid()
p.savefig(join(dir,figName))
p.clf()
def plot_vmax_function_data(log_vmax, log_VF, log_VF_c, redshift, zmin, zmax, cos = "cen", figName="", dir=join(os.environ['VMAX_DIR'])):
"""
:param log_vmax: x coordinates
:param log_VF: y coordinates
:param redshift: color coordinate
:param zmin: minimum redshift
:param zmax: maximum redshift
:param cos: centra or satelitte. Default: "cen"
:param figName: string to be added to the figure name. Default:=""
:param dir: working directory. :param qty: quantity studied. Default: 'vmax'
"""
# now the plots
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(log_vmax, log_VF, c=redshift, s=5, marker='o',label="MD "+cos+" data", rasterized=True, vmin=zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'log$_{10}[V_{max}/(km \; s^{-1})]$')
p.ylabel(r'log$_{10} [V^3/H^3(z)\; dn(V)/dlnV]$') # log$_{10}[ n(>M)]')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
p.ylim((-5.5,0))
#p.xlim((1.5, 3.5))
#p.ylim((-3.5,-1))
p.grid()
p.savefig(join(dir,"vmax-"+figName+cos+"-differential-function-data.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(log_vmax, log_VF_c, c=redshift, s=5, marker='o',label="MD "+cos+" data", rasterized=True, vmin=zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'log$_{10}[V_{max}/(km \; s^{-1})]$')
p.ylabel(r'log$_{10} [V^3/H^3(z)\; n(>V)]$') # log$_{10}[ n(>M)]')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
#p.ylim((-8,1))
#p.xlim((1.5, 3.5))
#p.yscale('log')
p.grid()
p.savefig(join(dir,"vmax-"+figName+cos+"-cumulative-function-data.png"))
p.clf()
def plot_vmax_function_data_perBox(log_vmax, log_VF, log_VF_c, MD04, MD10, MD25, MD25NW, MD40, MD40NW, cos = "cen", figName="", dir=join(os.environ['VMAX_DIR'])):
"""
:param log_vmax: x coordinates
:param log_VF: y coordinates
:param cos: centra or satelitte. Default: "cen"
:param figName: string to be added to the figure name. Default:=""
:param dir: working directory. :param qty: quantity studied. Default: 'vmax'
"""
# now the plots
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
p.plot(log_vmax[MD04], log_VF[MD04],marker='1',label="MD04",ls='')
p.plot(log_vmax[MD10], log_VF[MD10],marker='2',label="MD10",ls='')
p.plot(log_vmax[MD25], log_VF[MD25],marker='|',label="MD25",ls='')
p.plot(log_vmax[MD40], log_VF[MD40],marker='_',label="MD40",ls='')
p.plot(log_vmax[MD25NW], log_VF[MD25NW],marker='+',label="MD25NW",ls='')
p.plot(log_vmax[MD40NW], log_VF[MD40NW],marker='x',label="MD40NW",ls='')
p.xlabel(r'log$_{10}[V_{max}/(km \; s^{-1})]$')
p.ylabel(r'log$_{10} [(V^3/H^3(z)\; dn(V)/dlnV]$') # log$_{10}[ n(>M)]')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
#p.ylim((-8,1))
#p.xlim((1.5, 3.5))
p.ylim((-5.5,0))
p.grid()
p.savefig(join(dir,"vmax-"+figName+cos+"-differential-function-data-perBox.png"))
p.clf()
def fit_vmax_function_z0(data, x_data, y_data , y_err, p0, tolerance = 0.03, cos = "cen", mode = "curve_fit", dir=join(os.environ['VMAX_DIR']), suffix='0'):
"""
Fits a function to the vmax data
:param data: data table of the selected points for the fit
:param x_data: x coordinate
:param y_data: y coordinate
:param y_err: error
:param p0: first guess
:param tolerance: percentage error tolerance to compute how many points are outside of the fit
:param cos: central or satelitte
:param mode: fitting mode, "curve_fit" or "minimize"
:param dir: working dir
:param qty: vmax here
:return: result of the fit: best parameter array and covariance matrix
produces a plot of the residuals
"""
chi2fun = lambda ps : n.sum( (vf_ps(x_data, ps) - y_data)**2. / (y_err)**2. )/(len(y_data) - len(ps))
if mode == "curve_fit":
print("mode: curve_fit" )
pOpt, pCov=curve_fit(vf, x_data, y_data, p0, y_err, maxfev=500000000)#, bounds=boundaries)
print("best params=",pOpt[0], pOpt[1], pOpt[2], pOpt[3] )
print("err=",pCov[0][0]**0.5, pCov[1][1]**0.5, pCov[2][2]**0.5, pCov[3][3]**0.5 )
print("Rchi2, ndof, chi2", chi2fun(pOpt), len(x_data)-len(pOpt), chi2fun(pOpt)*( len(x_data)-len(pOpt) ) )
if mode == "minimize":
print("mode: minimize" )
res = minimize(chi2fun, p0, method='Powell',options={'xtol': 1e-8, 'disp': True, 'maxiter' : 5000000000000})
pOpt = res.x
pCov = res.direc
print("best params=",pOpt[0], pOpt[1], pOpt[2], pOpt[3] )
print("err=",pCov[0][0]**0.5, pCov[1][1]**0.5, pCov[2][2]**0.5, pCov[3][3]**0.5 )
print("Rchi2, ndof, chi2", chi2fun(pOpt), len(x_data)-len(pOpt), chi2fun(pOpt)*( len(x_data)-len(pOpt) ) )
x_model = n.arange(n.min(x_data),n.max(x_data),0.005)
y_model = vf(x_model, pOpt[0], pOpt[1], pOpt[2], pOpt[3])
n.savetxt(join(dir,"vmax-"+cos+"-differential-function-z0-model-pts.txt"),n.transpose([x_model, y_model]) )
    outfile=open(join(dir,"vmax-"+cos+"-diff-function-params-"+suffix+".pkl"), 'wb')
pickle.dump([pOpt, pCov], outfile)
outfile.close()
f_diff = y_data - vf(x_data, pOpt[0], pOpt[1], pOpt[2], pOpt[3])
MD_sel_fun=lambda name : (data["boxName"]==name)
MDnames= n.array(['M04', 'M10', 'M25','M40','M25n','M40n'])
MDsels=n.array([MD_sel_fun(name) for name in MDnames])
f_diff_fun = lambda MDs: y_data[MDs] - vf(x_data[MDs], pOpt[0], pOpt[1], pOpt[2], pOpt[3])
f_diffs = n.array([f_diff_fun(MD) for MD in MDsels])
print("================================" )
# now the plots
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
for index, fd in enumerate(f_diffs):
inTol = (abs(10**fd-1)<tolerance)
print(index )
if len(fd)>0:
p.errorbar(x_data[MDsels[index]], 10**fd, yerr = y_err[MDsels[index]] , rasterized=True, fmt='none', label=MDnames[index])
print(len(inTol.nonzero()[0]), len(fd), 100.*len(inTol.nonzero()[0])/ len(fd))
p.axhline(1.01,c='k',ls='--',label=r'syst $\pm1\%$')
p.axhline(0.99,c='k',ls='--')
p.xlabel(r'$log(V_{max})$')
p.ylabel(r'data/model')
gl = p.legend(loc=0,fontsize=10)
gl.set_frame_on(False)
#p.xlim((-0.7,0.6))
p.ylim((0.8,1.2))
#p.yscale('log')
p.grid()
p.savefig(join(dir,"vmax-"+cos+"-differential-function-fit-residual-log.png"))
p.clf()
return pOpt, pCov
# MULTIDARK DATA OUTPUT HANDLING
def getStat(file,volume,unitVolume):
"""
From the pickle file output by the Multidark class, we output the number counts (differential and cumulative) per unit volume per mass bin.
:param file: filename
:param volume: total volume of the box
:param unitVolume: sub volume used in the jackknife
:return: Number counts, cumulative number counts, count density, cumulative count density, jackknife mean, jackknife std, cumulative jackknife mean, cumulative jackknife std
"""
    # print(file)
    data=pickle.load(open(file,'rb'))
data_c = n.array([n.array([ n.sum(el[ii:]) for ii in range(len(el)) ]) for el in data])
Ncounts = data.sum(axis=0)
Ncounts_c = data_c.sum(axis=0) # n.array([ n.sum(Ncounts[ii:]) for ii in range(len(Ncounts)) ])
Nall = Ncounts / volume
Nall_c = Ncounts_c / volume
index=n.arange(int(data.shape[0]))
n.random.shuffle( index )
Ntotal = int(data.shape[0])
    # jackknife: repeatedly discard a fraction pcDiscard of the sub-volumes
    def get_mean_std( pcDiscard = 0.1):
        """
        Retrieves the mean and std from the jackknife.
        :param pcDiscard: fraction of sub-volumes to discard for the jackknife
        :return: mean, std, cumulative mean and cumulative std
        """
        Ndiscard = int(Ntotal * pcDiscard)
        resamp = n.arange(0, Ntotal+1, Ndiscard)
N90 = n.array([n.sum(data[n.delete(n.arange(Ntotal), index[resamp[i]:resamp[i+1]])], axis=0) for i in range(len(resamp)-1)]) / (unitVolume*(Ntotal - Ndiscard) )
mean90 = n.mean(N90, axis=0)
std90 = n.std(N90, axis=0) / mean90
N90_c = n.array([n.sum(data_c[n.delete(n.arange(Ntotal), index[resamp[i]:resamp[i+1]])], axis=0) for i in range(len(resamp)-1)]) / (unitVolume*(Ntotal - Ndiscard) )
mean90_c = n.mean(N90_c, axis=0)
std90_c = n.std(N90_c, axis=0) / mean90_c
return mean90, std90, mean90_c, std90_c
mean90, std90, mean90_c, std90_c = get_mean_std(0.1)
#mean99, std99, mean99_c, std99_c = getMS(0.01)
#sel = Nall>1/volume
#print(std99[sel]/std90[sel] )
#print(mean90[sel]/mean99[sel])
#print(Nall[sel]/mean99[sel] )
#print(Nall[sel]/mean90[sel] )
return Ncounts, Ncounts_c, Nall, Nall_c, mean90, std90, mean90_c, std90_c
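# Usage sketch (hypothetical file): for a box of proper side boxLength cut into
# 10x10x10 jackknife cells,
#   volume = boxLength**3.
#   unitVolume = (boxLength*0.10)**3.
#   Ncounts, Ncounts_c, Nall, Nall_c, m90, s90, m90_c, s90_c = getStat(fileC, volume, unitVolume)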
def get_hf(sigma_val=0.8228, boxRedshift=0., delta_wrt='mean'):
"""
Halo mass function model for the MultiDark simulation.
"""
#hf0 = MassFunction(cosmo_model=cosmo, sigma_8=sigma_val, z=boxRedshift)
omega = lambda zz: cosmoMD.Om0*(1+zz)**3. / cosmoMD.efunc(zz)**2
DeltaVir_bn98 = lambda zz : (18.*n.pi**2. + 82.*(omega(zz)-1)- 39.*(omega(zz)-1)**2.)/omega(zz)
print("DeltaVir", DeltaVir_bn98(boxRedshift), " at z",boxRedshift )
hf1 = MassFunction(cosmo_model=cosmoMD, sigma_8=sigma_val, z=boxRedshift, delta_h=DeltaVir_bn98(boxRedshift), delta_wrt=delta_wrt, Mmin=7, Mmax=16.5)
return hf1
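# Usage sketch: a MassFunction at z=0.5 with the Bryan & Norman (1998) virial
# overdensity; hf.M and hf.sigma then give the mass-sigma relation used below.
#   hf = get_hf(sigma_val=0.8228, boxRedshift=0.5)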
def get_hf_ds(sigma_val=0.8355, boxRedshift=0., delta_wrt='mean'):
"""
Halo mass function model for the Darkskies simulation.
"""
#hf0 = MassFunction(cosmo_model=cosmo, sigma_8=sigma_val, z=boxRedshift)
omega = lambda zz: cosmoDS.Om0*(1+zz)**3. / cosmoDS.efunc(zz)**2
DeltaVir_bn98 = lambda zz : (18.*n.pi**2. + 82.*(omega(zz)-1)- 39.*(omega(zz)-1)**2.)/omega(zz)
print("DeltaVir", DeltaVir_bn98(boxRedshift), " at z",boxRedshift )
hf1 = MassFunction(cosmo_model=cosmoDS, sigma_8=sigma_val, z=boxRedshift, delta_h=DeltaVir_bn98(boxRedshift), delta_wrt=delta_wrt, Mmin=7, Mmax=16.5)
return hf1
def get_basic_info(fileC, boxZN, delta_wrt='mean'):
"""
For a HMF measurement, this function returns all the basic information about the simulation used:
- a HMF model with the corrected sigma8 value: hf,
- size of the box: boxLength, boxLengthComoving
- name of the simulation used later when writing results boxName
- the redshiftof the measured HMF: boxRedshift, 7
- the logmass of one particle: logmp,
- correction to the mass measured due to force resolution: massCorrection
"""
if fileC.find('MD_0.4Gpc')>0:
boxName='MD_0.4Gpc'
nSN, aSN = n.loadtxt(join(os.environ['MD04_DIR'],"redshift-list.txt"), unpack=True, dtype={'names': ('nSN', 'aSN'), 'formats': ('i4', 'f4')})
conversion = dict(n.transpose([ nSN, 1/aSN-1 ]))
boxRedshift = conversion[boxZN]
hf = get_hf(0.8228*0.953**0.5, boxRedshift, delta_wrt=delta_wrt)
# hf_ref = get_hf(0.8228, boxRedshift, delta_wrt=delta_wrt)
logmp = n.log10(9.63 * 10**7/cosmoMD.h)
boxLength = 400./cosmoMD.h/cosmoMD.efunc(boxRedshift)
massCorrection = 1. - 0.0002
boxLengthComoving = 400.
elif fileC.find('MD_1Gpc')>0 :
boxName='MD_1Gpc'
nSN, aSN = n.loadtxt(join(os.environ['MD10_DIR'],"redshift-list.txt"), unpack=True, dtype={'names': ('nSN', 'aSN'), 'formats': ('i4', 'f4')})
conversion = dict(n.transpose([ nSN, 1/aSN-1 ]))
boxRedshift = conversion[boxZN]
#boxRedshift = 1./boxZN - 1.
hf = get_hf(0.8228*1.004**0.5, boxRedshift, delta_wrt=delta_wrt)
# hf_ref = get_hf(0.8228, boxRedshift, delta_wrt=delta_wrt)
logmp = n.log10(1.51 * 10**9/cosmoMD.h)
boxLength = 1000./cosmoMD.h/cosmoMD.efunc(boxRedshift)
massCorrection = 1. - 0.0005
boxLengthComoving = 1000.
elif fileC.find('MD_2.5GpcNW')>0 :
boxName='MD_2.5GpcNW'
nSN, aSN = n.loadtxt(join(os.environ['MD25NW_DIR'],"redshift-list.txt"), unpack=True, dtype={'names': ('nSN', 'aSN'), 'formats': ('i4', 'f4')})
conversion = dict(n.transpose([ nSN, 1/aSN-1 ]))
boxRedshift = conversion[boxZN]
hf = get_hf(0.8228*1.01**0.5, boxRedshift, delta_wrt=delta_wrt)
# hf_ref = get_hf(0.8228, boxRedshift, delta_wrt=delta_wrt)
hz = 1.# hf.cosmoMD.H( boxRedshift ).value / 100.
logmp = n.log10(2.359 * 10**10/cosmoMD.h )
boxLength = 2500./cosmoMD.h/cosmoMD.efunc(boxRedshift)
massCorrection = 1. - 0.001
boxLengthComoving = 2500.
elif fileC.find('MD_4GpcNW')>0 :
boxName='MD_4GpcNW'
nSN, redshift40, aSN = n.loadtxt(join(os.environ['MD40NW_DIR'],"redshift-list.txt"), unpack=True, dtype={'names': ('nSN', 'redshift', 'aSN'), 'formats': ('i4', 'f4', 'f4')})
conversion = dict(n.transpose([ nSN, redshift40 ]))
boxRedshift = conversion[boxZN]
hf = get_hf(0.8228*1.008**0.5, boxRedshift, delta_wrt=delta_wrt)
# hf_ref = get_hf(0.8228, boxRedshift, delta_wrt=delta_wrt)
hz = 1.# hf.cosmoMD.H( boxRedshift ).value / 100.
logmp = n.log10(9.6 * 10**10/cosmoMD.h )
boxLength = 4000./cosmoMD.h/cosmoMD.efunc(boxRedshift)
boxLengthComoving = 4000.
massCorrection = 1. - 0.003
elif fileC.find('MD_2.5Gpc')>0 :
boxName='MD_2.5Gpc'
nSN, aSN, redshift25 = n.loadtxt(join(os.environ['MD25_DIR'],"redshift-list.txt"), unpack=True, dtype={'names': ('nSN', 'aSN', 'redshift'), 'formats': ('i4', 'f4', 'f4')})
conversion = dict(n.transpose([ nSN, redshift25 ]))
boxRedshift = conversion[boxZN]
hf = get_hf(0.8228*1.01**0.5, boxRedshift, delta_wrt=delta_wrt)
# hf_ref = get_hf(0.8228, boxRedshift, delta_wrt=delta_wrt)
hz = 1.# hf.cosmoMD.H( boxRedshift ).value / 100.
logmp = n.log10(2.359 * 10**10/cosmoMD.h)
boxLength = 2500./cosmoMD.h/cosmoMD.efunc(boxRedshift)
boxLengthComoving = 2500.
massCorrection = 1. - 0.001
elif fileC.find('MD_4Gpc')>0 :
boxName='MD_4Gpc'
nSN, redshift40, aSN = n.loadtxt(join(os.environ['MD40_DIR'],"redshift-list.txt"), unpack=True, dtype={'names': ('nSN', 'redshift', 'aSN'), 'formats': ('i4', 'f4', 'f4')})
conversion = dict(n.transpose([ nSN, redshift40 ]))
boxRedshift = conversion[boxZN]
hf = get_hf(0.8228*1.008**0.5, boxRedshift, delta_wrt=delta_wrt)
# hf_ref = get_hf(0.8228, boxRedshift, delta_wrt=delta_wrt)
hz = 1.# hf.cosmoMD.H( boxRedshift ).value / 100.
logmp = n.log10(9.6 * 10**10 /cosmoMD.h )
boxLength = 4000./cosmoMD.h/cosmoMD.efunc(boxRedshift)
boxLengthComoving = 4000.
massCorrection = 1. - 0.003
elif fileC.find('ds14_0')>0 :
boxName='DS_8Gpc'
boxRedshift = 0.
hf = get_hf_ds(0.8355, boxRedshift, delta_wrt=delta_wrt)
# hf_ref = get_hf(0.8228, boxRedshift, delta_wrt=delta_wrt)
hz = 1.# hf.cosmoMD.H( boxRedshift ).value / 100.
logmp = n.log10(9.6 * 10**10 /cosmoDS.h )
boxLength = 8000./cosmoMD.h/cosmoDS.efunc(boxRedshift)
boxLengthComoving = 8000.
massCorrection = 1. #- 0.003
return hf, boxLength, boxName, boxRedshift, logmp, boxLengthComoving, massCorrection
def convert_pkl_mass(fileC, fileS, binFile, qty='mvir', delta_wrt='mean'):
"""
Creates a fits file with each measurements made. It linksglobal parameters to the HMF values.
:param qty: one point function variable.Default: mvir.
:param fileC: file with the central halo statistics
:param fileS: file with the satelitte halo statistics
:param binFile: file with the bins
:return: a fits table containing the one point function histograms
"""
#print("qty", qty )
boxZN = float(os.path.basename(fileC).split('_')[1])
#print(boxZN )
extraName = os.path.basename(fileS)[:-27]
hf, boxLength, boxName, boxRedshift, logmp, boxLengthComoving, massCorrection = get_basic_info(fileC, boxZN, delta_wrt='mean')
#print(boxName )
bins = n.log10( 10**n.loadtxt(binFile) * massCorrection )
#bins = n.log10( 10**bins_in / hz )
#bins = n.loadtxt(binFile)
logmass = ( bins[1:] + bins[:-1] )/2.
mass = 10**logmass
dX = ( 10**bins[1:] - 10**bins[:-1] )
#dlnbin = dX / mass
dlnbin = (bins[1:] - bins[:-1])*n.log(10)
#print(dX / mass, dlnbin )
    # select meaningful masses: at least 10 times the particle mass
ok = (logmass > logmp+1.0)&(logmass<16.1)
#print("bins", len(bins), bins
#print("bins[ok]", len(bins[:-1][ok]), bins[:-1][ok]
hz = cosmoMD.H( boxRedshift ).value / 100.
# m sigma relation using the sigma8 corrected power spectrum
m2sigma = interp1d(hf.M, hf.sigma )
sig = m2sigma( mass )
# m nu relation: nu = (delta_c / sigma_m)**2
m2nu = interp1d(hf.M, hf.nu )
nnu = m2nu( mass )
# jacobian
toderive = interp1d(n.log(hf.M), n.log(hf.sigma))
dlnsigmadlnm = derivative(toderive, n.log(mass) )
    # normalization by the average universe density at this redshift in the right cosmology
rhom_units = cosmoMD.Om(boxRedshift)*cosmoMD.critical_density(boxRedshift).to(u.solMass/(u.Mpc)**3.)#/(cosmoMD.h)**2.
# in units (Msun/h) / (Mpc/h)**3
rhom = rhom_units.value # hf.mean_density#/(hz)**2.
#print(hf.mean_density, rhom / (hf.mean_density*(cosmoMD.h)**2.)
col0 = fits.Column( name="boxName",format="14A", array= n.array([boxName for i in range(len(bins[:-1][ok]))]))
col1 = fits.Column( name="redshift",format="D", array= boxRedshift * n.ones( len(bins[:-1][ok]) ) )
col1b = fits.Column( name="h",format="D", array= hz * n.ones( len(bins[:-1][ok]) ) )
col2a = fits.Column( name="boxLengthProper",format="D", array= boxLength * n.ones( len(bins[:-1][ok]) ) )
col2b = fits.Column( name="boxLengthComoving",format="D", array= boxLengthComoving * n.ones( len(bins[:-1][ok]) ) )
col3 = fits.Column( name="logMpart",format="D", array= logmp* n.ones( len(bins[:-1][ok]) ) )
col4 = fits.Column( name="rhom",format="D", array= rhom* n.ones( len(bins[:-1][ok]) ) )
#print("bin test", bins[:-1][ok]
col5_0 = fits.Column( name="log_"+qty+"_min",format="D", array= bins[:-1][ok] )
col5_1 = fits.Column( name="log_"+qty+"_max",format="D", array= bins[1:][ok] )
col5_2 = fits.Column( name="log_"+qty,format="D", array= logmass[ok])
#print("test2", len( logmass[ok]), logmass[ok]
#print("columns", col5_0, col5_1, col5_2
#print("array", col5_0.array, col5_1.array, col5_2.array
col5_3 = fits.Column( name="sigmaM",format="D", array= sig[ok] )
col5_4 = fits.Column( name="nu2",format="D", array= nnu[ok])#hf.delta_c/sig ) # hf.growth_factor/
col5_5 = fits.Column( name="dlnsigmaMdlnM",format="D", array= dlnsigmadlnm[ok] )
unitVolume = (boxLength *0.10)**3. # /cosmoMD.H(boxRedshift)
volume = (boxLength)**3.
Ncounts, Ncounts_c, Nall, Nall_c, mean90, std90, mean90_c, std90_c = getStat(fileC,volume,unitVolume)
col6c = fits.Column( name="dN_counts_cen",format="D", array= Ncounts[ok] )
col6cc = fits.Column( name="dN_counts_cen_c",format="D", array= Ncounts_c[ok])
col7c = fits.Column( name="dNdV_cen",format="D", array= Nall[ok] )
col7cc = fits.Column( name="dNdV_cen_c",format="D", array= Nall_c[ok] )
col8c = fits.Column( name="dNdlnM_cen",format="D", array= Nall[ok]/dlnbin[ok] )
col8cc = fits.Column( name="dNdlnM_cen_c",format="D", array= Nall_c[ok]/dlnbin[ok] )
col9c = fits.Column( name="std90_pc_cen",format="D", array= std90[ok] )
col9cc = fits.Column( name="std90_pc_cen_c",format="D", array= std90_c[ok] )
Ncounts, Ncounts_c, Nall, Nall_c, mean90, std90, mean90_c, std90_c = getStat(fileS,volume,unitVolume)
col6s = fits.Column( name="dN_counts_sat",format="D", array= Ncounts[ok] )
col6sc = fits.Column( name="dN_counts_sat_c",format="D", array= Ncounts_c[ok])
col7s = fits.Column( name="dNdV_sat",format="D", array= Nall[ok] )
col7sc = fits.Column( name="dNdV_sat_c",format="D", array= Nall_c[ok] )
col8s = fits.Column( name="dNdlnM_sat",format="D", array= Nall[ok]/dlnbin[ok] )
col8sc = fits.Column( name="dNdlnM_sat_c",format="D", array= Nall_c[ok] / dlnbin[ok] )
col9s = fits.Column( name="std90_pc_sat",format="D", array= std90[ok] )
col9sc = fits.Column( name="std90_pc_sat_c",format="D", array= std90_c[ok] )
tbhdu = fits.BinTableHDU.from_columns([col0, col1,col1b, col2a, col2b, col3, col4, col5_0, col5_1, col5_2, col5_3, col5_4, col5_5, col6c, col7c, col8c, col9c, col6cc, col7cc, col8cc, col9cc, col6s, col7s, col8s, col9s, col6sc, col7sc, col8sc, col9sc ])
prihdr = fits.Header()
prihdr['AUTHOR'] = 'J. Comparat'
prihdr['DATE'] = time.time()
prihdu = fits.PrimaryHDU(header=prihdr)
thdulist = fits.HDUList([prihdu, tbhdu])
writeName = join(os.environ['MVIR_DIR'], "data", boxName+"_"+str(boxRedshift)+"_"+qty+".fits")
print(writeName)
if os.path.isfile(writeName):
os.remove(writeName)
thdulist.writeto(writeName)
#return hf
def convert_pkl_massFunction_covarianceMatrix(fileC, binFile, qty='mvir', delta_wrt='mean'):#, gt14=True):
"""
Return a mass function covariance matrix
:param qty: one point function variable.Default: mvir.
:param file: file with the central halo statistics
:param binFile: file with the bins
:return: a fits table containing the one point function histograms
"""
print(fileC)
boxZN = float(os.path.basename(fileC).split('_')[1])
hf, boxLength, boxName, boxRedshift, logmp, boxLengthComoving, massCorrection = get_basic_info(fileC, boxZN, delta_wrt='mean')
bins = n.log10( 10**n.loadtxt(binFile) * massCorrection )
logmass = ( bins[1:] + bins[:-1] )/2.
mass = 10**logmass
dX = ( 10**bins[1:] - 10**bins[:-1] )
dlnbin = abs((bins[1:] - bins[:-1])*n.log(10))
    # the mass selection (1000 times the particle mass) is applied further below
hz = cosmoMD.H( boxRedshift ).value / 100.
# m sigma relation using the sigma8 corrected power spectrum
m2sigma = interp1d(hf.M, hf.sigma )
sig = m2sigma( mass )
# m nu relation: nu = (delta_c / sigma_m)**2
m2nu = interp1d(hf.M, hf.nu )
nnu = m2nu( mass )
# jacobian
toderive = interp1d(n.log(hf.M), n.log(hf.sigma))
dlnsigmadlnm = abs(derivative(toderive, n.log(mass) ))
    # normalization by the average universe density at this redshift in the right cosmology
rhom_units = cosmoMD.Om(boxRedshift)*cosmoMD.critical_density(boxRedshift).to(u.solMass/(u.Mpc)**3.)
rhom = rhom_units.value # hf.mean_density#/(hz)**2.
unitVolume = (boxLength *0.10)**3.
volume = (boxLength)**3.
    data_i=pickle.load(open(fileC,'rb'))
Ncounts_i = data_i.sum(axis=0)
ok = (logmass > logmp+3.0)&(logmass<16.1)&(Ncounts_i>10)
"""
if gt14:
ok = (logmass > 13.9)&(logmass<16.1)&(Ncounts_i>10)
else:
ok = (logmass > logmp+3.0)&(logmass<13.9)&(Ncounts_i>10)
"""
data = data_i.T[ok].T
#print(data.shape
Ncounts = data.sum(axis=0)
#print(Ncounts.shape
Nall = Ncounts / volume
#print(Nall.shape
dNdlnM = Nall/dlnbin[ok]
#print(dNdlnM.shape
count_matrix = n.outer(Ncounts, Ncounts)/float(data.shape[0])
#print(count_matrix.shape
dNdlnM_mat = data/unitVolume/dlnbin[ok]
#print(dNdlnM_mat.shape
f_mean = mass[ok] * dNdlnM / rhom / dlnsigmadlnm[ok]
#print(f_mean.shape
f_matrix = mass[ok] * dNdlnM_mat / rhom / dlnsigmadlnm[ok]
#print(f_matrix.shape
#print(boxName
return [f_mean, f_matrix, count_matrix, sig[ok], mass[ok]], boxName
def convert_pkl_velocity(fileC, fileS, binFile, qty='vmax'):
"""
:param qty: one point function variable. Default vmax.
:param fileC: file with the central halo statistics
:param fileS: file with the satelitte halo statistics
:param binFile: file with the bins
:param zList_files: list of file with linking snapshot number and redshift
:return: a fits table containing the one point function histograms
"""
print("qty", qty)
boxZN = float(os.path.basename(fileC).split('_')[1])
print(boxZN )
extraName = os.path.basename(fileS)[:-27]
hf, boxLength, boxName, boxRedshift, logmp, boxLengthComoving, massCorrection = get_basic_info(fileC, boxZN, delta_wrt='mean')
bins = n.loadtxt(binFile)
vmax = ( bins[1:] + bins[:-1] )/2.
dX = ( bins[1:] - bins[:-1] )
dlnbin = dX / vmax
hz = cosmoMD.H( boxRedshift ).value / 100.
rhom_units = cosmoMD.Om(boxRedshift)*cosmoMD.critical_density(boxRedshift).to(u.solMass/(u.Mpc)**3.)#/(cosmoMD.h)**2.
# in units (Msun/h) / (Mpc/h)**3
rhom = rhom_units.value
col0 = fits.Column( name="boxName",format="14A", array= n.array([boxName for i in range(len(bins[:-1]))]))
col1 = fits.Column( name="redshift",format="D", array= boxRedshift * n.ones( len(bins[:-1]) ) )
col1b = fits.Column( name="h",format="D", array= hz * n.ones( len(bins[:-1]) ) )
col2a = fits.Column( name="boxLengthProper",format="D", array= boxLength * n.ones( len(bins[:-1]) ) )
col2b = fits.Column( name="boxLengthComoving",format="D", array= boxLengthComoving * n.ones( len(bins[:-1]) ) )
col3 = fits.Column( name="logMpart",format="D", array= logmp* n.ones( len(bins[:-1]) ) )
col4 = fits.Column( name="rhom",format="D", array= rhom* n.ones( len(bins[:-1]) ) )
col5_0 = fits.Column( name=qty+"_min",format="D", array= bins[:-1] )
col5_1 = fits.Column( name=qty+"_max",format="D", array= bins[1:] )
col5_2 = fits.Column( name=qty, format="D", array= vmax)
unitVolume = (boxLength*0.10)**3.
volume = (boxLength)**3.
Ncounts, Ncounts_c, Nall, Nall_c, mean90, std90, mean90_c, std90_c = getStat(fileC,volume,unitVolume)
col5 = fits.Column( name="dN_counts_cen",format="D", array= Ncounts )
col6 = fits.Column( name="dN_counts_cen_c",format="D", array= Ncounts_c)
col7 = fits.Column( name="dNdV_cen",format="D", array= Nall )
col8 = fits.Column( name="dNdV_cen_c",format="D", array= Nall_c )
col9 = fits.Column( name="dNdVdlnV_cen",format="D", array= Nall/dlnbin )
col10 = fits.Column( name="dNdVdlnV_cen_c",format="D", array= Nall_c/dlnbin )
col11 = fits.Column( name="std90_pc_cen",format="D", array= std90 )
col12 = fits.Column( name = "std90_pc_cen_c", format="D", array= std90_c )
Ncounts, Ncounts_c, Nall, Nall_c, mean90, std90, mean90_c, std90_c = getStat(fileS,volume,unitVolume)
col5_s = fits.Column( name="dN_counts_sat",format="D", array= Ncounts )
col6_s = fits.Column( name="dN_counts_sat_c",format="D", array= Ncounts_c)
col7_s = fits.Column( name="dNdV_sat",format="D", array= Nall )
col8_s = fits.Column( name="dNdV_sat_c",format="D", array= Nall_c )
col9_s = fits.Column( name="dNdVdlnV_sat",format="D", array= Nall/dlnbin )
col10_s = fits.Column( name="dNdVdlnV_sat_c",format="D", array= Nall_c / dlnbin )
col11_s = fits.Column( name="std90_pc_sat",format="D", array= std90 )
col12_s = fits.Column( name="std90_pc_sat_c",format="D", array= std90_c)
tbhdu = fits.BinTableHDU.from_columns([col0, col1,col1b, col2a, col2b, col3, col4, col5_0, col5_1, col5_2, col5, col6, col7, col8, col9, col10, col11, col12, col5_s, col6_s, col7_s, col8_s, col9_s, col10_s, col11_s, col12_s])
prihdr = fits.Header()
prihdr['AUTHOR'] = 'J. Comparat'
prihdr['DATE'] = time.time()
prihdu = fits.PrimaryHDU(header=prihdr)
thdulist = fits.HDUList([prihdu, tbhdu])
writeName = join(os.environ['VMAX_DIR'], "data", boxName+"_"+str(boxRedshift)+"_"+qty+".fits")
if os.path.isfile(writeName):
os.remove(writeName)
thdulist.writeto(writeName)
redshift = 0.
hmf = get_hf(sigma_val=0.8228, boxRedshift=0., delta_wrt='mean')
ks = hmf.k
pks = hmf.power
nbar = interp1d(hmf.sigma, hmf.rho_gtm/hmf.m)
sigma_to_m = interp1d(hmf.sigma, hmf.M)
m_to_sigma = interp1d(hmf.M, hmf.sigma)
shot_simple = lambda sigma, volume: (nbar(sigma) * volume)**(-1.)
#shot_double_raw = lambda s1, s2, volume: ((nbar(s1)+nbar(s2))/2. * volume)**(-1.)
shot_double_raw = lambda s1, s2, volume: ((nbar(s1)*nbar(s2))**0.5 * 2*volume)**(-1.)
def shot_double(s1, s2, volume, binW):
s1_max = m_to_sigma(10**(n.log10(sigma_to_m(s1))+binW))
s2_max = m_to_sigma(10**(n.log10(sigma_to_m(s2))+binW))
return shot_double_raw(s1_max, s2_max, volume)
def shot_noise_it(sigma, volume):
print("sigma", sigma)
if n.log10(sigma_to_m(sigma))>=14:
s_min = m_to_sigma(10**(n.log10(sigma_to_m(sigma))-0.025))
s_max = m_to_sigma(10**(n.log10(sigma_to_m(sigma))+0.025))
sns = n.array([ shot_simple(sigma, volume), shot_simple(s_min, volume), shot_simple(s_max, volume)])
print(sns )
return n.max(sns)
else:
s_min = m_to_sigma(10**(n.log10(sigma_to_m(sigma))-0.125))
s_max = m_to_sigma(10**(n.log10(sigma_to_m(sigma))+0.125))
sns = n.array([ shot_simple(sigma, volume), shot_simple(s_min, volume), shot_simple(s_max, volume)])
print(sns)
return n.max(sns)
shot_noise = lambda sigma, volume: n.array([shot_noise_it(sig, volume) for sig in sigma])
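# Illustrative call (hypothetical sigma values): per-bin shot noise in the
# comoving volume of the 1 Gpc/h box,
#   sn = shot_noise(n.array([0.6, 1.0, 1.8]), 1000.**3.)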
meanDensity = (3840.**3.)*1.51 # (Msun/h) / (Mpc/h)**3
def integral(fun):
xmin = n.log10(fun.x.min())
xmax = n.log10(fun.x.max())
innerbounds = n.arange(xmin, xmax,1)[1:]
bounds = n.hstack((xmin, innerbounds, xmax))
out = n.empty(len(innerbounds)+1)
out_err = n.empty(len(innerbounds)+1)
for ii in range(len(innerbounds)+1):
out[ii], out_err[ii] = quad(fun, 10**bounds[ii], 10**bounds[ii+1])
return n.sum(out), n.sum(out_err)
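# Usage sketch: integrate a tabulated function over its full x-range, decade by
# decade in k (as done for the windowed power spectra below):
#   total, err = integral(interp1d(ks, pks))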
# window function,
# -------------------
# tophat
w_th = lambda k, r : 3*(n.sin(k*r) - (k*r) * n.cos(k*r) )/(k*r)**3.
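# w_th(k, r) is the Fourier transform of a spherical top-hat of radius r:
# W(kr) = 3 [sin(kr) - kr*cos(kr)] / (kr)**3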
# computes the moments of the linear pk for the whole volume
# ---------------------------------------------
Lboxes = n.array([400., 1000., 2500., 4000., 8000.]) # Mpc/h
volumes = Lboxes**3.
rboxes = (3. * volumes/(4.*n.pi))**(1./3.)
print("all",rboxes )
sigs = []
for rbox in rboxes:
ws_th = w_th(ks, rbox)**2.
fun = interp1d(ks, ws_th * pks/(2*n.pi)**3./n.sum(ws_th))
sigs.append(integral(fun))
covariance_factor = n.transpose(sigs)[0]
print("cf", covariance_factor )
# computes the moments of the linear pk for 90% of the volume
# ---------------------------------------------
Lboxes = n.array([400., 1000., 2500., 4000., 8000.])
volumes = Lboxes**3. *0.9
rboxes = (3. * volumes/(4.*n.pi))**(1./3.)
print("90%",rboxes )
sigs = []
for rbox in rboxes:
ws_th = w_th(ks, rbox)**2.
fun = interp1d(ks, ws_th * pks/(2*n.pi)**3./n.sum(ws_th))
sigs.append(integral(fun))
covariance_factor_90 = n.transpose(sigs)[0]
print("cf", covariance_factor_90 )
Lboxes = n.array([400., 1000., 2500., 4000., 8000.])/10. # Mpc/h
volumes = Lboxes**3.
rboxes = (3. * volumes/(4.*n.pi))**(1./3.)
print("1/1000:",rboxes )
sigs = []
for rbox in rboxes:
ws_th = w_th(ks, rbox)**2.
fun = interp1d(ks, ws_th * pks/(2*n.pi)**3./n.sum(ws_th))
sigs.append(integral(fun))
covariance_factor_jk = n.transpose(sigs)[0]
print("cf", covariance_factor_jk )
Lboxes = n.array([400., 1000., 2500., 4000., 8000.])/10. # Mpc/h
volumes = Lboxes**3.*27.
rboxes = (3. * volumes/(4.*n.pi))**(1./3.)
print("1/1000:",rboxes )
sigs = []
for rbox in rboxes:
ws_th = w_th(ks, rbox)**2.
fun = interp1d(ks, ws_th * pks/(2*n.pi)**3./n.sum(ws_th))
sigs.append(integral(fun))
covariance_factor_jk2 = n.transpose(sigs)[0]
print("cf", covariance_factor_jk2 )
|
JohanComparat/nbody-npt-functions
|
python/lib_functions_1pt.py
|
Python
|
cc0-1.0
| 51,018
|
[
"TINKER"
] |
d7f0a89e14e246dbce392cf64db12019bdc6ad95f2295145611043a39802a431
|
"""Math commands"""
from .sr850_driver import SR850Driver
class SR850Math(SR850Driver):
"""Math commands"""
def smth(self, width):
"""Smooth the data trace of the active display.
This command may take some time to complete. If a scan is in progress,
this method will pause the scan.
:param width: the smoothing width
:type width: str
"""
param = ['5 points', '11 points', '17 points', '21 points', '25 points']
self._set('SMTH {}'.format(param.index(width)))
def copr(self, operation=None):
"""Sets or queries the type of math operation selected.
:param operation: the math operation
:type operation: str
:returns: the math operation
:rtype: str
"""
ops = ['+', '-', '*', '/', 'sin', 'cos', 'tan', 'square root',
'square', 'log', 'power of 10']
if operation is not None:
self._set('COPR {}'.format(ops.index(operation)))
return ops[int(self._query('COPR?'))]
def calc(self):
"""Starts the calculation selected by copr().
This may take some time.
"""
self._set('CALC')
def cagt(self, type_=None):
"""Sets or queries the argument type.
        :param type_: the argument type
:type type_: str
:returns: the argument type
:rtype: str
"""
param = ['Trace', 'Constant']
if type_ is not None:
self._set('CAGT {}'.format(param.index(type_)))
return param[int(self._query('CAGT?'))]
def ctrc(self, trace=None):
"""Sets or queries the trace argument number.
The selected trace must be stored.
:param trace: the trace number
:type trace: int
:returns: the trace number
:rtype: int
"""
if trace is not None:
self._set('CTRC {}'.format(trace))
return int(self._query('CTRC?'))
def carg(self, value=None):
"""Sets or queries the constant argument value.
:param value: the constant argument value
:type value: float
:returns: the constant argument value
:rtype: float
"""
if value is not None:
self._set('CARG {}'.format(value))
return float(self._query('CARG?'))
def ftyp(self, fit=None):
"""Sets or queries the type of fit.
:param fit: the type of fit
:type fit: str
:returns: the type of fit
:rtype: str
"""
param = ['Line', 'Exponential', 'Gaussian']
if fit is not None:
self._set('FTYP {}'.format(param.index(fit)))
return param[int(self._query('FTYP?'))]
def fitt(self, start, end):
"""Starts the fitting calculation.
        The fit takes place between *start*% and *end*% of the displayed
        trace; *end* must be larger than *start*. This fit may take some
        time. If a scan is in progress, it will be paused.
:param start: the start point from the left side of the screen (percentage)
:type start: int
:param end: the end point from the left side of the screen (percentage)
:type end: int
"""
self._set('FITT {}, {}'.format(start, end))
def pars(self, parameter):
"""Queries the fit parameters after a curve fit has been performed.
If no fit has been performed or the selected parameter is unused in the
fit, this could return invalid data.
:param parameter: the fit parameter
:type parameter: str
:returns: the fit parameter value
:rtype: float
"""
param = ['a', 'b', 'c', 't0']
return float(self._query('PARS {}'.format(param.index(parameter))))
def stat(self, start, end):
"""Starts the statistics calulations.
Only the data within the chart region defined between *start%* and
*end%* (*end* must be larger than *start*) are analyzed. The analysis
may take some time.
:param start: the start point from the left side of the screen (percentage)
:type start: int
:param end: the end point from the left side of the screen (percentage)
:type end: int
"""
self._set('STAT {}, {}'.format(start, end))
def spar(self, statistic):
"""Queries the results of the statistical calculation.
If no analysis has been performed this will return invalid data.
:param statistic: the statistic parameter
:type statistic: str
:returns: the result of the parameter
:rtype: float
"""
param = ['mean', 'standard deviation', 'total data', 'delta time']
return float(self._query('SPAR {}'.format(param.index(statistic))))
|
PALab/PLACE
|
place/plugins/sr850_amp/sr850_math.py
|
Python
|
lgpl-3.0
| 4,769
|
[
"Gaussian"
] |
0135e1f4370a15be1ff59c2b814db65973ed179cb5d9e19c648064bcacacb480
|
from boltons.iterutils import remap
from mongoengine.queryset.visitor import Q
from mpcontribs.api import enter
from mpcontribs.api.projects.document import Projects
from mpcontribs.api.contributions.document import Contributions
def visit(path, key, value):
if isinstance(value, dict) and "display" in value:
return key, value["display"]
return True
def fix_units(name):
# make sure correct units are indicated in project.columns before running this
fields = list(Contributions._fields.keys())
project = Projects.objects.with_id(name).reload("columns")
query = Q()
for column in project.columns:
if column.unit and column.unit != "NaN":
path = column.path.replace(".", "__")
q = {f"{path}__unit__ne": column["unit"]}
query |= Q(**q)
contribs = Contributions.objects(Q(project=name) & query).only(*fields)
num = contribs.count()
print(name, num)
for idx, contrib in enumerate(contribs):
contrib.data = remap(contrib.data, visit=visit, enter=enter) # pull out display
contrib.save(signal_kwargs={"skip": True}) # reparse display with intended unit
if idx and not idx%250:
print(idx)
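# --- Hedged usage sketch (not part of the original script) ---
# "my_project" is a hypothetical project name; running this needs a live
# MongoDB connection configured for mpcontribs-api.
if __name__ == "__main__":
    fix_units("my_project")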
# additional maintenance functions
# TODO generate JSON/CSV project downloads
# TODO clean dangling notebooks
# TODO update_projects/stats
|
materialsproject/MPContribs
|
mpcontribs-api/maintenance.py
|
Python
|
mit
| 1,365
|
[
"VisIt"
] |
fa1974021f35e7e0ba997b69f9a0f00fb86c63f5a5d32d67a7dc3de718b3f161
|
'''
Created on Feb 23, 2016
@author: jason
'''
import numpy as np
from sklearntools.sklearntools import StagedEstimator, MaskedEstimator,\
ColumnSubsetTransformer, NonNullSubsetFitter, safe_assign_column
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.logistic import LogisticRegression
from sklearntools.calibration import CalibratedEstimatorCV, ResponseTransformingEstimator,\
LogTransformer, PredictorTransformer, HazardToRiskEstimator,\
MovingAverageSmoothingEstimator, ThresholdClassifier, ProbaPredictingEstimator
from sklearntools.feature_selection import SingleEliminationFeatureImportanceEstimatorCV,\
BackwardEliminationEstimator, UnivariateFeatureImportanceEstimatorCV,\
BestKFeatureSelector
from numpy.testing.utils import assert_raises
from sklearntools.glm import GLM
import statsmodels.api as sm
import warnings
import pandas
from sklearntools.model_selection import ModelSelector
from sklearntools.scoring import log_loss_metric
from sklearn.ensemble.forest import RandomForestRegressor
from numpy.ma.testutils import assert_array_almost_equal
from sklearntools.earth import Earth
from sklearntools.kfold import CrossValidatingEstimator
from sklearn.metrics.regression import r2_score
from sklearn.model_selection import KFold
from nose.tools import assert_list_equal
warnings.simplefilter("error")
def test_safe_assign_column():
data = pandas.DataFrame({'A': [1,2,3], 'B': [4,5,6]})
safe_assign_column(data, 'A', [7,8,9])
assert_list_equal(list(sorted(data.columns)), ['A', 'B'])
def test_single_elimination_feature_importance_estimator_cv():
np.random.seed(0)
m = 100000
n = 6
factor = .9
X = np.random.normal(size=(m,n))
beta = 100 * np.ones(shape=n)
for i in range(1, n):
beta[i] = factor * beta[i-1]
beta = np.random.permutation(beta)[:,None]
y = np.dot(X, beta) + 0.01 * np.random.normal(size=(m, 1))
target_sequence = np.ravel(np.argsort(beta ** 2, axis=0))
model1 = SingleEliminationFeatureImportanceEstimatorCV(LinearRegression())
model1.fit(X, y)
fitted_sequence = np.ravel(np.argsort(model1.feature_importances_, axis=0))
np.testing.assert_array_equal(fitted_sequence, target_sequence)
def test_univariate_feature_importance_estimator_cv():
np.random.seed(0)
m = 100000
n = 6
factor = .9
X = np.random.normal(size=(m,n))
beta = 100 * np.ones(shape=n)
for i in range(1, n):
beta[i] = factor * beta[i-1]
beta = np.random.permutation(beta)[:,None]
y = np.dot(X, beta) + 0.01 * np.random.normal(size=(m, 1))
target_sequence = np.ravel(np.argsort(beta ** 2, axis=0))
model1 = UnivariateFeatureImportanceEstimatorCV(LinearRegression())
model1.fit(X, y)
fitted_sequence = np.ravel(np.argsort(model1.feature_importances_, axis=0))
np.testing.assert_array_equal(fitted_sequence, target_sequence)
def test_k_best_feature_selector():
np.random.seed(0)
m = 100000
n = 6
factor = .9
X = np.random.normal(size=(m,n))
beta = 100 * np.ones(shape=n)
for i in range(1, n):
beta[i] = factor * beta[i-1]
beta = np.random.permutation(beta)[:,None]
# beta = np.random.normal(size=(n,1))
y = np.dot(X, beta) + 0.01 * np.random.normal(size=(m, 1))
target_vars = np.ravel(np.argsort(beta ** 2, axis=0))[::-1][:3]
target_support = np.zeros(shape=n, dtype=bool)
target_support[target_vars] = True
model1 = BestKFeatureSelector(UnivariateFeatureImportanceEstimatorCV(LinearRegression()), k=3)
model1.fit(X, y)
np.testing.assert_array_equal(model1.support_, target_support)
def test_backward_elimination_estimation():
np.random.seed(0)
m = 100000
n = 6
factor = .9
X = np.random.normal(size=(m,n))
beta = 100 * np.ones(shape=n)
for i in range(1, n):
beta[i] = factor * beta[i-1]
beta = np.random.permutation(beta)[:,None]
# beta = np.random.normal(size=(n,1))
y = np.dot(X, beta) + 0.01 * np.random.normal(size=(m, 1))
target_sequence = np.ravel(np.argsort(beta ** 2, axis=0))
model1 = BackwardEliminationEstimator(SingleEliminationFeatureImportanceEstimatorCV(LinearRegression()))
model1.fit(X, y)
# model2 = BRFE(FeatureImportanceEstimatorCV(LinearRegression()))
# model2.fit(X, y)
np.testing.assert_array_equal(model1.elimination_sequence_, target_sequence)
def test_multiple_response_regressor():
np.random.seed(1)
m = 100000
n = 10
X = np.random.normal(size=(m,n))
beta1 = np.random.normal(size=(n,1))
beta2 = np.random.normal(size=(n,1))
y1 = np.dot(X, beta1)
p2 = 1. / (1. + np.exp( - np.dot(X, beta2)))
y2 = np.random.binomial(n=1, p=p2)
y = np.concatenate([y1, y2], axis=1)
model = MaskedEstimator(LinearRegression(), [True, False]) & MaskedEstimator(ProbaPredictingEstimator(LogisticRegression()), [False, True])
# MultipleResponseEstimator([('linear', np.array([True, False], dtype=bool), LinearRegression()),
# ('logistic', np.array([False, True], dtype=bool), ProbaPredictingEstimator(LogisticRegression()))])
model.fit(X, y)
assert np.mean(beta1 - model.estimators_[0].estimator_.coef_) < .01
assert np.mean(beta2 - model.estimators_[1].estimator_.estimator_.coef_) < .01
model.get_params()
model.predict(X)
def test_calibration():
np.random.seed(1)
m = 10000
n = 10
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
y_lin = np.dot(X, beta)
y_clas = np.random.binomial( 1, 1. / (1. + np.exp(-y_lin)) )
y = np.concatenate([y_lin, y_clas], axis=1)
estimator = MaskedEstimator(LinearRegression(), np.array([True, False], dtype=bool))
calibrator = MaskedEstimator(LogisticRegression(), [False, True])
# estimator = linear_regressor & calibrator
# MultipleResponseEstimator([('estimator', np.array([True, False], dtype=bool), LinearRegression())])
# calibrator = MultipleResponseEstimator([('calibrator', np.array([False, True], dtype=bool), LogisticRegression())])
model = CalibratedEstimatorCV(estimator, calibrator, cv=KFold(n_splits=4, shuffle=True), n_jobs=1)
model.fit(X, y)
assert np.max(beta[:, 0] - model.estimator_.estimator_.coef_) < .000001
assert np.max(model.calibrator_.estimator_.coef_ - 1.) < .1
def test_predictor_transformer_calibration():
np.random.seed(1)
m = 10000
n = 10
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
y_lin = np.dot(X, beta)
y_clas = np.random.binomial( 1, 1. / (1. + np.exp(-y_lin)) )
y = np.concatenate([y_lin, y_clas], axis=1)
estimator = MaskedEstimator(LinearRegression(), np.array([True, False], dtype=bool))
calibrator = MaskedEstimator(LogisticRegression(), [False, True])
# estimator = linear_regressor & calibrator
# MultipleResponseEstimator([('estimator', np.array([True, False], dtype=bool), LinearRegression())])
# calibrator = MultipleResponseEstimator([('calibrator', np.array([False, True], dtype=bool), LogisticRegression())])
model = PredictorTransformer(estimator) >> calibrator
model.fit(X, y)
assert np.max(beta[:, 0] - model.intermediate_stages_[0].estimator_.estimator_.coef_) < .000001
assert np.max(model.final_stage_.estimator_.coef_ - 1.) < .1
def test_pipeline():
np.random.seed(1)
m = 10000
n = 10
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
beta[np.random.binomial(p=2.0/float(n), n=1, size=n).astype(bool)] = 0
y = np.dot(X, beta) + 0.5 * np.random.normal(size=(m, 1))
beta_reduced = beta[beta != 0]
model = BackwardEliminationEstimator(SingleEliminationFeatureImportanceEstimatorCV(LinearRegression()))
model >>= LinearRegression()
model.fit(X, y)
assert np.max(np.abs(model.final_stage_.coef_ - beta_reduced)) < .1
def test_response_transforming_estimator():
np.random.seed(1)
m = 10000
n = 10
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
sigma = .1
y_pre = np.dot(X, beta) + sigma * np.random.normal(size=(m,1))
y_post = np.exp(y_pre)
model = ResponseTransformingEstimator(LinearRegression(), LogTransformer(offset=0.))
model.fit(X, y_post)
assert np.abs(np.mean(model.predict(X) - y_pre)) < .01
# Because LinearRegression has no transform method
assert_raises(AttributeError, lambda: model.transform(X))
def test_hazard_to_risk():
np.random.seed(1)
m = 10000
n = 10
# Simulate an event under constant hazard, with hazard = X * beta and
# iid exponentially distributed exposure times.
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
hazard = np.exp(np.dot(X, beta))
exposure = np.random.exponential(size=(m,1))
rate = np.random.poisson(hazard * exposure) / exposure
model = CalibratedEstimatorCV(GLM(sm.families.Gaussian(sm.families.links.log), add_constant=False),
ProbaPredictingEstimator(ThresholdClassifier(HazardToRiskEstimator(LogisticRegression()))))
model.fit(X, rate, exposure=exposure)
y_pred = model.predict(X, exposure)
assert np.abs((np.sum(y_pred) - np.sum(rate > 0)) / np.sum(rate > 0)) < .1
assert np.max(np.abs(model.estimator_.coef_ - beta[:,0])) < .1
def test_hazard_to_risk_staged():
np.random.seed(1)
m = 10000
n = 10
# Simulate an event under constant hazard, with hazard = X * beta and
# iid exponentially distributed exposure times.
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
hazard = np.exp(np.dot(X, beta))
exposure = np.random.exponential(size=(m,1))
rate = np.random.poisson(hazard * exposure) / exposure
model = CalibratedEstimatorCV(GLM(sm.families.Gaussian(sm.families.links.log), add_constant=False),
ProbaPredictingEstimator(ThresholdClassifier(HazardToRiskEstimator(LogisticRegression()))))
model.fit(X, rate, exposure=exposure)
y_pred = model.predict(X, exposure)
assert np.abs((np.sum(y_pred) - np.sum(rate > 0)) / np.sum(rate > 0)) < .1
assert np.max(np.abs(model.estimator_.coef_ - beta[:,0])) < .1
def test_moving_average_smoothing_estimator():
np.random.seed(1)
m = 10000
n = 10
# Simulate an event under constant hazard, with hazard = X * beta and
# iid exponentially distributed exposure times.
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
hazard = np.exp(np.dot(X, beta))
exposure = np.random.exponential(size=(m,1))
rate = np.random.poisson(hazard * exposure) / exposure
model = CalibratedEstimatorCV(GLM(sm.families.Gaussian(sm.families.links.log), add_constant=False),
ThresholdClassifier(HazardToRiskEstimator(MovingAverageSmoothingEstimator(RandomForestRegressor()))))
model.fit(X, rate, exposure=exposure)
y_pred = model.predict(X, exposure)
assert np.abs((np.sum(y_pred) - np.sum(rate > 0)) / np.sum(rate > 0)) < .1
assert np.max(np.abs(model.estimator_.coef_ - beta[:,0])) < .1
def test_staged_estimator():
np.random.seed(1)
m = 10000
n = 10
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
beta[np.random.binomial(p=2.0/float(n), n=1, size=n).astype(bool)] = 0
y = np.dot(X, beta) + 0.5 * np.random.normal(size=(m, 1))
beta_reduced = beta[beta != 0]
stage0 = BackwardEliminationEstimator(SingleEliminationFeatureImportanceEstimatorCV(LinearRegression()))
stage1 = LinearRegression()
model = StagedEstimator([stage0, stage1])
model.fit(X, y)
assert np.max(np.abs(model.final_stage_.coef_ - beta_reduced)) < .1
#
# y_lin = np.dot(X, beta)
# y_clas = np.random.binomial( 1, 1. / (1. + np.exp(-y_lin)) )
# y = np.concatenate([y_lin, y_clas], axis=1)
# estimator = mask_estimator(LinearRegression(), np.array([True, False], dtype=bool))
# calibrator = mask_estimator(LogisticRegression(), [False, True])
# # estimator = linear_regressor & calibrator
# # MultipleResponseEstimator([('estimator', np.array([True, False], dtype=bool), LinearRegression())])
# # calibrator = MultipleResponseEstimator([('calibrator', np.array([False, True], dtype=bool), LogisticRegression())])
# model = CalibratedEstimatorCV(estimator, calibrator)
# model.fit(X, y)
# assert np.max(beta[:, 0] - model.estimator_.estimators_[0][2].coef_) < .000001
# assert np.max(model.calibrator_.estimators_[0][2].coef_ - 1.) < .1
def test_column_subset_transformer():
m = 1000
n = 10
X = np.random.normal(size=(m,n))
x_cols = [0,3,4,5]
y_cols = 9
sample_weight_cols = 8
exposure_cols = 7
subsetter1 = ColumnSubsetTransformer(x_cols=x_cols, y_cols=y_cols,
sample_weight_cols=sample_weight_cols,
exposure_cols=exposure_cols)
np.testing.assert_array_equal(subsetter1.transform(X), X[:, x_cols])
args = {'X': X}
subsetter1.update(args)
np.testing.assert_array_equal(args['X'], X[:, x_cols])
np.testing.assert_array_equal(args['y'], X[:, y_cols])
np.testing.assert_array_equal(args['sample_weight'], X[:, sample_weight_cols])
np.testing.assert_array_equal(args['exposure'], X[:, exposure_cols])
X_ = pandas.DataFrame(X, columns=['x%d' % n for n in range(10)])
x_cols_ = ['x%d' % n for n in x_cols]
y_cols_ = 'x%d' % y_cols
sample_weight_cols_ = 'x%d' % sample_weight_cols
exposure_cols_ = 'x%d' % exposure_cols
subsetter2 = ColumnSubsetTransformer(x_cols=x_cols_, y_cols=y_cols_,
sample_weight_cols=sample_weight_cols_,
exposure_cols=exposure_cols_)
np.testing.assert_array_equal(subsetter2.transform(X_), X[:, x_cols])
args_ = {'X': X_}
subsetter2.update(args_)
np.testing.assert_array_equal(args_['X'], X[:, x_cols])
np.testing.assert_array_equal(args_['y'], X[:, y_cols])
np.testing.assert_array_equal(args_['sample_weight'], X[:, sample_weight_cols])
np.testing.assert_array_equal(args_['exposure'], X[:, exposure_cols])
lin = ColumnSubsetTransformer(x_cols=x_cols_, y_cols=y_cols_) >> LinearRegression()
lin.fit(X_)
lin.predict(X_.loc[:, x_cols_])
lin.score(X_)
def test_model_selector():
np.random.seed(1)
m = 10000
n = 10
# Simulate an event under constant hazard, with hazard = X * beta and
# iid exponentially distributed exposure times.
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
hazard = np.exp(np.dot(X, beta))
exposure = np.random.exponential(size=(m,1))
rate = np.random.poisson(hazard * exposure) / exposure
best_subset = np.ravel(np.argsort(np.abs(beta))[::-1][:3])
worst_subset = np.ravel(np.argsort(np.abs(beta))[:3])
basic_model = CalibratedEstimatorCV(GLM(sm.families.Gaussian(sm.families.links.log), add_constant=False),
ProbaPredictingEstimator(ThresholdClassifier(HazardToRiskEstimator(LogisticRegression()))))
model1 = CrossValidatingEstimator(ColumnSubsetTransformer(x_cols=best_subset) >> basic_model, metric=log_loss_metric)
model2 = CrossValidatingEstimator(ColumnSubsetTransformer(x_cols=worst_subset) >> basic_model, metric=log_loss_metric)
model = ModelSelector([model1, model2])
model.fit(X, rate, exposure=exposure)
np.testing.assert_array_equal(model.best_estimator_.estimator_.intermediate_stages_[0].x_cols, best_subset)
def test_cross_validating_estimator():
np.random.seed(1)
m = 1000
n = 5
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
y = np.ravel(np.dot(X, beta)) + np.random.normal(.5, size=m)
model = CrossValidatingEstimator(LinearRegression(), cv=KFold(n_splits=4, shuffle=True), n_jobs=2)
y_pred_cv = model.fit_predict(X, y)
y_pred = model.predict(X)
assert r2_score(np.ravel(y_pred_cv), np.ravel(y_pred)) > .98
def test_non_null_row_subset_fitter():
np.random.seed(1)
m = 10000
n = 10
# Simulate an event under constant hazard, with hazard = X * beta and
# iid exponentially distributed exposure times.
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
y = np.ravel(np.dot(X, beta))
missing = np.random.binomial(p=.001, n=1, size=(m,n)) == 1
X[missing] = None
model = NonNullSubsetFitter(LinearRegression())
model.fit(X, y)
assert np.max(np.abs(np.ravel(beta) - model.estimator_.coef_)) < .001
def test_linear_transformation():
np.random.seed(1)
m = 10000
n = 10
X = np.random.normal(size=(m,n))
beta1 = np.random.normal(size=(n,1))
y1 = np.ravel(np.dot(X, beta1))
beta2 = np.random.normal(size=(n,1))
y2 = np.ravel(np.dot(X, beta2))
model1 = (Earth() >> LinearRegression()).fit(X, y1)
model2 = Earth().fit(X, y2)
combination = 2*model1 - model2
assert_array_almost_equal(combination.predict(X), 2 * np.ravel(model1.predict(X)) - np.ravel(model2.predict(X)))
if __name__ == '__main__':
import sys
import nose
    # This code will run the tests in this file.
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
module_name,
'-s', '-v'])
|
jcrudy/sklearntools
|
sklearntools/test/test_sklearntools.py
|
Python
|
bsd-3-clause
| 17,863
|
[
"Gaussian"
] |
c2a7cb3d16b8d66fe17bd0aded9ed044a4796fc8a372a63b09d1ddd9c023fc44
|
import numpy as np
import scipy.stats as ss
import scipy.special as sp
from .family import Family
from .flat import Flat
from .normal import Normal
from .gas_recursions import gas_recursion_t_orderone, gas_recursion_t_ordertwo
from .gas_recursions import gasx_recursion_t_orderone, gasx_recursion_t_ordertwo
from .gas_recursions import gas_llev_recursion_t_orderone, gas_llev_recursion_t_ordertwo
from .gas_recursions import gas_llt_recursion_t_orderone, gas_llt_recursion_t_ordertwo
from .gas_recursions import gas_reg_recursion_t_orderone, gas_reg_recursion_t_ordertwo
class t(Family):
"""
Student t Distribution
----
This class contains methods relating to the Student t distribution for time series.
"""
def __init__(self, loc=0.0, scale=1.0, df=8.0, transform=None, **kwargs):
"""
Parameters
----------
loc : float
Location parameter for the t distribution
scale : float
Scale parameter for the t distribution
df : float
Degrees of freedom parameter for the t distribution
transform : str
Whether to apply a transformation to the location variable - e.g. 'exp' or 'logit'
"""
super(t, self).__init__(transform)
self.loc0 = loc
self.scale0 = scale
self.df0 = df
self.covariance_prior = False
self.gradient_only = kwargs.get('gradient_only', False) # used for GAS t models
if self.gradient_only is True:
self.score_function = self.first_order_score
else:
self.score_function = self.second_order_score
def approximating_model(self, beta, T, Z, R, Q, h_approx, data):
""" Creates approximating Gaussian state space model for t measurement density
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
The variance of the measurement density
data: np.array
The univariate time series data
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(data.shape[0])*h_approx
mu = np.zeros(data.shape[0])
return H, mu
def approximating_model_reg(self, beta, T, Z, R, Q, h_approx, data, X, state_no):
""" Creates approximating Gaussian state space model for t measurement density
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
The variance of the measurement density
data: np.array
The univariate time series data
X: np.array
The regressors
state_no : int
Number of states
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(data.shape[0])*h_approx
mu = np.zeros(data.shape[0])
return H, mu
@staticmethod
def build_latent_variables():
""" Builds additional latent variables for this family
Returns
----------
- A list of lists (each sub-list contains latent variable information)
"""
lvs_to_build = []
lvs_to_build.append(['t Scale', Flat(transform='exp'), Normal(0, 3), 0.01])
lvs_to_build.append(['v', Flat(transform='exp'), Normal(0, 3), 2.5])
return lvs_to_build
@staticmethod
def draw_variable(loc, scale, shape, skewness, nsims):
""" Draws random variables from t distribution
Parameters
----------
loc : float
location parameter for the distribution
scale : float
scale parameter for the distribution
shape : float
tail thickness parameter for the distribution
skewness : float
skewness parameter for the distribution
nsims : int or list
number of draws to take from the distribution
Returns
----------
- Random draws from the distribution
"""
return loc + scale*np.random.standard_t(shape,nsims)
@staticmethod
def first_order_score(y, mean, scale, shape, skewness):
""" GAS t Update term using gradient only - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the t distribution
scale : float
scale parameter for the t distribution
shape : float
tail thickness parameter for the t distribution
skewness : float
skewness parameter for the t distribution
Returns
----------
- Score of the t family
"""
return ((shape+1)/shape)*(y-mean)/(np.power(scale,2) + (np.power(y-mean,2)/shape))
def logpdf(self, mu):
"""
Log PDF for t prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu))
"""
if self.transform is not None:
mu = self.transform(mu)
return ss.t.logpdf(mu, df=self.df0, loc=self.loc0, scale=self.scale0)
@staticmethod
def markov_blanket(y, mean, scale, shape, skewness):
""" Markov blanket for the Poisson distribution
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the t distribution
scale : float
scale parameter for the t distribution
shape : float
tail thickness parameter for the t distribution
skewness : float
skewness parameter for the t distribution
Returns
----------
- Markov blanket of the t family
"""
return ss.t.logpdf(x=y, df=shape, loc=mean, scale=scale)
@staticmethod
def setup():
""" Returns the attributes of this family
Notes
----------
- scale notes whether family has a variance parameter (sigma)
- shape notes whether family has a tail thickness parameter (nu)
- skewness notes whether family has a skewness parameter (gamma)
- mean_transform is a function which transforms the location parameter
- cythonized notes whether the family has cythonized routines
Returns
----------
- model name, link function, scale, shape, skewness, mean_transform, cythonized
"""
name = "t"
link = np.array
scale = True
shape = True
skewness = False
mean_transform = np.array
cythonized = True
return name, link, scale, shape, skewness, mean_transform, cythonized
@staticmethod
def neg_loglikelihood(y, mean, scale, shape, skewness):
""" Negative loglikelihood function
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the t distribution
scale : float
scale parameter for the t distribution
shape : float
tail thickness parameter for the t distribution
skewness : float
skewness parameter for the t distribution
Returns
----------
- Negative loglikelihood of the t family
"""
return -np.sum(ss.t.logpdf(x=y, df=shape, loc=mean, scale=scale))
def pdf(self, mu):
"""
PDF for t prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- p(mu)
"""
if self.transform is not None:
mu = self.transform(mu)
return ss.t.pdf(mu, df=self.df0, loc=self.loc0, scale=self.scale0)
@staticmethod
def reg_score_function(X, y, mean, scale, shape, skewness):
""" GAS t Regression Update term using gradient only - native Python function
Parameters
----------
X : float
datapoint for the right hand side variable
y : float
datapoint for the time series
mean : float
location parameter for the t distribution
scale : float
scale parameter for the t distribution
shape : float
tail thickness parameter for the t distribution
skewness : float
skewness parameter for the t distribution
Returns
----------
- Score of the t family
"""
return ((shape+1)/shape)*((y-mean)*X)/(np.power(scale,2)+np.power((y-mean),2)/shape)
@staticmethod
def second_order_score(y, mean, scale, shape, skewness):
""" GAS t Update term potentially using second-order information - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the t distribution
scale : float
scale parameter for the t distribution
shape : float
tail thickness parameter for the t distribution
skewness : float
skewness parameter for the t distribution
Returns
----------
- Adjusted score of the t family
"""
        first_order = ((shape+1)/shape)*(y-mean)/(np.power(scale,2) + (np.power(y-mean,2)/shape))
        information = (shape+1)*((np.power(scale,2)*shape) - np.power(y-mean,2))/np.power((np.power(scale,2)*shape) + np.power(y-mean,2),2)
        return first_order/information
# Optional Cythonized recursions below for GAS t models
@staticmethod
def gradient_recursion():
""" GAS t Model Recursion - gradient only
Returns
----------
- Recursion function for GAS t model - gradient only
"""
return gas_recursion_t_orderone
@staticmethod
def newton_recursion():
""" GAS t Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS t model - adjusted score
"""
return gas_recursion_t_ordertwo
@staticmethod
def gradientx_recursion():
""" GASX t Model Recursion - gradient only
Returns
----------
- Recursion function for GASX t model - gradient only
"""
return gasx_recursion_t_orderone
@staticmethod
def newtonx_recursion():
""" GASX t Model Recursion - adjusted score
Returns
----------
- Recursion function for GASX t model - adjusted score
"""
return gasx_recursion_t_ordertwo
@staticmethod
def gradientllev_recursion():
""" GAS Local Level t Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Local Level t model - gradient only
"""
return gas_llev_recursion_t_orderone
@staticmethod
def newtonllev_recursion():
""" GAS Local Level t Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Local Level t model - adjusted score
"""
return gas_llev_recursion_t_ordertwo
@staticmethod
def gradientllt_recursion():
""" GAS Local Linear Trend t Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Local Linear Trend t model - gradient only
"""
return gas_llt_recursion_t_orderone
@staticmethod
def newtonllt_recursion():
""" GAS Local Linear Trend t Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Local Linear Trend t model - adjusted score
"""
return gas_llt_recursion_t_ordertwo
@staticmethod
def gradientreg_recursion():
""" GAS Dynamic Regression t Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Dynamic Regression t model - gradient only
"""
return gas_reg_recursion_t_orderone
@staticmethod
def newtonreg_recursion():
""" GAS Dynamic Regression t Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Dynamic Regression t model - adjusted score
"""
return gas_reg_recursion_t_ordertwo
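# --- Hedged self-check (not part of the original module) ---
# Verifies numerically that first_order_score is the derivative of the t
# log-density with respect to the location parameter. The test values are
# arbitrary; run with `python -m pyflux.families.t` because of the relative
# imports above.
if __name__ == '__main__':
    _y, _mean, _scale, _shape = 1.3, 0.2, 1.5, 8.0
    _eps = 1e-6
    _numerical = (ss.t.logpdf(_y, df=_shape, loc=_mean + _eps, scale=_scale)
                  - ss.t.logpdf(_y, df=_shape, loc=_mean - _eps, scale=_scale)) / (2 * _eps)
    _analytic = t.first_order_score(_y, _mean, _scale, _shape, skewness=0.0)
    print(_numerical, _analytic)  # the two values should agree closely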
|
RJT1990/pyflux
|
pyflux/families/t.py
|
Python
|
bsd-3-clause
| 13,051
|
[
"Gaussian"
] |
2a4dac965c2796e83fb0ec4b190521b68cc904310d6c7db3f55bb275796af23d
|
import cv2
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
from methods_exist.trackers.kcf_tracker.RectTools import *
class KCFTracker():
def __init__(self, roi, image, fixed_window=False, multiscale=True, hog=True):
self._roi = Rect(roi[0], roi[1], roi[2], roi[3])
self.first_frame = image
self.lamda = 0.0001 # regularization
self.padding = 2.5 # extra area surrounding the target
self.output_sigma_factor = 0.125
self._tmpl_sz = Size(0,0)
self._scale = 0.0
self.size_patch = [0, 0, 0]
self.hann = np.array([])
# init the parameter based on the tracking methods
if hog : # HOG
# VOT
self.interp_factor = 0.012
self.sigma = 0.6
# TPAMI
# interp_factor = 0.02
# sigma = 0.5
self.cell_size = 4
            self._hogfeatures = False  # NOTE: HOG extraction is not implemented in this port; raw pixel features are used even when hog=True
else : # RAW
self.interp_factor = 0.075
self.sigma = 0.2
self.cell_size = 1
self._hogfeatures = False
if multiscale: # multiscale
self.template_size = 96.0
# template_size = 100;
self.scale_step = 1.05
self.scale_weight = 0.95
if not fixed_window:
# printf("Multiscale does not support non-fixed window.\n");
self.fixed_window = True
elif fixed_window: # fit correction without multiscale
self.template_size = 96.0
# template_size = 100
self.scale_step = 1
else :
self.template_size = 1
self.scale_step = 1
self.init(self._roi,self.first_frame)
def init(self, roi, image):
self._tmpl = self.getFeatures(self.first_frame, True, 1)
_prob = self.createGaussianPeak(self.size_patch[0], self.size_patch[1])
self.train(self._tmpl, 1.0)
# get features from the roi arera
def getFeatures(self, image, inithann, scale_adjust):
'''
        Transform the target ROI into the padded extraction ROI and return
        the Hanning-windowed grayscale feature map.
:param image:
:param inithann:
:param scale_adjust:
:return:
'''
extracted_roi = Rect(0,0,0,0)
# get the center point of the roi rect
cx = self._roi.x + self._roi.width / 2.
cy = self._roi.y + self._roi.height / 2.
# init the hanning window, Only at the first frame
if inithann:
padded_w = int(self._roi.width * self.padding)
padded_h = int(self._roi.height * self.padding)
if self.template_size > 1 : # Fit largest dimension to the given template size
if padded_w >= padded_h: # fit to width
self._scale = padded_w / self.template_size
else:
self._scale = padded_h / self.template_size
                self._tmpl_sz.width = int(padded_w / self._scale)    # the larger side is fixed to template_size; the smaller side scales by the same factor
                self._tmpl_sz.height = int(padded_h / self._scale)
else: # No template size given, use ROI size
self._tmpl_sz.width = int(padded_w)
self._tmpl_sz.height = int(padded_h)
self._scale = 1
if self._hogfeatures :
# Round to cell size and also make it even
self._tmpl_sz.width = ( ( int(self._tmpl_sz.width / (2 * self.cell_size)) ) * 2 * self.cell_size ) + self.cell_size * 2
self._tmpl_sz.height = ( ( int(self._tmpl_sz.height / (2 * self.cell_size)) ) * 2 * self.cell_size ) + self.cell_size * 2
else : # Make number of pixels even (helps with some logic involving half-dimensions)
                self._tmpl_sz.width = (self._tmpl_sz.width // 2) * 2   # integer division: round down to an even size
                self._tmpl_sz.height = (self._tmpl_sz.height // 2) * 2
        # size of the region to extract around the target
extracted_roi.width = int(scale_adjust * self._scale * self._tmpl_sz.width)
extracted_roi.height = int(scale_adjust * self._scale * self._tmpl_sz.height)
# center roi with new size
extracted_roi.x = int(cx - extracted_roi.width / 2)
extracted_roi.y = int(cy - extracted_roi.height / 2)
z = subwindow(img=image, window=extracted_roi, borderType=cv2.BORDER_REPLICATE)
if (z.shape[1] != self._tmpl_sz.width) or (z.shape[0] != self._tmpl_sz.height) :
z = cv2.resize(z, # resize the current image size
(int(self._tmpl_sz.width), int(self._tmpl_sz.height)),
interpolation=cv2.INTER_LINEAR)
# Pixel features
FeatureMap = getGrayImage(z)
FeatureMap -= float(0.5)
self.size_patch[0] = z.shape[0]
self.size_patch[1] = z.shape[1]
self.size_patch[2] = 1
self.createHanningMats()
FeatureMap = self.hann * FeatureMap
return FeatureMap
def createHanningMats(self):
self.hann = cv2.createHanningWindow((self.size_patch[1], self.size_patch[0]), cv2.CV_32F)
## Visualizition
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = np.array([j for j in range(self.hann.shape[0])])
Y = np.array([j for j in range(self.hann.shape[1])])
X, Y = np.meshgrid(X, Y)
Z = np.array(np.zeros((self.hann.shape[1],self.hann.shape[0])),dtype=np.float32)
for i in range(self.hann.shape[1]):
for j in range(self.hann.shape[0]):
Z[i, j] = self.hann[j, i]
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
# Plot a basic wireframe.
# ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
plt.title("Hanning window")
plt.show()
def createGaussianPeak(self, sizey, sizex):
res = np.array(np.zeros((sizey, sizex),dtype=np.float32),dtype=np.float32)
syh = (sizey) / 2
sxh = (sizex) / 2
output_sigma = math.sqrt(float(sizex) * sizey) / self.padding * self.output_sigma_factor
mult = -0.5 / (output_sigma * output_sigma)
for i in range(sizey):
for j in range(sizex):
ih = i - syh
jh = j - sxh
res[i, j] = math.exp(mult * float((ih * ih + jh * jh)))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = np.array([j for j in range(res.shape[0])])
Y = np.array([j for j in range(res.shape[1])])
X, Y = np.meshgrid(X, Y)
Z = np.array(np.zeros((res.shape[1], res.shape[0])), dtype=np.float32)
for i in range(res.shape[1]):
for j in range(res.shape[0]):
Z[i, j] = np.fft.fftshift(res)[j, i]
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
# Plot a basic wireframe.
# ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
plt.title("Gaussian peak")
plt.show()
return np.fft.fftshift(res)
    def train(self, x, train_interp_factor):
        # NOTE: this port's training step is incomplete; the kernel response
        # is computed but not yet used to update a model.
        self.gaussianCorrelation(x, x)
    def gaussianCorrelation(self, x1, x2):
        # Gaussian-kernel correlation of feature maps x1 and x2, computed in
        # the Fourier domain (single-channel raw features).
        # cv2.mulSpectrums() multiplies two spectra element-wise; conjB=True
        # gives the cross-power spectrum.
        c = cv2.mulSpectrums(cv2.dft(x1, flags=cv2.DFT_COMPLEX_OUTPUT),
                             cv2.dft(x2, flags=cv2.DFT_COMPLEX_OUTPUT),
                             0, conjB=True)
        c = cv2.idft(c, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)  # cross-correlation
        c = np.fft.fftshift(c)
        # squared distance between the shifted copies, clamped at zero
        d = (np.sum(x1 * x1) + np.sum(x2 * x2) - 2.0 * c) / (self.size_patch[0] * self.size_patch[1] * self.size_patch[2])
        d = d * (d >= 0)
        return np.exp(-d / (self.sigma * self.sigma))
    def rearrange(self, img):
        # swap diagonal quadrants (equivalent to np.fft.fftshift for even sizes)
        cx = img.shape[1] // 2
        cy = img.shape[0] // 2
        out = np.zeros_like(img)
        out[0:cy, 0:cx] = img[cy:2*cy, cx:2*cx]   # bottom-right -> top-left
        out[0:cy, cx:2*cx] = img[cy:2*cy, 0:cx]   # bottom-left -> top-right
        out[cy:2*cy, 0:cx] = img[0:cy, cx:2*cx]   # top-right -> bottom-left
        out[cy:2*cy, cx:2*cx] = img[0:cy, 0:cx]   # top-left -> bottom-right
        return out
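# --- Hedged standalone check (not part of the original tracker) ---
# For even-sized arrays the quadrant swap in rearrange() should match
# numpy's fftshift; rearrange does not use self, so None is passed for it.
if __name__ == '__main__':
    _a = np.arange(16, dtype=np.float32).reshape(4, 4)
    print(np.allclose(KCFTracker.rearrange(None, _a), np.fft.fftshift(_a)))  # expect True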
|
hehewa/pytorchStudy
|
methods_exist/trackers/kcf_tracker/KCFTracker.py
|
Python
|
apache-2.0
| 8,593
|
[
"Gaussian"
] |
ce18e9c26774c8701389ec59443d1b37866ebe7cb96d3b86374452388b368e31
|
import os as _os
_on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not _on_rtd:
import numpy as _np
def gaussian(x, mu, sigma):
"""
Gaussian function of the form :math:`\\frac{1}{\\sqrt{2 \\pi}\\sigma} e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}`.
.. versionadded:: 1.5
Parameters
----------
x : float
Function variable :math:`x`.
mu : float
Mean of the Gaussian function.
sigma : float
Standard deviation of the Gaussian function.
"""
return _np.exp(-(x-mu)**2/(2*sigma**2)) / (_np.sqrt(2*_np.pi) * sigma)
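# --- Hedged usage sketch (not part of the original module) ---
# Cross-checks gaussian() against scipy's normal pdf; scipy is an assumption
# here (this module does not depend on it).
if __name__ == '__main__':
    import scipy.stats as _ss
    print(gaussian(0.3, mu=0.0, sigma=2.0))        # -> ~0.19725
    print(_ss.norm.pdf(0.3, loc=0.0, scale=2.0))   # same value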
|
joelfrederico/SciSalt
|
scisalt/numpy/functions.py
|
Python
|
mit
| 581
|
[
"Gaussian"
] |
33c1f8e4a7625edb151d1f0b444d7ff29c70c43ad2b0ffdf3564f95bc4954a27
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import division, unicode_literals
"""
#TODO: Write module doc.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/1/15'
import warnings
warnings.warn("pymatgen.io.smartio has been moved pymatgen.io.smart. "
"This stub will be removed in pymatgen 4.0.")
from .smart import *
|
rousseab/pymatgen
|
pymatgen/io/smartio.py
|
Python
|
mit
| 487
|
[
"pymatgen"
] |
8bba6156392fa05923c0e2905928c1129afcb05d77ee9c39dcce513a72607003
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import license
import gl_XML, glX_XML
import sys, getopt
class PrintGenericStubs(gl_XML.gl_print_base):
def __init__(self):
gl_XML.gl_print_base.__init__(self)
self.name = "gl_SPARC_asm.py (from Mesa)"
self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2003 Brian Paul All Rights Reserved.
(C) Copyright IBM Corporation 2004""", "BRIAN PAUL, IBM")
def printRealHeader(self):
print '#ifdef __arch64__'
print '#define GL_OFF(N)\t((N) * 8)'
print '#define GL_LL\t\tldx'
print '#define GL_TIE_LD(SYM)\t%tie_ldx(SYM)'
print '#define GL_STACK_SIZE\t128'
print '#else'
print '#define GL_OFF(N)\t((N) * 4)'
print '#define GL_LL\t\tld'
print '#define GL_TIE_LD(SYM)\t%tie_ld(SYM)'
print '#define GL_STACK_SIZE\t64'
print '#endif'
print ''
print '#define GLOBL_FN(x) .globl x ; .type x, @function'
print '#define HIDDEN(x) .hidden x'
print ''
print '\t.register %g2, #scratch'
print '\t.register %g3, #scratch'
print ''
print '\t.text'
print ''
print '\tGLOBL_FN(__glapi_sparc_icache_flush)'
print '\tHIDDEN(__glapi_sparc_icache_flush)'
print '\t.type\t__glapi_sparc_icache_flush, @function'
print '__glapi_sparc_icache_flush: /* %o0 = insn_addr */'
print '\tflush\t%o0'
print '\tretl'
print '\t nop'
print ''
print '\t.align\t32'
print ''
print '\t.type\t__glapi_sparc_get_pc, @function'
print '__glapi_sparc_get_pc:'
print '\tretl'
print '\t add\t%o7, %g2, %g2'
print '\t.size\t__glapi_sparc_get_pc, .-__glapi_sparc_get_pc'
print ''
print '#ifdef GLX_USE_TLS'
print ''
print '\tGLOBL_FN(__glapi_sparc_get_dispatch)'
print '\tHIDDEN(__glapi_sparc_get_dispatch)'
print '__glapi_sparc_get_dispatch:'
print '\tmov\t%o7, %g1'
print '\tsethi\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'
print '\tcall\t__glapi_sparc_get_pc'
print '\tadd\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'
print '\tmov\t%g1, %o7'
print '\tsethi\t%tie_hi22(_glapi_tls_Dispatch), %g1'
print '\tadd\t%g1, %tie_lo10(_glapi_tls_Dispatch), %g1'
print '\tGL_LL\t[%g2 + %g1], %g2, GL_TIE_LD(_glapi_tls_Dispatch)'
print '\tretl'
print '\t mov\t%g2, %o0'
print ''
print '\t.data'
print '\t.align\t32'
print ''
print '\t/* --> sethi %hi(_glapi_tls_Dispatch), %g1 */'
print '\t/* --> or %g1, %lo(_glapi_tls_Dispatch), %g1 */'
print '\tGLOBL_FN(__glapi_sparc_tls_stub)'
print '\tHIDDEN(__glapi_sparc_tls_stub)'
print '__glapi_sparc_tls_stub: /* Call offset in %g3 */'
print '\tmov\t%o7, %g1'
print '\tsethi\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'
print '\tcall\t__glapi_sparc_get_pc'
print '\tadd\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'
print '\tmov\t%g1, %o7'
print '\tsrl\t%g3, 10, %g3'
print '\tsethi\t%tie_hi22(_glapi_tls_Dispatch), %g1'
print '\tadd\t%g1, %tie_lo10(_glapi_tls_Dispatch), %g1'
print '\tGL_LL\t[%g2 + %g1], %g2, GL_TIE_LD(_glapi_tls_Dispatch)'
print '\tGL_LL\t[%g7+%g2], %g1'
print '\tGL_LL\t[%g1 + %g3], %g1'
print '\tjmp\t%g1'
print '\t nop'
print '\t.size\t__glapi_sparc_tls_stub, .-__glapi_sparc_tls_stub'
print ''
print '#define GL_STUB(fn, off)\t\t\t\t\\'
print '\tGLOBL_FN(fn);\t\t\t\t\t\\'
print 'fn:\tba\t__glapi_sparc_tls_stub;\t\t\t\\'
print '\t sethi\tGL_OFF(off), %g3;\t\t\t\\'
print '\t.size\tfn,.-fn;'
print ''
print '#elif defined(PTHREADS)'
print ''
print '\t/* 64-bit 0x00 --> sethi %hh(_glapi_Dispatch), %g1 */'
print '\t/* 64-bit 0x04 --> sethi %lm(_glapi_Dispatch), %g2 */'
print '\t/* 64-bit 0x08 --> or %g1, %hm(_glapi_Dispatch), %g1 */'
print '\t/* 64-bit 0x0c --> sllx %g1, 32, %g1 */'
print '\t/* 64-bit 0x10 --> add %g1, %g2, %g1 */'
print '\t/* 64-bit 0x14 --> ldx [%g1 + %lo(_glapi_Dispatch)], %g1 */'
print ''
print '\t/* 32-bit 0x00 --> sethi %hi(_glapi_Dispatch), %g1 */'
print '\t/* 32-bit 0x04 --> ld [%g1 + %lo(_glapi_Dispatch)], %g1 */'
print ''
print '\t.data'
print '\t.align\t32'
print ''
print '\tGLOBL_FN(__glapi_sparc_pthread_stub)'
print '\tHIDDEN(__glapi_sparc_pthread_stub)'
print '__glapi_sparc_pthread_stub: /* Call offset in %g3 */'
print '\tmov\t%o7, %g1'
print '\tsethi\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'
print '\tcall\t__glapi_sparc_get_pc'
print '\t add\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'
print '\tmov\t%g1, %o7'
print '\tsethi\t%hi(_glapi_Dispatch), %g1'
print '\tor\t%g1, %lo(_glapi_Dispatch), %g1'
print '\tsrl\t%g3, 10, %g3'
print '\tGL_LL\t[%g2+%g1], %g2'
print '\tGL_LL\t[%g2], %g1'
print '\tcmp\t%g1, 0'
print '\tbe\t2f'
print '\t nop'
print '1:\tGL_LL\t[%g1 + %g3], %g1'
print '\tjmp\t%g1'
print '\t nop'
print '2:\tsave\t%sp, GL_STACK_SIZE, %sp'
print '\tmov\t%g3, %l0'
print '\tcall\t_glapi_get_dispatch'
print '\t nop'
print '\tmov\t%o0, %g1'
print '\tmov\t%l0, %g3'
print '\tba\t1b'
print '\t restore %g0, %g0, %g0'
print '\t.size\t__glapi_sparc_pthread_stub, .-__glapi_sparc_pthread_stub'
print ''
print '#define GL_STUB(fn, off)\t\t\t\\'
print '\tGLOBL_FN(fn);\t\t\t\t\\'
print 'fn:\tba\t__glapi_sparc_pthread_stub;\t\\'
print '\t sethi\tGL_OFF(off), %g3;\t\t\\'
print '\t.size\tfn,.-fn;'
print ''
print '#else /* Non-threaded version. */'
print ''
print '\t.type __glapi_sparc_nothread_stub, @function'
print '__glapi_sparc_nothread_stub: /* Call offset in %g3 */'
print '\tmov\t%o7, %g1'
print '\tsethi\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'
print '\tcall\t__glapi_sparc_get_pc'
print '\t add\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'
print '\tmov\t%g1, %o7'
print '\tsrl\t%g3, 10, %g3'
print '\tsethi\t%hi(_glapi_Dispatch), %g1'
print '\tor\t%g1, %lo(_glapi_Dispatch), %g1'
print '\tGL_LL\t[%g2+%g1], %g2'
print '\tGL_LL\t[%g2], %g1'
print '\tGL_LL\t[%g1 + %g3], %g1'
print '\tjmp\t%g1'
print '\t nop'
print '\t.size\t__glapi_sparc_nothread_stub, .-__glapi_sparc_nothread_stub'
print ''
print '#define GL_STUB(fn, off)\t\t\t\\'
print '\tGLOBL_FN(fn);\t\t\t\t\\'
print 'fn:\tba\t__glapi_sparc_nothread_stub;\t\\'
print '\t sethi\tGL_OFF(off), %g3;\t\t\\'
print '\t.size\tfn,.-fn;'
print ''
print '#endif'
print ''
print '#define GL_STUB_ALIAS(fn, alias) \\'
print ' .globl fn; \\'
print ' .set fn, alias'
print ''
print '\t.text'
print '\t.align\t32'
print ''
print '\t.globl\tgl_dispatch_functions_start'
print '\tHIDDEN(gl_dispatch_functions_start)'
print 'gl_dispatch_functions_start:'
print ''
return
def printRealFooter(self):
print ''
print '\t.globl\tgl_dispatch_functions_end'
print '\tHIDDEN(gl_dispatch_functions_end)'
print 'gl_dispatch_functions_end:'
return
def printBody(self, api):
for f in api.functionIterateByOffset():
name = f.dispatch_name()
print '\tGL_STUB(gl%s, %d)' % (name, f.offset)
if not f.is_static_entry_point(f.name):
print '\tHIDDEN(gl%s)' % (name)
for f in api.functionIterateByOffset():
name = f.dispatch_name()
if f.is_static_entry_point(f.name):
for n in f.entry_points:
if n != f.name:
text = '\tGL_STUB_ALIAS(gl%s, gl%s)' % (n, f.name)
if f.has_different_protocol(n):
print '#ifndef GLX_INDIRECT_RENDERING'
print text
print '#endif'
else:
print text
return
def show_usage():
print "Usage: %s [-f input_file_name] [-m output_mode]" % sys.argv[0]
sys.exit(1)
if __name__ == '__main__':
file_name = "gl_API.xml"
mode = "generic"
try:
(args, trail) = getopt.getopt(sys.argv[1:], "m:f:")
except Exception,e:
show_usage()
for (arg,val) in args:
if arg == '-m':
mode = val
elif arg == "-f":
file_name = val
if mode == "generic":
printer = PrintGenericStubs()
else:
print "ERROR: Invalid mode \"%s\" specified." % mode
show_usage()
api = gl_XML.parse_GL_API(file_name, glX_XML.glx_item_factory())
printer.Print(api)
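# Hedged usage note (not part of the original file): the generator reads a
# GL API description and writes the SPARC assembly stubs to stdout, e.g.
#   python2 gl_SPARC_asm.py -f gl_API.xml -m generic > glapi_sparc.S
# (the output file name here is illustrative only).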
|
gzorin/RSXGL
|
extsrc/mesa/src/mapi/glapi/gen/gl_SPARC_asm.py
|
Python
|
bsd-2-clause
| 9,007
|
[
"Brian"
] |
2a155d6dd7cd9068356b7e2fa1e3900cb3698cb34d20cfd59acbfc6f26d2aa62
|
# -*- coding: utf-8 -*-
from shop.models.cartmodel import Cart
from django.contrib.auth.models import AnonymousUser
def get_cart_from_database(request):
database_cart = Cart.objects.filter(user=request.user)
if database_cart:
database_cart = database_cart[0]
else:
database_cart = None
return database_cart
def get_cart_from_session(request):
session_cart = None
session = getattr(request, 'session', None)
if session is not None:
cart_id = session.get('cart_id')
if cart_id:
try:
session_cart = Cart.objects.get(pk=cart_id)
except Cart.DoesNotExist:
session_cart = None
return session_cart
def get_or_create_cart(request, save=False):
"""
Return cart for current visitor.
For a logged in user, try to get the cart from the database. If it's not there or it's empty,
use the cart from the session.
If the user is not logged in use the cart from the session.
If there is no cart object in the database or session, create one.
If ``save`` is True, cart object will be explicitly saved.
"""
cart = None
if not hasattr(request, '_cart'):
is_logged_in = request.user and not isinstance(request.user, AnonymousUser)
if is_logged_in:
# if we are authenticated
session_cart = get_cart_from_session(request)
if session_cart and session_cart.user == request.user:
# and the session cart already belongs to us, we are done
cart = session_cart
elif session_cart and not session_cart.is_empty and session_cart.user != request.user:
# if it does not belong to us yet
database_cart = get_cart_from_database(request)
if database_cart:
# and there already is a cart that belongs to us in the database
# delete the old database cart
database_cart.delete()
# save the user to the new one from the session
session_cart.user = request.user
session_cart.save()
cart = session_cart
else:
# if there is no session_cart, or it's empty, use the database cart
cart = get_cart_from_database(request)
if cart:
# and save it to the session
request.session['cart_id'] = cart.pk
else:
# not authenticated? cart might be in session
cart = get_cart_from_session(request)
if not cart:
# in case it's our first visit and no cart was created yet
if is_logged_in:
cart = Cart(user=request.user)
elif getattr(request, 'session', None) is not None:
cart = Cart()
if save and not cart.pk:
cart.save()
request.session['cart_id'] = cart.pk
setattr(request, '_cart', cart)
    cart = getattr(request, '_cart')  # at this point we *must* have a cart
return cart
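# --- Hedged usage sketch (not part of the original module) ---
# Typical call from a Django view; the view and template names below are
# illustrative only.
#
# from django.shortcuts import render
#
# def cart_detail(request):
#     cart = get_or_create_cart(request, save=True)
#     return render(request, 'shop/cart.html', {'cart': cart})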
|
creimers/django-shop
|
shop/util/cart.py
|
Python
|
bsd-3-clause
| 3,099
|
[
"VisIt"
] |
be8889b2aec0910c984d13b58cd6a8ce5d5c1d2453fb1849e9439217159480b6
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# hashid.py - Software to identify the different types of hashes
# Copyright (C) 2013-2015 by c0re <c0re@psypanda.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import io
import os
import re
import sys
import argparse
from collections import namedtuple
__author__ = "c0re"
__version__ = "3.2.0-dev"
__github__ = "https://github.com/psypanda/hashID"
__license__ = "License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>"
__banner__ = "hashID v{0} by {1} ({2})".format(__version__, __author__, __github__)
Prototype = namedtuple('Prototype', ['regex', 'modes'])
HashInfo = namedtuple('HashInfo', ['name', 'hashcat', 'john', 'extended'])
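# Hedged illustration (not part of the original file): identification amounts
# to matching a candidate string against every prototype regex and collecting
# the modes of the prototypes that match. hashID defines its own matcher
# further down the file; _matches below is only a sketch of the idea.
def _matches(candidate, protos):
    for proto in protos:
        if proto.regex.match(candidate):
            for mode in proto.modes:
                yield mode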
prototypes = [
Prototype(
regex=re.compile(r'^[a-f0-9]{4}$', re.IGNORECASE),
modes=[
HashInfo(name='CRC-16', hashcat=None, john=None, extended=False),
HashInfo(name='CRC-16-CCITT', hashcat=None, john=None, extended=False),
HashInfo(name='FCS-16', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{8}$', re.IGNORECASE),
modes=[
HashInfo(name='Adler-32', hashcat=None, john=None, extended=False),
HashInfo(name='CRC-32B', hashcat=None, john=None, extended=False),
HashInfo(name='FCS-32', hashcat=None, john=None, extended=False),
HashInfo(name='GHash-32-3', hashcat=None, john=None, extended=False),
HashInfo(name='GHash-32-5', hashcat=None, john=None, extended=False),
HashInfo(name='FNV-132', hashcat=None, john=None, extended=False),
HashInfo(name='Fletcher-32', hashcat=None, john=None, extended=False),
HashInfo(name='Joaat', hashcat=None, john=None, extended=False),
HashInfo(name='ELF-32', hashcat=None, john=None, extended=False),
HashInfo(name='XOR-32', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{6}$', re.IGNORECASE),
modes=[
HashInfo(name='CRC-24', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^(\$crc32\$[a-f0-9]{8}.)?[a-f0-9]{8}$', re.IGNORECASE),
modes=[
HashInfo(name='CRC-32', hashcat=None, john='crc32', extended=False)]),
Prototype(
regex=re.compile(r'^\+[a-z0-9\/.]{12}$', re.IGNORECASE),
modes=[
HashInfo(name='Eggdrop IRC Bot', hashcat=None, john='bfegg', extended=False)]),
Prototype(
regex=re.compile(r'^[a-z0-9\/.]{13}$', re.IGNORECASE),
modes=[
HashInfo(name='DES(Unix)', hashcat=1500, john='descrypt', extended=False),
HashInfo(name='Traditional DES', hashcat=1500, john='descrypt', extended=False),
HashInfo(name='DEScrypt', hashcat=1500, john='descrypt', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{16}$', re.IGNORECASE),
modes=[
HashInfo(name='MySQL323', hashcat=200, john='mysql', extended=False),
HashInfo(name='DES(Oracle)', hashcat=3100, john=None, extended=False),
HashInfo(name='Half MD5', hashcat=5100, john=None, extended=False),
HashInfo(name='Oracle 7-10g', hashcat=3100, john=None, extended=False),
HashInfo(name='FNV-164', hashcat=None, john=None, extended=False),
HashInfo(name='CRC-64', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-z0-9\/.]{16}$', re.IGNORECASE),
modes=[
HashInfo(name='Cisco-PIX(MD5)', hashcat=2400, john='pix-md5', extended=False)]),
Prototype(
regex=re.compile(r'^\([a-z0-9\/+]{20}\)$', re.IGNORECASE),
modes=[
HashInfo(name='Lotus Notes/Domino 6', hashcat=8700, john='dominosec', extended=False)]),
Prototype(
regex=re.compile(r'^_[a-z0-9\/.]{19}$', re.IGNORECASE),
modes=[
HashInfo(name='BSDi Crypt', hashcat=None, john='bsdicrypt', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{24}$', re.IGNORECASE),
modes=[
HashInfo(name='CRC-96(ZIP)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-z0-9\/.]{24}$', re.IGNORECASE),
modes=[
HashInfo(name='Crypt16', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^(\$md2\$)?[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='MD2', hashcat=None, john='md2', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{32}(:.+)?$', re.IGNORECASE),
modes=[
HashInfo(name='MD5', hashcat=0, john='raw-md5', extended=False),
HashInfo(name='MD4', hashcat=900, john='raw-md4', extended=False),
HashInfo(name='Double MD5', hashcat=2600, john=None, extended=False),
HashInfo(name='LM', hashcat=3000, john='lm', extended=False),
HashInfo(name='RIPEMD-128', hashcat=None, john='ripemd-128', extended=False),
HashInfo(name='Haval-128', hashcat=None, john='haval-128-4', extended=False),
HashInfo(name='Tiger-128', hashcat=None, john=None, extended=False),
HashInfo(name='Skein-256(128)', hashcat=None, john=None, extended=False),
HashInfo(name='Skein-512(128)', hashcat=None, john=None, extended=False),
HashInfo(name='Lotus Notes/Domino 5', hashcat=8600, john='lotus5', extended=False),
HashInfo(name='Skype', hashcat=23, john=None, extended=False),
HashInfo(name='ZipMonster', hashcat=None, john=None, extended=True),
HashInfo(name='PrestaShop', hashcat=11000, john=None, extended=True),
HashInfo(name='md5(md5(md5($pass)))', hashcat=3500, john=None, extended=True),
HashInfo(name='md5(strtoupper(md5($pass)))', hashcat=4300, john=None, extended=True),
HashInfo(name='md5(sha1($pass))', hashcat=4400, john=None, extended=True),
HashInfo(name='md5($pass.$salt)', hashcat=10, john=None, extended=True),
HashInfo(name='md5($salt.$pass)', hashcat=20, john=None, extended=True),
HashInfo(name='md5(unicode($pass).$salt)', hashcat=30, john=None, extended=True),
HashInfo(name='md5($salt.unicode($pass))', hashcat=40, john=None, extended=True),
HashInfo(name='HMAC-MD5 (key = $pass)', hashcat=50, john='hmac-md5', extended=True),
HashInfo(name='HMAC-MD5 (key = $salt)', hashcat=60, john='hmac-md5', extended=True),
HashInfo(name='md5(md5($salt).$pass)', hashcat=3610, john=None, extended=True),
HashInfo(name='md5($salt.md5($pass))', hashcat=3710, john=None, extended=True),
HashInfo(name='md5($pass.md5($salt))', hashcat=3720, john=None, extended=True),
HashInfo(name='md5($salt.$pass.$salt)', hashcat=3810, john=None, extended=True),
HashInfo(name='md5(md5($pass).md5($salt))', hashcat=3910, john=None, extended=True),
HashInfo(name='md5($salt.md5($salt.$pass))', hashcat=4010, john=None, extended=True),
HashInfo(name='md5($salt.md5($pass.$salt))', hashcat=4110, john=None, extended=True),
HashInfo(name='md5($username.0.$pass)', hashcat=4210, john=None, extended=True)]),
Prototype(
regex=re.compile(r'^(\$snefru\$)?[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='Snefru-128', hashcat=None, john='snefru-128', extended=False)]),
Prototype(
regex=re.compile(r'^(\$NT\$)?[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='NTLM', hashcat=1000, john='nt', extended=False)]),
Prototype(
regex=re.compile(r'^([^\\\/:*?"<>|]{1,20}:)?[a-f0-9]{32}(:[^\\\/:*?"<>|]{1,20})?$', re.IGNORECASE),
modes=[
HashInfo(name='Domain Cached Credentials', hashcat=1100, john='mscash', extended=False)]),
Prototype(
regex=re.compile(r'^([^\\\/:*?"<>|]{1,20}:)?(\$DCC2\$10240#[^\\\/:*?"<>|]{1,20}#)?[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='Domain Cached Credentials 2', hashcat=2100, john='mscash2', extended=False)]),
Prototype(
regex=re.compile(r'^{SHA}[a-z0-9\/+]{27}=$', re.IGNORECASE),
modes=[
HashInfo(name='SHA-1(Base64)', hashcat=101, john='nsldap', extended=False),
HashInfo(name='Netscape LDAP SHA', hashcat=101, john='nsldap', extended=False)]),
Prototype(
regex=re.compile(r'^\$1\$[a-z0-9\/.]{0,8}\$[a-z0-9\/.]{22}(:.*)?$', re.IGNORECASE),
modes=[
HashInfo(name='MD5 Crypt', hashcat=500, john='md5crypt', extended=False),
HashInfo(name='Cisco-IOS(MD5)', hashcat=500, john='md5crypt', extended=False),
HashInfo(name='FreeBSD MD5', hashcat=500, john='md5crypt', extended=False)]),
Prototype(
regex=re.compile(r'^0x[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='Lineage II C4', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$H\$[a-z0-9\/.]{31}$', re.IGNORECASE),
modes=[
HashInfo(name='phpBB v3.x', hashcat=400, john='phpass', extended=False),
HashInfo(name='Wordpress v2.6.0/2.6.1', hashcat=400, john='phpass', extended=False),
HashInfo(name="PHPass' Portable Hash", hashcat=400, john='phpass', extended=False)]),
Prototype(
regex=re.compile(r'^\$P\$[a-z0-9\/.]{31}$', re.IGNORECASE),
modes=[
HashInfo(name=u'Wordpress ≥ v2.6.2', hashcat=400, john='phpass', extended=False),
HashInfo(name=u'Joomla ≥ v2.5.18', hashcat=400, john='phpass', extended=False),
HashInfo(name="PHPass' Portable Hash", hashcat=400, john='phpass', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{32}:[a-z0-9]{2}$', re.IGNORECASE),
modes=[
HashInfo(name='osCommerce', hashcat=21, john=None, extended=False),
HashInfo(name='xt:Commerce', hashcat=21, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$apr1\$[a-z0-9\/.]{0,8}\$[a-z0-9\/.]{22}$', re.IGNORECASE),
modes=[
HashInfo(name='MD5(APR)', hashcat=1600, john=None, extended=False),
HashInfo(name='Apache MD5', hashcat=1600, john=None, extended=False),
HashInfo(name='md5apr1', hashcat=1600, john=None, extended=True)]),
Prototype(
regex=re.compile(r'^{smd5}[a-z0-9$\/.]{31}$', re.IGNORECASE),
modes=[
HashInfo(name='AIX(smd5)', hashcat=6300, john='aix-smd5', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{32}:[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='WebEdition CMS', hashcat=3721, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{32}:.{5}$', re.IGNORECASE),
modes=[
HashInfo(name=u'IP.Board ≥ v2+', hashcat=2811, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{32}:.{8}$', re.IGNORECASE),
modes=[
HashInfo(name=u'MyBB ≥ v1.2+', hashcat=2811, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-z0-9]{34}$', re.IGNORECASE),
modes=[
HashInfo(name='CryptoCurrency(Address)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{40}(:.+)?$', re.IGNORECASE),
modes=[
HashInfo(name='SHA-1', hashcat=100, john='raw-sha1', extended=False),
HashInfo(name='Double SHA-1', hashcat=4500, john=None, extended=False),
HashInfo(name='RIPEMD-160', hashcat=6000, john='ripemd-160', extended=False),
HashInfo(name='Haval-160', hashcat=None, john=None, extended=False),
HashInfo(name='Tiger-160', hashcat=None, john=None, extended=False),
HashInfo(name='HAS-160', hashcat=None, john=None, extended=False),
HashInfo(name='LinkedIn', hashcat=190, john='raw-sha1-linkedin', extended=False),
HashInfo(name='Skein-256(160)', hashcat=None, john=None, extended=False),
HashInfo(name='Skein-512(160)', hashcat=None, john=None, extended=False),
HashInfo(name='MangosWeb Enhanced CMS', hashcat=None, john=None, extended=True),
HashInfo(name='sha1(sha1(sha1($pass)))', hashcat=4600, john=None, extended=True),
HashInfo(name='sha1(md5($pass))', hashcat=4700, john=None, extended=True),
HashInfo(name='sha1($pass.$salt)', hashcat=110, john=None, extended=True),
HashInfo(name='sha1($salt.$pass)', hashcat=120, john=None, extended=True),
HashInfo(name='sha1(unicode($pass).$salt)', hashcat=130, john=None, extended=True),
HashInfo(name='sha1($salt.unicode($pass))', hashcat=140, john=None, extended=True),
HashInfo(name='HMAC-SHA1 (key = $pass)', hashcat=150, john='hmac-sha1', extended=True),
HashInfo(name='HMAC-SHA1 (key = $salt)', hashcat=160, john='hmac-sha1', extended=True),
HashInfo(name='sha1($salt.$pass.$salt)', hashcat=4710, john=None, extended=True)]),
Prototype(
regex=re.compile(r'^\*[a-f0-9]{40}$', re.IGNORECASE),
modes=[
HashInfo(name='MySQL5.x', hashcat=300, john='mysql-sha1', extended=False),
HashInfo(name='MySQL4.1', hashcat=300, john='mysql-sha1', extended=False)]),
Prototype(
regex=re.compile(r'^[a-z0-9]{43}$', re.IGNORECASE),
modes=[
HashInfo(name='Cisco-IOS(SHA-256)', hashcat=5700, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^{SSHA}[a-z0-9\/+]{38}==$', re.IGNORECASE),
modes=[
HashInfo(name='SSHA-1(Base64)', hashcat=111, john='nsldaps', extended=False),
HashInfo(name='Netscape LDAP SSHA', hashcat=111, john='nsldaps', extended=False),
HashInfo(name='nsldaps', hashcat=111, john='nsldaps', extended=True)]),
Prototype(
regex=re.compile(r'^[a-z0-9=]{47}$', re.IGNORECASE),
modes=[
HashInfo(name='Fortigate(FortiOS)', hashcat=7000, john='fortigate', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{48}$', re.IGNORECASE),
modes=[
HashInfo(name='Haval-192', hashcat=None, john=None, extended=False),
HashInfo(name='Tiger-192', hashcat=None, john='tiger', extended=False),
HashInfo(name='SHA-1(Oracle)', hashcat=None, john=None, extended=False),
HashInfo(name='OSX v10.4', hashcat=122, john='xsha', extended=False),
HashInfo(name='OSX v10.5', hashcat=122, john='xsha', extended=False),
HashInfo(name='OSX v10.6', hashcat=122, john='xsha', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{51}$', re.IGNORECASE),
modes=[
HashInfo(name='Palshop CMS', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-z0-9]{51}$', re.IGNORECASE),
modes=[
HashInfo(name='CryptoCurrency(PrivateKey)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^{ssha1}[0-9]{2}\$[a-z0-9$\/.]{44}$', re.IGNORECASE),
modes=[
HashInfo(name='AIX(ssha1)', hashcat=6700, john='aix-ssha1', extended=False)]),
Prototype(
regex=re.compile(r'^0x0100[a-f0-9]{48}$', re.IGNORECASE),
modes=[
HashInfo(name='MSSQL(2005)', hashcat=132, john='mssql05', extended=False),
HashInfo(name='MSSQL(2008)', hashcat=132, john='mssql05', extended=False)]),
Prototype(
regex=re.compile(r'^(\$md5,rounds=[0-9]+\$|\$md5\$rounds=[0-9]+\$|\$md5\$)[a-z0-9\/.]{0,16}(\$|\$\$)[a-z0-9\/.]{22}$', re.IGNORECASE),
modes=[
HashInfo(name='Sun MD5 Crypt', hashcat=3300, john='sunmd5', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{56}$', re.IGNORECASE),
modes=[
HashInfo(name='SHA-224', hashcat=None, john='raw-sha224', extended=False),
HashInfo(name='Haval-224', hashcat=None, john=None, extended=False),
HashInfo(name='SHA3-224', hashcat=None, john=None, extended=False),
HashInfo(name='Skein-256(224)', hashcat=None, john=None, extended=False),
HashInfo(name='Skein-512(224)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^(\$2[axy]|\$2)\$[0-9]{2}\$[a-z0-9\/.]{53}$', re.IGNORECASE),
modes=[
HashInfo(name='Blowfish(OpenBSD)', hashcat=3200, john='bcrypt', extended=False),
HashInfo(name='Woltlab Burning Board 4.x', hashcat=None, john=None, extended=False),
HashInfo(name='bcrypt', hashcat=3200, john='bcrypt', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{40}:[a-f0-9]{16}$', re.IGNORECASE),
modes=[
HashInfo(name='Android PIN', hashcat=5800, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^(S:)?[a-f0-9]{40}(:)?[a-f0-9]{20}$', re.IGNORECASE),
modes=[
HashInfo(name='Oracle 11g/12c', hashcat=112, john='oracle11', extended=False)]),
Prototype(
regex=re.compile(r'^\$bcrypt-sha256\$(2[axy]|2)\,[0-9]+\$[a-z0-9\/.]{22}\$[a-z0-9\/.]{31}$', re.IGNORECASE),
modes=[
HashInfo(name='bcrypt(SHA-256)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{32}:.{3}$', re.IGNORECASE),
modes=[
HashInfo(name='vBulletin < v3.8.5', hashcat=2611, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{32}:.{30}$', re.IGNORECASE),
modes=[
HashInfo(name=u'vBulletin ≥ v3.8.5', hashcat=2711, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^(\$snefru\$)?[a-f0-9]{64}$', re.IGNORECASE),
modes=[
HashInfo(name='Snefru-256', hashcat=None, john='snefru-256', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{64}(:.+)?$', re.IGNORECASE),
modes=[
HashInfo(name='SHA-256', hashcat=1400, john='raw-sha256', extended=False),
HashInfo(name='RIPEMD-256', hashcat=None, john=None, extended=False),
HashInfo(name='Haval-256', hashcat=None, john='haval-256-3', extended=False),
HashInfo(name='GOST R 34.11-94', hashcat=6900, john='gost', extended=False),
HashInfo(name='GOST CryptoPro S-Box', hashcat=None, john=None, extended=False),
HashInfo(name='SHA3-256', hashcat=5000, john='raw-keccak-256', extended=False),
HashInfo(name='Skein-256', hashcat=None, john='skein-256', extended=False),
HashInfo(name='Skein-512(256)', hashcat=None, john=None, extended=False),
HashInfo(name='Ventrilo', hashcat=None, john=None, extended=True),
HashInfo(name='sha256($pass.$salt)', hashcat=1410, john=None, extended=True),
HashInfo(name='sha256($salt.$pass)', hashcat=1420, john=None, extended=True),
HashInfo(name='sha256(unicode($pass).$salt)', hashcat=1430, john=None, extended=True),
HashInfo(name='sha256($salt.unicode($pass))', hashcat=1440, john=None, extended=True),
HashInfo(name='HMAC-SHA256 (key = $pass)', hashcat=1450, john='hmac-sha256', extended=True),
HashInfo(name='HMAC-SHA256 (key = $salt)', hashcat=1460, john='hmac-sha256', extended=True)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{32}:[a-z0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='Joomla < v2.5.18', hashcat=11, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{32}:[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='SAM(LM_Hash:NT_Hash)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^(\$chap\$0\*)?[a-f0-9]{32}[\*:][a-f0-9]{32}(:[0-9]{2})?$', re.IGNORECASE),
modes=[
HashInfo(name='MD5(Chap)', hashcat=4800, john='chap', extended=False),
HashInfo(name='iSCSI CHAP Authentication', hashcat=4800, john='chap', extended=False)]),
Prototype(
regex=re.compile(r'^\$episerver\$\*0\*[a-z0-9\/=+]+\*[a-z0-9\/=+]{27,28}$', re.IGNORECASE),
modes=[
HashInfo(name='EPiServer 6.x < v4', hashcat=141, john='episerver', extended=False)]),
Prototype(
regex=re.compile(r'^{ssha256}[0-9]{2}\$[a-z0-9$\/.]{60}$', re.IGNORECASE),
modes=[
HashInfo(name='AIX(ssha256)', hashcat=6400, john='aix-ssha256', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{80}$', re.IGNORECASE),
modes=[
HashInfo(name='RIPEMD-320', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$episerver\$\*1\*[a-z0-9\/=+]+\*[a-z0-9\/=+]{42,43}$', re.IGNORECASE),
modes=[
HashInfo(name=u'EPiServer 6.x ≥ v4', hashcat=1441, john='episerver', extended=False)]),
Prototype(
regex=re.compile(r'^0x0100[a-f0-9]{88}$', re.IGNORECASE),
modes=[
HashInfo(name='MSSQL(2000)', hashcat=131, john='mssql', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{96}$', re.IGNORECASE),
modes=[
HashInfo(name='SHA-384', hashcat=10800, john='raw-sha384', extended=False),
HashInfo(name='SHA3-384', hashcat=None, john=None, extended=False),
HashInfo(name='Skein-512(384)', hashcat=None, john=None, extended=False),
HashInfo(name='Skein-1024(384)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^{SSHA512}[a-z0-9\/+]{96}$', re.IGNORECASE),
modes=[
HashInfo(name='SSHA-512(Base64)', hashcat=1711, john='ssha512', extended=False),
HashInfo(name='LDAP(SSHA-512)', hashcat=1711, john='ssha512', extended=False)]),
Prototype(
regex=re.compile(r'^{ssha512}[0-9]{2}\$[a-z0-9\/.]{16,48}\$[a-z0-9\/.]{86}$', re.IGNORECASE),
modes=[
HashInfo(name='AIX(ssha512)', hashcat=6500, john='aix-ssha512', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{128}(:.+)?$', re.IGNORECASE),
modes=[
HashInfo(name='SHA-512', hashcat=1700, john='raw-sha512', extended=False),
HashInfo(name='Whirlpool', hashcat=6100, john='whirlpool', extended=False),
HashInfo(name='Salsa10', hashcat=None, john=None, extended=False),
HashInfo(name='Salsa20', hashcat=None, john=None, extended=False),
HashInfo(name='SHA3-512', hashcat=None, john='raw-keccak', extended=False),
HashInfo(name='Skein-512', hashcat=None, john='skein-512', extended=False),
HashInfo(name='Skein-1024(512)', hashcat=None, john=None, extended=False),
HashInfo(name='sha512($pass.$salt)', hashcat=1710, john=None, extended=True),
HashInfo(name='sha512($salt.$pass)', hashcat=1720, john=None, extended=True),
HashInfo(name='sha512(unicode($pass).$salt)', hashcat=1730, john=None, extended=True),
HashInfo(name='sha512($salt.unicode($pass))', hashcat=1740, john=None, extended=True),
HashInfo(name='HMAC-SHA512 (key = $pass)', hashcat=1750, john='hmac-sha512', extended=True),
HashInfo(name='HMAC-SHA512 (key = $salt)', hashcat=1760, john='hmac-sha512', extended=True)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{136}$', re.IGNORECASE),
modes=[
HashInfo(name='OSX v10.7', hashcat=1722, john='xsha512', extended=False)]),
Prototype(
regex=re.compile(r'^0x0200[a-f0-9]{136}$', re.IGNORECASE),
modes=[
HashInfo(name='MSSQL(2012)', hashcat=1731, john='mssql12', extended=False),
HashInfo(name='MSSQL(2014)', hashcat=1731, john='mssql12', extended=False)]),
Prototype(
regex=re.compile(r'^\$ml\$[0-9]+\$[a-f0-9]{64}\$[a-f0-9]{128}$', re.IGNORECASE),
modes=[
HashInfo(name='OSX v10.8', hashcat=7100, john='pbkdf2-hmac-sha512', extended=False),
HashInfo(name='OSX v10.9', hashcat=7100, john='pbkdf2-hmac-sha512', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{256}$', re.IGNORECASE),
modes=[
HashInfo(name='Skein-1024', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^grub\.pbkdf2\.sha512\.[0-9]+\.([a-f0-9]{128,2048}\.|[0-9]+\.)?[a-f0-9]{128}$', re.IGNORECASE),
modes=[
HashInfo(name='GRUB 2', hashcat=7200, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^sha1\$[a-z0-9]+\$[a-f0-9]{40}$', re.IGNORECASE),
modes=[
HashInfo(name='Django(SHA-1)', hashcat=124, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{49}$', re.IGNORECASE),
modes=[
HashInfo(name='Citrix Netscaler', hashcat=8100, john='citrix_ns10', extended=False)]),
Prototype(
regex=re.compile(r'^\$S\$[a-z0-9\/.]{52}$', re.IGNORECASE),
modes=[
HashInfo(name='Drupal > v7.x', hashcat=7900, john='drupal7', extended=False)]),
Prototype(
regex=re.compile(r'^\$5\$(rounds=[0-9]+\$)?[a-z0-9\/.]{0,16}\$[a-z0-9\/.]{43}$', re.IGNORECASE),
modes=[
HashInfo(name='SHA-256 Crypt', hashcat=7400, john='sha256crypt', extended=False)]),
Prototype(
regex=re.compile(r'^0x[a-f0-9]{4}[a-f0-9]{16}[a-f0-9]{64}$', re.IGNORECASE),
modes=[
HashInfo(name='Sybase ASE', hashcat=8000, john='sybasease', extended=False)]),
Prototype(
regex=re.compile(r'^\$6\$(rounds=[0-9]+\$)?[a-z0-9\/.]{0,16}\$[a-z0-9\/.]{86}$', re.IGNORECASE),
modes=[
HashInfo(name='SHA-512 Crypt', hashcat=1800, john='sha512crypt', extended=False)]),
Prototype(
regex=re.compile(r'^\$sha\$[a-z0-9]{1,16}\$([a-f0-9]{32}|[a-f0-9]{40}|[a-f0-9]{64}|[a-f0-9]{128}|[a-f0-9]{140})$', re.IGNORECASE),
modes=[
HashInfo(name='Minecraft(AuthMe Reloaded)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^sha256\$[a-z0-9]+\$[a-f0-9]{64}$', re.IGNORECASE),
modes=[
HashInfo(name='Django(SHA-256)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^sha384\$[a-z0-9]+\$[a-f0-9]{96}$', re.IGNORECASE),
modes=[
HashInfo(name='Django(SHA-384)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^crypt1:[a-z0-9+=]{12}:[a-z0-9+=]{12}$', re.IGNORECASE),
modes=[
HashInfo(name='Clavister Secure Gateway', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{112}$', re.IGNORECASE),
modes=[
HashInfo(name='Cisco VPN Client(PCF-File)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{1329}$', re.IGNORECASE),
modes=[
HashInfo(name='Microsoft MSTSC(RDP-File)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[^\\\/:*?"<>|]{1,20}[:]{2,3}([^\\\/:*?"<>|]{1,20})?:[a-f0-9]{48}:[a-f0-9]{48}:[a-f0-9]{16}$', re.IGNORECASE),
modes=[
HashInfo(name='NetNTLMv1-VANILLA / NetNTLMv1+ESS', hashcat=5500, john='netntlm', extended=False)]),
Prototype(
regex=re.compile(r'^([^\\\/:*?"<>|]{1,20}\\)?[^\\\/:*?"<>|]{1,20}[:]{2,3}([^\\\/:*?"<>|]{1,20}:)?[^\\\/:*?"<>|]{1,20}:[a-f0-9]{32}:[a-f0-9]+$', re.IGNORECASE),
modes=[
HashInfo(name='NetNTLMv2', hashcat=5600, john='netntlmv2', extended=False)]),
Prototype(
regex=re.compile(r'^\$(krb5pa|mskrb5)\$([0-9]{2})?\$.+\$[a-f0-9]{1,}$', re.IGNORECASE),
modes=[
HashInfo(name='Kerberos 5 AS-REQ Pre-Auth', hashcat=7500, john='krb5pa-md5', extended=False)]),
Prototype(
regex=re.compile(r'^\$scram\$[0-9]+\$[a-z0-9\/.]{16}\$sha-1=[a-z0-9\/.]{27},sha-256=[a-z0-9\/.]{43},sha-512=[a-z0-9\/.]{86}$', re.IGNORECASE),
modes=[
HashInfo(name='SCRAM Hash', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{40}:[a-f0-9]{0,32}$', re.IGNORECASE),
modes=[
HashInfo(name='Redmine Project Management Web App', hashcat=7600, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^(.+)?\$[a-f0-9]{16}$', re.IGNORECASE),
modes=[
HashInfo(name='SAP CODVN B (BCODE)', hashcat=7700, john='sapb', extended=False)]),
Prototype(
regex=re.compile(r'^(.+)?\$[a-f0-9]{40}$', re.IGNORECASE),
modes=[
HashInfo(name='SAP CODVN F/G (PASSCODE)', hashcat=7800, john='sapg', extended=False)]),
Prototype(
regex=re.compile(r'^(.+\$)?[a-z0-9\/.+]{30}(:.+)?$', re.IGNORECASE),
modes=[
HashInfo(name='Juniper Netscreen/SSG(ScreenOS)', hashcat=22, john='md5ns', extended=False)]),
Prototype(
regex=re.compile(r'^0x[a-f0-9]{60}\s0x[a-f0-9]{40}$', re.IGNORECASE),
modes=[
HashInfo(name='EPi', hashcat=123, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{40}:[^*]{1,25}$', re.IGNORECASE),
modes=[
HashInfo(name=u'SMF ≥ v1.1', hashcat=121, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^(\$wbb3\$\*1\*)?[a-f0-9]{40}[:*][a-f0-9]{40}$', re.IGNORECASE),
modes=[
HashInfo(name='Woltlab Burning Board 3.x', hashcat=8400, john='wbb3', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{130}(:[a-f0-9]{40})?$', re.IGNORECASE),
modes=[
HashInfo(name='IPMI2 RAKP HMAC-SHA1', hashcat=7300, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{32}:[0-9]+:[a-z0-9_.+-]+@[a-z0-9-]+\.[a-z0-9-.]+$', re.IGNORECASE),
modes=[
HashInfo(name='Lastpass', hashcat=6800, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-z0-9\/.]{16}([:$].{1,})?$', re.IGNORECASE),
modes=[
HashInfo(name='Cisco-ASA(MD5)', hashcat=2410, john='asa-md5', extended=False)]),
Prototype(
regex=re.compile(r'^\$vnc\$\*[a-f0-9]{32}\*[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='VNC', hashcat=None, john='vnc', extended=False)]),
Prototype(
regex=re.compile(r'^[a-z0-9]{32}(:([a-z0-9-]+\.)?[a-z0-9-.]+\.[a-z]{2,7}:.+:[0-9]+)?$', re.IGNORECASE),
modes=[
HashInfo(name='DNSSEC(NSEC3)', hashcat=8300, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^(user-.+:)?\$racf\$\*.+\*[a-f0-9]{16}$', re.IGNORECASE),
modes=[
HashInfo(name='RACF', hashcat=8500, john='racf', extended=False)]),
Prototype(
regex=re.compile(r'^\$3\$\$[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='NTHash(FreeBSD Variant)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$sha1\$[0-9]+\$[a-z0-9\/.]{0,64}\$[a-z0-9\/.]{28}$', re.IGNORECASE),
modes=[
HashInfo(name='SHA-1 Crypt', hashcat=None, john='sha1crypt', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{70}$', re.IGNORECASE),
modes=[
HashInfo(name='hMailServer', hashcat=1421, john='hmailserver', extended=False)]),
Prototype(
regex=re.compile(r'^[:\$][AB][:\$]([a-f0-9]{1,8}[:\$])?[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='MediaWiki', hashcat=3711, john='mediawiki', extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{140}$', re.IGNORECASE),
modes=[
HashInfo(name='Minecraft(xAuth)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$pbkdf2(-sha1)?\$[0-9]+\$[a-z0-9\/.]+\$[a-z0-9\/.]{27}$', re.IGNORECASE),
modes=[
HashInfo(name='PBKDF2-SHA1(Generic)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$pbkdf2-sha256\$[0-9]+\$[a-z0-9\/.]+\$[a-z0-9\/.]{43}$', re.IGNORECASE),
modes=[
HashInfo(name='PBKDF2-SHA256(Generic)', hashcat=None, john='pbkdf2-hmac-sha256', extended=False)]),
Prototype(
regex=re.compile(r'^\$pbkdf2-sha512\$[0-9]+\$[a-z0-9\/.]+\$[a-z0-9\/.]{86}$', re.IGNORECASE),
modes=[
HashInfo(name='PBKDF2-SHA512(Generic)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$p5k2\$[0-9]+\$[a-z0-9\/+=-]+\$[a-z0-9\/+-]{27}=$', re.IGNORECASE),
modes=[
HashInfo(name='PBKDF2(Cryptacular)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$p5k2\$[0-9]+\$[a-z0-9\/.]+\$[a-z0-9\/.]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='PBKDF2(Dwayne Litzenberger)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^{FSHP[0123]\|[0-9]+\|[0-9]+}[a-z0-9\/+=]+$', re.IGNORECASE),
modes=[
HashInfo(name='Fairly Secure Hashed Password', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$PHPS\$.+\$[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='PHPS', hashcat=2612, john='phps', extended=False)]),
Prototype(
regex=re.compile(r'^[0-9]{4}:[a-f0-9]{16}:[a-f0-9]{2080}$', re.IGNORECASE),
modes=[
HashInfo(name='1Password(Agile Keychain)', hashcat=6600, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{64}:[a-f0-9]{32}:[0-9]{5}:[a-f0-9]{608}$', re.IGNORECASE),
modes=[
HashInfo(name='1Password(Cloud Keychain)', hashcat=8200, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{256}:[a-f0-9]{256}:[a-f0-9]{16}:[a-f0-9]{16}:[a-f0-9]{320}:[a-f0-9]{16}:[a-f0-9]{40}:[a-f0-9]{40}:[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='IKE-PSK MD5', hashcat=5300, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{256}:[a-f0-9]{256}:[a-f0-9]{16}:[a-f0-9]{16}:[a-f0-9]{320}:[a-f0-9]{16}:[a-f0-9]{40}:[a-f0-9]{40}:[a-f0-9]{40}$', re.IGNORECASE),
modes=[
HashInfo(name='IKE-PSK SHA1', hashcat=5400, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-z0-9\/+]{27}=$', re.IGNORECASE),
modes=[
HashInfo(name='PeopleSoft', hashcat=133, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^crypt\$[a-f0-9]{5}\$[a-z0-9\/.]{13}$', re.IGNORECASE),
modes=[
HashInfo(name='Django(DES Crypt Wrapper)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^(\$django\$\*1\*)?pbkdf2_sha256\$[0-9]+\$[a-z0-9]+\$[a-z0-9\/+=]{44}$', re.IGNORECASE),
modes=[
HashInfo(name='Django(PBKDF2-HMAC-SHA256)', hashcat=10000, john='django', extended=False)]),
Prototype(
regex=re.compile(r'^pbkdf2_sha1\$[0-9]+\$[a-z0-9]+\$[a-z0-9\/+=]{28}$', re.IGNORECASE),
modes=[
HashInfo(name='Django(PBKDF2-HMAC-SHA1)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^bcrypt(\$2[axy]|\$2)\$[0-9]{2}\$[a-z0-9\/.]{53}$', re.IGNORECASE),
modes=[
HashInfo(name='Django(bcrypt)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^md5\$[a-f0-9]+\$[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='Django(MD5)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\{PKCS5S2\}[a-z0-9\/+]{64}$', re.IGNORECASE),
modes=[
HashInfo(name='PBKDF2(Atlassian)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^md5[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='PostgreSQL MD5', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\([a-z0-9\/+]{49}\)$', re.IGNORECASE),
modes=[
HashInfo(name='Lotus Notes/Domino 8', hashcat=9100, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^SCRYPT:[0-9]{1,}:[0-9]{1}:[0-9]{1}:[a-z0-9:\/+=]{1,}$', re.IGNORECASE),
modes=[
HashInfo(name='scrypt', hashcat=8900, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$8\$[a-z0-9\/.]{14}\$[a-z0-9\/.]{43}$', re.IGNORECASE),
modes=[
HashInfo(name='Cisco Type 8', hashcat=9200, john='cisco8', extended=False)]),
Prototype(
regex=re.compile(r'^\$9\$[a-z0-9\/.]{14}\$[a-z0-9\/.]{43}$', re.IGNORECASE),
modes=[
HashInfo(name='Cisco Type 9', hashcat=9300, john='cisco9', extended=False)]),
Prototype(
regex=re.compile(r'^\$office\$\*2007\*[0-9]{2}\*[0-9]{3}\*[0-9]{2}\*[a-z0-9]{32}\*[a-z0-9]{32}\*[a-z0-9]{40}$', re.IGNORECASE),
modes=[
HashInfo(name='Microsoft Office 2007', hashcat=9400, john='office', extended=False)]),
Prototype(
regex=re.compile(r'^\$office\$\*2010\*[0-9]{6}\*[0-9]{3}\*[0-9]{2}\*[a-z0-9]{32}\*[a-z0-9]{32}\*[a-z0-9]{64}$', re.IGNORECASE),
modes=[
HashInfo(name='Microsoft Office 2010', hashcat=9500, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$office\$\*2013\*[0-9]{6}\*[0-9]{3}\*[0-9]{2}\*[a-z0-9]{32}\*[a-z0-9]{32}\*[a-z0-9]{64}$', re.IGNORECASE),
modes=[
HashInfo(name='Microsoft Office 2013', hashcat=9600, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$fde\$[0-9]{2}\$[a-f0-9]{32}\$[0-9]{2}\$[a-f0-9]{32}\$[a-f0-9]{3072}$', re.IGNORECASE),
modes=[
HashInfo(name=u'Android FDE ≤ 4.3', hashcat=8800, john='fde', extended=False)]),
Prototype(
regex=re.compile(r'^\$oldoffice\$[01]\*[a-f0-9]{32}\*[a-f0-9]{32}\*[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name=u'Microsoft Office ≤ 2003 (MD5+RC4)', hashcat=9700, john='oldoffice', extended=False),
HashInfo(name=u'Microsoft Office ≤ 2003 (MD5+RC4) collider-mode #1', hashcat=9710, john='oldoffice', extended=False),
HashInfo(name=u'Microsoft Office ≤ 2003 (MD5+RC4) collider-mode #2', hashcat=9720, john='oldoffice', extended=False)]),
Prototype(
regex=re.compile(r'^\$oldoffice\$[34]\*[a-f0-9]{32}\*[a-f0-9]{32}\*[a-f0-9]{40}$', re.IGNORECASE),
modes=[
HashInfo(name=u'Microsoft Office ≤ 2003 (SHA1+RC4)', hashcat=9800, john=None, extended=False),
HashInfo(name=u'Microsoft Office ≤ 2003 (SHA1+RC4) collider-mode #1', hashcat=9810, john=None, extended=False),
HashInfo(name=u'Microsoft Office ≤ 2003 (SHA1+RC4) collider-mode #2', hashcat=9820, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^(\$radmin2\$)?[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='RAdmin v2.x', hashcat=9900, john='radmin', extended=False)]),
Prototype(
regex=re.compile(r'^{x-issha,\s[0-9]{4}}[a-z0-9\/+=]+$', re.IGNORECASE),
modes=[
HashInfo(name='SAP CODVN H (PWDSALTEDHASH) iSSHA-1', hashcat=10300, john='saph', extended=False)]),
Prototype(
regex=re.compile(r'^\$cram_md5\$[a-z0-9\/+=-]+\$[a-z0-9\/+=-]{52}$', re.IGNORECASE),
modes=[
HashInfo(name='CRAM-MD5', hashcat=10200, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{16}:2:4:[a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='SipHash', hashcat=10100, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^[a-f0-9]{4,}$', re.IGNORECASE),
modes=[
HashInfo(name='Cisco Type 7', hashcat=None, john=None, extended=True)]),
Prototype(
regex=re.compile(r'^[a-z0-9\/.]{13,}$', re.IGNORECASE),
modes=[
HashInfo(name='BigCrypt', hashcat=None, john='bigcrypt', extended=True)]),
Prototype(
regex=re.compile(r'^(\$cisco4\$)?[a-z0-9\/.]{43}$', re.IGNORECASE),
modes=[
HashInfo(name='Cisco Type 4', hashcat=None, john='cisco4', extended=False)]),
Prototype(
regex=re.compile(r'^bcrypt_sha256\$\$(2[axy]|2)\$[0-9]+\$[a-z0-9\/.]{53}$', re.IGNORECASE),
modes=[
HashInfo(name='Django(bcrypt-SHA256)', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$postgres\$.[^\*]+[*:][a-f0-9]{1,32}[*:][a-f0-9]{32}$', re.IGNORECASE),
modes=[
HashInfo(name='PostgreSQL Challenge-Response Authentication (MD5)', hashcat=11100, john='postgres', extended=False)]),
Prototype(
regex=re.compile(r'^\$siemens-s7\$[0-9]{1}\$[a-f0-9]{40}\$[a-f0-9]{40}$', re.IGNORECASE),
modes=[
HashInfo(name='Siemens-S7', hashcat=None, john='siemens-s7', extended=False)]),
Prototype(
regex=re.compile(r'^(\$pst\$)?[a-f0-9]{8}$', re.IGNORECASE),
modes=[
HashInfo(name='Microsoft Outlook PST', hashcat=None, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^sha256[:$][0-9]+[:$][a-z0-9\/+]+[:$][a-z0-9\/+]{32,128}$', re.IGNORECASE),
modes=[
HashInfo(name='PBKDF2-HMAC-SHA256(PHP)', hashcat=10900, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^(\$dahua\$)?[a-z0-9]{8}$', re.IGNORECASE),
modes=[
HashInfo(name='Dahua', hashcat=None, john='dahua', extended=False)]),
Prototype(
regex=re.compile(r'^\$mysqlna\$[a-f0-9]{40}[:*][a-f0-9]{40}$', re.IGNORECASE),
modes=[
HashInfo(name='MySQL Challenge-Response Authentication (SHA1)', hashcat=11200, john=None, extended=False)]),
Prototype(
regex=re.compile(r'^\$pdf\$[24]\*[34]\*128\*[0-9-]{1,5}\*1\*(16|32)\*[a-f0-9]{32,64}\*32\*[a-f0-9]{64}\*(8|16|32)\*[a-f0-9]{16,64}$', re.IGNORECASE),
modes=[
HashInfo(name='PDF 1.4 - 1.6 (Acrobat 5 - 8)', hashcat=10500, john='pdf', extended=False)])
]
class HashID(object):
"""HashID with configurable prototypes"""
def __init__(self, prototypes=prototypes):
super(HashID, self).__init__()
# Set self.prototypes to a copy of prototypes to allow
# modification after instantiation
self.prototypes = list(prototypes)
def identifyHash(self, phash):
"""Returns identified HashInfo"""
phash = phash.strip()
for prototype in self.prototypes:
if prototype.regex.match(phash):
for mode in prototype.modes:
yield mode
def writeResult(identified_modes, outfile, hashcatMode=False, johnFormat=False, extended=False):
"""Write human readable output from identifyHash"""
count = 0
hashTypes = ""
for mode in identified_modes:
if not mode.extended or extended:
count += 1
hashTypes += u"[+] {0} ".format(mode.name)
if hashcatMode and mode.hashcat is not None:
hashTypes += "[Hashcat Mode: {0}]".format(mode.hashcat)
if johnFormat and mode.john is not None:
hashTypes += "[JtR Format: {0}]".format(mode.john)
hashTypes += "\n"
if count == 0:
hashTypes = u"[+] Unknown hash\n"
outfile.write(hashTypes)
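# Minimal usage sketch (the hash literal is an arbitrary 32-hex-char example,
# not taken from this file): identifyHash() is a generator of HashInfo
# candidates, and writeResult() consumes it, writing one "[+] <name>" line
# per candidate to the given file object.
#
#   hid = HashID()
#   writeResult(hid.identifyHash("8743b52063cd84097a65d1633f5c74f5"),
#               sys.stdout, hashcatMode=True, johnFormat=True)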
def main():
usage = "{0} [-h] [-e] [-m] [-j] [-o FILE] [--version] INPUT".format(os.path.basename(__file__))
parser = argparse.ArgumentParser(
description="Identify the different types of hashes used to encrypt data",
usage=usage,
epilog=__license__,
add_help=False,
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=27)
)
parser.add_argument("strings",
metavar="INPUT", type=str, nargs="*",
help="input to analyze (default: STDIN)")
group = parser.add_argument_group('options')
group.add_argument("-e", "--extended",
action="store_true",
help="list all possible hash algorithms including salted passwords")
group.add_argument("-m", "--mode",
action="store_true",
help="show corresponding Hashcat mode in output")
group.add_argument("-j", "--john",
action="store_true",
help="show corresponding JohnTheRipper format in output")
group.add_argument("-o", "--outfile",
metavar="FILE", type=str,
help="write output to file")
group.add_argument("-h", "--help",
action="help",
help="show this help message and exit")
group.add_argument("--version",
action="version",
version=__banner__)
args = parser.parse_args()
hashID = HashID()
if not args.outfile:
outfile = sys.stdout
else:
try:
outfile = io.open(args.outfile, "w", encoding="utf-8")
except EnvironmentError:
parser.error("Could not open {0}".format(args.output))
if not args.strings or args.strings[0] == "-":
while True:
line = sys.stdin.readline()
if not line:
break
outfile.write(u"Analyzing '{0}'\n".format(line.strip()))
writeResult(hashID.identifyHash(line), outfile, args.mode, args.john, args.extended)
outfile.flush()
else:
for string in args.strings:
if os.path.isfile(string):
try:
with io.open(string, "r", encoding="utf-8") as infile:
outfile.write("--File '{0}'--\n".format(string))
for line in infile:
if line.strip():
outfile.write(u"Analyzing '{0}'\n".format(line.strip()))
writeResult(hashID.identifyHash(line), outfile, args.mode, args.john, args.extended)
except (EnvironmentError, UnicodeDecodeError):
outfile.write("--File '{0}' - could not open--".format(string))
else:
outfile.write("--End of file '{0}'--".format(string))
else:
outfile.write(u"Analyzing '{0}'\n".format(string.strip()))
writeResult(hashID.identifyHash(string), outfile, args.mode, args.john, args.extended)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
Miskerest/IDCrash
|
hashID/hashid.py
|
Python
|
mit
| 47,274
|
[
"ASE"
] |
f5d39fdda0fb3335470e38da7860966d487376758c989ab1337c004388fa1141
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
r"""
This module contains classes useful for analyzing ferroelectric candidates.
The Polarization class can recover the spontaneous polarization using
multiple calculations along a nonpolar to polar ferroelectric distortion.
The EnergyTrend class is useful for assessing the trend in energy across
the distortion.
See Nicola Spaldin's "A beginner's guide to the modern theory of polarization"
(https://arxiv.org/abs/1202.1831) for an introduction to crystal polarization.
VASP reports dipole moment values (used to derive polarization) along Cartesian
directions (see pead.F around line 970 in the VASP source to confirm this).
However, it is most convenient to perform the adjustments necessary to recover
a same branch polarization by expressing the polarization along lattice directions.
For this reason, calc_ionic calculates ionic contributions to the polarization
along lattice directions. We provide the means to convert Cartesian direction
polarizations to lattice direction polarizations in the Polarization class.
We recommend using our calc_ionic function for calculating the ionic
polarization rather than the values from OUTCAR. We find that the ionic
dipole moment reported in OUTCAR differs from the naive calculation of
\\sum_i Z_i r_i, where i is the index of the atom, Z_i is the ZVAL from the
pseudopotential file, and r_i is the distance in Angstroms along the lattice vectors.
Note, this difference is not simply due to VASP using Cartesian directions and
calc_ionic using lattice direction but rather how the ionic polarization is
computed. Compare calc_ionic to VASP SUBROUTINE POINT_CHARGE_DIPOL in dipol.F in
the VASP source to see the differences. We are able to recover a smooth same
branch polarization more frequently using the naive calculation in calc_ionic
than using the ionic dipole moment reported in the OUTCAR.
Some definitions of terms used in the comments below:
A polar structure belongs to a polar space group. A polar space group has
one of the 10 polar point groups:
(1, 2, m, mm2, 4, 4mm, 3, 3m, 6, 6mm)
Being nonpolar is not equivalent to being centrosymmetric (having inversion
symmetry). For example, any space group with point group 222 is nonpolar but
not centrosymmetric.
By symmetry the polarization of a nonpolar material modulo the quantum
of polarization can only be zero or 1/2. We use a nonpolar structure to help
determine the spontaneous polarization because it serves as a reference point.
"""
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
__author__ = "Tess Smidt"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "1.0"
__email__ = "tsmidt@berkeley.edu"
__status__ = "Development"
__date__ = "April 15, 2017"
def zval_dict_from_potcar(potcar):
"""
Creates zval_dictionary for calculating the ionic polarization from
Potcar object
potcar: Potcar object
"""
zval_dict = {}
for p in potcar:
zval_dict.update({p.element: p.ZVAL})
return zval_dict
def calc_ionic(site, structure, zval):
"""
Calculate the ionic dipole moment using ZVAL from pseudopotential
site: PeriodicSite
structure: Structure
zval: Charge value for ion (ZVAL for VASP pseudopotential)
Returns polarization in electron Angstroms.
"""
norms = structure.lattice.lengths
return np.multiply(norms, -site.frac_coords * zval)
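# Illustrative check of calc_ionic (hypothetical values): for a site at
# frac_coords [0.5, 0.5, 0.5] in a cubic lattice with lengths (4, 4, 4)
# Angstrom and zval = 6, it returns [-12., -12., -12.] electron Angstroms,
# i.e. lattice_lengths * (-frac_coords * zval).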
def get_total_ionic_dipole(structure, zval_dict):
"""
Get the total ionic dipole moment for a structure.
structure: pymatgen Structure
zval_dict: specie, zval dictionary pairs
"""
tot_ionic = []
for site in structure:
zval = zval_dict[str(site.specie)]
tot_ionic.append(calc_ionic(site, structure, zval))
return np.sum(tot_ionic, axis=0)
class PolarizationLattice(Structure):
"""
Why is a Lattice inheriting a structure? This is ridiculous.
"""
def get_nearest_site(self, coords, site, r=None):
"""
Given coords and a site, find the closest site to coords.
Args:
coords (3x1 array): cartesian coords of center of sphere
site: site to find closest to coords
r: radius of sphere. Defaults to diagonal of unit cell
Returns:
Closest site and distance.
"""
index = self.index(site)
if r is None:
r = np.linalg.norm(np.sum(self.lattice.matrix, axis=0))
ns = self.get_sites_in_sphere(coords, r, include_index=True)
# Get sites with identical index to site
ns = [n for n in ns if n[2] == index]
# Sort by distance to coords
ns.sort(key=lambda x: x[1])
# Return PeriodicSite and distance of closest image
return ns[0][0:2]
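# Sketch of what get_nearest_site does (values hypothetical): for a site at
# fractional coords [0.95, 0, 0] and a reference point at the origin, the
# closest periodic image lies at [-0.05, 0, 0], one lattice vector away from
# site.coords. This is what lets the same-branch algorithm below pick the
# polarization image nearest the previous point on the distortion path.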
class Polarization:
"""
Class for recovering the same branch polarization for a set of
polarization calculations along the nonpolar - polar distortion
path of a ferroelectric.
p_elecs, p_ions, and structures lists should be given in order
of nonpolar to polar! For example, the structures returned from:
nonpolar.interpolate(polar,interpolate_lattices=True)
if nonpolar is the nonpolar Structure and polar is the polar structure.
It is assumed that the electronic and ionic dipole moment values
are given in electron Angstroms along the three lattice directions
(a,b,c).
"""
def __init__(
self,
p_elecs,
p_ions,
structures,
p_elecs_in_cartesian=True,
p_ions_in_cartesian=False,
):
"""
p_elecs: np.array of electronic contribution to the polarization with shape [N, 3]
p_ions: np.array of ionic contribution to the polarization with shape [N, 3]
p_elecs_in_cartesian: whether p_elecs is along Cartesian directions (rather than lattice directions).
Default is True because that is the convention for VASP.
p_ions_in_cartesian: whether p_ions is along Cartesian directions (rather than lattice directions).
Default is False because calc_ionic (which we recommend using for calculating the ionic
contribution to the polarization) uses lattice directions.
"""
if len(p_elecs) != len(p_ions) or len(p_elecs) != len(structures):
raise ValueError("The number of electronic polarization and ionic polarization values must be equal.")
if p_elecs_in_cartesian:
p_elecs = np.array(
[struct.lattice.get_vector_along_lattice_directions(p_elecs[i]) for i, struct in enumerate(structures)]
)
if p_ions_in_cartesian:
p_ions = np.array(
[struct.lattice.get_vector_along_lattice_directions(p_ions[i]) for i, struct in enumerate(structures)]
)
self.p_elecs = np.array(p_elecs)
self.p_ions = np.array(p_ions)
self.structures = structures
@classmethod
def from_outcars_and_structures(cls, outcars, structures, calc_ionic_from_zval=False):
"""
Create Polarization object from list of Outcars and Structures in order
of nonpolar to polar.
Note, we recommend calculating the ionic dipole moment using calc_ionic
than using the values in Outcar (see module comments). To do this set
calc_ionic_from_zval = True
"""
p_elecs = []
p_ions = []
for i, o in enumerate(outcars):
p_elecs.append(o.p_elec)
if calc_ionic_from_zval:
p_ions.append(get_total_ionic_dipole(structures[i], o.zval_dict))
else:
p_ions.append(o.p_ion)
return cls(p_elecs, p_ions, structures)
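# Typical construction (sketch; directory names are hypothetical), assuming
# one static polarization calculation per interpolated structure along the
# nonpolar -> polar path:
#
#   from pymatgen.io.vasp.outputs import Outcar
#   outcars = [Outcar("interp_%d/OUTCAR" % i) for i in range(10)]
#   structures = [Structure.from_file("interp_%d/POSCAR" % i) for i in range(10)]
#   polarization = Polarization.from_outcars_and_structures(
#       outcars, structures, calc_ionic_from_zval=True)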
def get_pelecs_and_pions(self, convert_to_muC_per_cm2=False):
"""
Get the electronic and ionic dipole moments / polarizations.
convert_to_muC_per_cm2: Convert from electron * Angstroms to microCoulomb
per centimeter**2
"""
if not convert_to_muC_per_cm2:
return self.p_elecs, self.p_ions
p_elecs = self.p_elecs.T
p_ions = self.p_ions.T
volumes = [s.lattice.volume for s in self.structures]
e_to_muC = -1.6021766e-13
cm2_to_A2 = 1e16
units = 1.0 / np.array(volumes)
units *= e_to_muC * cm2_to_A2
# Elementwise scaling of each structure's dipole by its unit conversion;
# np.multiply broadcasts units over the three lattice directions (np.matmul
# would fail here for more than three structures).
p_elecs = np.multiply(units, p_elecs)
p_ions = np.multiply(units, p_ions)
p_elecs, p_ions = p_elecs.T, p_ions.T
return p_elecs, p_ions
def get_same_branch_polarization_data(self, convert_to_muC_per_cm2=True, all_in_polar=True):
r"""
Get same branch dipole moment (convert_to_muC_per_cm2=False)
or polarization for given polarization data (convert_to_muC_per_cm2=True).
Polarization is a lattice vector, meaning it is only defined modulo the
quantum of polarization:
P = P_0 + \\sum_i \\frac{n_i e R_i}{\\Omega}
where n_i is an integer, e is the charge of the electron in microCoulombs,
R_i is a lattice vector, and \\Omega is the unit cell volume in cm**3
(giving polarization units of microCoulomb per centimeter**2).
The quantum of the dipole moment in electron Angstroms (as given by VASP) is:
\\sum_i n_i e R_i
where e, the electron charge, is 1 and R_i is a lattice vector, and n_i is an integer.
Given N polarization calculations in order from nonpolar to polar, this algorithm
minimizes the distance between adjacent polarization images. To do this, it
constructs a polarization lattice for each polarization calculation using the
pymatgen.core.structure class and calls the get_nearest_site method to find the
image of a given polarization lattice vector that is closest to the previous polarization
lattice vector image.
Note, using convert_to_muC_per_cm2=True and all_in_polar=True calculates the "proper
polarization" (meaning the change in polarization does not depend on the choice of
polarization branch) while convert_to_muC_per_cm2=True and all_in_polar=False calculates
the "improper polarization" (meaning the change in polarization does depend on the choice
of branch). As one might guess from the names. We recommend calculating the "proper
polarization".
convert_to_muC_per_cm2: convert polarization from electron * Angstroms to
microCoulomb per centimeter**2
all_in_polar: convert polarization to be in polar (final structure) polarization lattice
"""
p_elec, p_ion = self.get_pelecs_and_pions()
p_tot = p_elec + p_ion
p_tot = np.array(p_tot)
lattices = [s.lattice for s in self.structures]
volumes = np.array([s.lattice.volume for s in self.structures])
L = len(p_elec)
e_to_muC = -1.6021766e-13
cm2_to_A2 = 1e16
units = 1.0 / np.array(volumes)
units *= e_to_muC * cm2_to_A2
# convert polarizations and lattice lengths prior to adjustment
if convert_to_muC_per_cm2 and not all_in_polar:
# Convert the total polarization
p_tot = np.multiply(units.T[:, np.newaxis], p_tot)
# adjust lattices
for i in range(L):
lattice = lattices[i]
l = lattice.lengths
a = lattice.angles
lattices[i] = Lattice.from_parameters(*(np.array(l) * units.ravel()[i]), *a)
# convert polarizations to polar lattice
elif convert_to_muC_per_cm2 and all_in_polar:
abc = [lattice.abc for lattice in lattices]
abc = np.array(abc) # [N, 3]
p_tot /= abc # e * Angstroms to e
p_tot *= abc[-1] / volumes[-1] * e_to_muC * cm2_to_A2 # to muC / cm^2
for i in range(L):
lattice = lattices[-1] # Use polar lattice
l = lattice.lengths
a = lattice.angles
# Use polar units (volume)
lattices[i] = Lattice.from_parameters(*(np.array(l) * units.ravel()[-1]), *a)
d_structs = []
sites = []
for i in range(L):
l = lattices[i]
frac_coord = np.divide(np.array([p_tot[i]]), np.array([l.a, l.b, l.c]))
d = PolarizationLattice(l, ["C"], [np.array(frac_coord).ravel()])
d_structs.append(d)
site = d[0]
if i == 0:
# Adjust nonpolar polarization to be closest to zero.
# This is compatible with both a polarization of zero or a half quantum.
prev_site = [0, 0, 0]
else:
prev_site = sites[-1].coords
new_site = d.get_nearest_site(prev_site, site)
sites.append(new_site[0])
adjust_pol = []
for s, d in zip(sites, d_structs):
l = d.lattice
adjust_pol.append(np.multiply(s.frac_coords, np.array([l.a, l.b, l.c])).ravel())
adjust_pol = np.array(adjust_pol)
return adjust_pol
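# Sketch of the adjustment (hypothetical numbers): with a quantum of
# ~100 uC/cm^2 along c, a raw sequence like [1, 4, -94, 2, 5] uC/cm^2 is
# adjusted to [1, 4, 6, 2, 5] by shifting the third value up by one quantum,
# the periodic image closest to its predecessor on the distortion path.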
def get_lattice_quanta(self, convert_to_muC_per_cm2=True, all_in_polar=True):
"""
Returns the dipole / polarization quanta along a, b, and c for
all structures.
"""
lattices = [s.lattice for s in self.structures]
volumes = np.array([s.lattice.volume for s in self.structures])
L = len(self.structures)
e_to_muC = -1.6021766e-13
cm2_to_A2 = 1e16
units = 1.0 / np.array(volumes)
units *= e_to_muC * cm2_to_A2
# convert polarizations and lattice lengths prior to adjustment
if convert_to_muC_per_cm2 and not all_in_polar:
# adjust lattices
for i in range(L):
lattice = lattices[i]
l = lattice.lengths
a = lattice.angles
lattices[i] = Lattice.from_parameters(*(np.array(l) * units.ravel()[i]), *a)
elif convert_to_muC_per_cm2 and all_in_polar:
for i in range(L):
lattice = lattices[-1]
l = lattice.lengths
a = lattice.angles
lattices[i] = Lattice.from_parameters(*(np.array(l) * units.ravel()[-1]), *a)
quanta = np.array([np.array(l.lengths) for l in lattices])
return quanta
def get_polarization_change(self, convert_to_muC_per_cm2=True, all_in_polar=True):
"""
Get difference between nonpolar and polar same branch polarization.
"""
tot = self.get_same_branch_polarization_data(
convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar
)
# reshape to preserve backwards compatibility due to changes
# when switching from np.matrix to np.array
return (tot[-1] - tot[0]).reshape((1, 3))
def get_polarization_change_norm(self, convert_to_muC_per_cm2=True, all_in_polar=True):
"""
Get magnitude of difference between nonpolar and polar same branch
polarization.
"""
polar = self.structures[-1]
a, b, c = polar.lattice.matrix
a, b, c = a / np.linalg.norm(a), b / np.linalg.norm(b), c / np.linalg.norm(c)
P = self.get_polarization_change(
convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar
).ravel()
P_norm = np.linalg.norm(a * P[0] + b * P[1] + c * P[2])
return P_norm
def same_branch_splines(self, convert_to_muC_per_cm2=True, all_in_polar=True):
"""
Fit splines to same branch polarization. This is used to assess any jumps
in the same branch polarization.
"""
from scipy.interpolate import UnivariateSpline
tot = self.get_same_branch_polarization_data(
convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar
)
L = tot.shape[0]
try:
sp_a = UnivariateSpline(range(L), tot[:, 0].ravel())
except Exception:
sp_a = None
try:
sp_b = UnivariateSpline(range(L), tot[:, 1].ravel())
except Exception:
sp_b = None
try:
sp_c = UnivariateSpline(range(L), tot[:, 2].ravel())
except Exception:
sp_c = None
return sp_a, sp_b, sp_c
def max_spline_jumps(self, convert_to_muC_per_cm2=True, all_in_polar=True):
"""
Get maximum difference between spline and same branch polarization data.
"""
tot = self.get_same_branch_polarization_data(
convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar
)
sps = self.same_branch_splines(convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar)
max_jumps = [None, None, None]
for i, sp in enumerate(sps):
if sp is not None:
max_jumps[i] = max(tot[:, i].ravel() - sp(range(len(tot[:, i].ravel()))))
return max_jumps
def smoothness(self, convert_to_muC_per_cm2=True, all_in_polar=True):
"""
Get rms average difference between spline and same branch polarization data.
"""
tot = self.get_same_branch_polarization_data(
convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar
)
L = tot.shape[0]
try:
sp = self.same_branch_splines(convert_to_muC_per_cm2=convert_to_muC_per_cm2, all_in_polar=all_in_polar)
except Exception:
print("Something went wrong.")
return None
sp_latt = [sp[i](range(L)) for i in range(3)]
diff = [sp_latt[i] - tot[:, i].ravel() for i in range(3)]
rms = [np.sqrt(np.sum(np.square(diff[i])) / L) for i in range(3)]
return rms
class EnergyTrend:
"""
Class for fitting trends to energies.
"""
def __init__(self, energies):
"""
:param energies: Energies
"""
self.energies = energies
def spline(self):
"""
Fit spline to energy trend data.
"""
from scipy.interpolate import UnivariateSpline
sp = UnivariateSpline(range(len(self.energies)), self.energies, k=4)
return sp
def smoothness(self):
"""
Get rms average difference between spline and energy trend.
"""
energies = self.energies
try:
sp = self.spline()
except Exception:
print("Energy spline failed.")
return None
spline_energies = sp(range(len(energies)))
diff = spline_energies - energies
rms = np.sqrt(np.sum(np.square(diff)) / len(energies))
return rms
def max_spline_jump(self):
"""
Get maximum difference between spline and energy trend.
"""
sp = self.spline()
return max(self.energies - sp(range(len(self.energies))))
def endpoints_minima(self, slope_cutoff=5e-3):
"""
Test if spline endpoints are at minima for a given slope cutoff.
"""
energies = self.energies
try:
sp = self.spline()
except Exception:
print("Energy spline failed.")
return None
der = sp.derivative()
der_energies = der(range(len(energies)))
return {
"polar": abs(der_energies[-1]) <= slope_cutoff,
"nonpolar": abs(der_energies[0]) <= slope_cutoff,
}
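# Minimal sketch (energies are hypothetical): fit the quartic spline to the
# energy profile along the distortion and check that both endpoints sit near
# minima before trusting the polarization trend.
#
#   trend = EnergyTrend([0.00, 0.00, 0.02, 0.05, 0.02, 0.00, 0.00])
#   print(trend.smoothness())        # rms deviation of spline from data
#   print(trend.endpoints_minima())  # {'polar': bool, 'nonpolar': bool}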
|
vorwerkc/pymatgen
|
pymatgen/analysis/ferroelectricity/polarization.py
|
Python
|
mit
| 19,810
|
[
"CRYSTAL",
"VASP",
"pymatgen"
] |
3e0ae6834e7952597d7fe6308ba10689f85f170b2293685a4edf9a1cd83284df
|
#http://fileinfo.com/filetypes/raster_image
IMAGE_EXT = """
.001 Fax File
.2BP Pocket PC Bitmap Image File
.360 360desktop Panorama File
.411 Mavica Thumbnail Image
.73I TI-73 Screenshot File
.8CA TI-84 Plus C Image Var Format
.8CI TI-84 Plus C Pic Vars Format
.8PBS Adobe Photoshop Macintosh File
.8XI TI-83/84 Plus Picture File
.9.PNG NinePatchDrawable Image
.ABM Photo Album
.ACCOUNTPICTURE-MS Windows 8 Account Picture File
.ACORN Acorn Image
.ACR American College of Radiology File
.ADC Scanstudio 16 Color Image
.AFX Auto FX PhotoGraphic Edges Image
.AGIF Active GIF Creator Project
.AGP ArtGem Project File
.AIC Advanced Image Coding File
.AIS ACDSee Image Sequence File
.ALBM HP Photo Printing Software Album File
.APD ACDSee Photo Document
.APM Aldus Placeable Metafile Image
.APNG Animated Portable Network Graphic
.APS Greeting Card Studio Project File
.APX Ability Photopaint Studio Image
.ARR Amber Graphic File
.ART AOL Compressed Image File
.ARTWORK ArtStudio Image
.ARW ArtStudio Image
.ASW ACDSee Slideshow Wizard File
.AVATAR Google Talk Avatar File
.AVB Microsoft Comic Chat Character
.AWD Microsoft Fax Document
.AWD Artweaver Document
.BLKRT Block Artist Image File
.BLZ Compressed Bitmap Image
.BM2 Subspace Bitmap File
.BMC Bitmap Cache File
.BMF Binary Material File
.BMP Bitmap Image File
.BMQ Re-Volt Mipmap File
.BMX Siemens Mobile Animations File
.BMZ Compressed Bitmap Image
.BPG BPG Image
.BRK Brooktrout Fax Document
.BRN Cube Graphics File
.BRT Bryce Textures File
.BSS Resident Evil Background Images File
.BTI Nintendo Texture File
.BW Black and White SGI Image File
.C4 JEDMICS Image File
.CAL CALS Raster Graphic
.CALS CALS Raster Graphic File
.CAM CASIO Digital Camera Picture File
.CAN Canon Navigator Fax Document
.CD5 Chasys Draw Image File
.CDC AutoCAD DesignCenter Preview Cache File
.CDG Compact Disc Plus Graphics Image
.CE ComputerEyes Image
.CIMG CImg Image File
.CIN Kodak Cineon Bitmap File
.CIT Intergraph Bitmap Image File
.CLIP Clip Studio Format File
.COLZ Adobe Collage File
.CPBITMAP iOS Wallpaper Image
.CPC CPC Compressed Image File
.CPD Compressed PhotoDefiner Image File
.CPG Manga Studio Page File
.CPS Corel Photo House File
.CPT Corel Photo-Paint Document
.CPX Corel CMX Compressed File
.CSF Content Sealed Format
.CT Scitex Continuous Tone File
.CUT Dr. Halo Bitmap Image File
.DC2 Kodak Photo-Enhancer File
.DCM DICOM Image
.DCX Zsoft Multi-Page Paintbrush File
.DDB Device Dependent Bitmap
.DDS DirectDraw Surface
.DDT Age of Mythology Textures File
.DGT DST Thumbnail File
.DIB Device Independent Bitmap File
.DIC DICOM Image
.DICOM DICOM Image File
.DJV DjVu Image
.DJVU DjVu Image
.DM3 DigitalMicrograph Image
.DMI BYOND Dream Maker Icon File
.DPX Digital Picture Exchange File
.DRZ Draz Paint File
.DT2 Windows Live Messenger Emoticon Image File
.DTW Desktop Wallpaper
.DVL Virtual Library File
.ECW Enhanced Compression Wavelet Image
.EPP Canon Easy-PhotoPrint Image File
.EXR OpenEXR Image
.FAC FACE Image File
.FACE FACE Graphic File
.FAL Bitmap Graphic Header Information
.FAX Fax Document
.FBM Fuzzy Bitmap Image
.FIL Symbian Application Logo File
.FITS Flexible Image Transport System File
.FPG Fenix Graphics Collection File
.FPOS Photo Pos Pro Image
.FPPX Microsoft Fresh Paint Painting File
.FPX FlashPix Bitmap Image File
.FRM Painter Frame Stack File
.G3 CCITT Group 3 Fax Image
.GBR GIMP Brush File
.GCDP Greeting Card Studio Design Project
.GFB GIFBlast Compressed Image File
.GFIE Greenfish Icon Editor Pro Graphic
.GGR GIMP Gradient File
.GIF Graphical Interchange Format File
.GIH GIMP Image Hose File
.GIM PlayStation Portable Image File
.GMBCK Game Maker Background Image File
.GMSPR Game Maker Sprite File
.GP4 CCITT Group 4 Fax File
.GPD Graphic PhotoDefiner Image File
.GRO Graphic Object Bitmap
.GROB Graphic Object Bitmap File
.GRY Grayscale Image
.HDP HD Photo File
.HDR High Dynamic Range Image File
.HDRP HDRtist Pro Document
.HF HF Image
.HPI Hemera Photo Objects Image File
.HR TRS-80 Image
.HRF Hitachi Raster Format File
.I3D Houdini Image 3D File
.IC1 Low Resolution Imagic Graphics File
.IC2 Medium Resolution Imagic Graphics File
.IC3 High Resolution Imagic Graphics File
.ICA Image Object Content Architecture File
.ICB Targa ICB Bitmap Image
.ICN Windows Icon File
.ICON Icon Image File
.ICPR IconUtils Project File
.ILBM Interleaved Bitmap Image
.IMG GEM Image
.IMJ JFIF Bitmap Image
.INFO ZoomBrowser Image Index File
.INK Pantone Reference File
.INT SGI Integer Image
.IPHOTOPROJECT iPhoto Print Project
.IPICK iPick Football Image
.IPX IPIX Image
.ITC2 iTunes Cover Flow Data File 2
.ITHMB iPod and iPhone Photo Thumbnails File
.IVR Image Worlds File
.IVUE Live Picture IVUE Image
.IWI Infinity Ward Texture File
.J JPEG Image
.J2C JPEG 2000 Code Stream
.J2K JPEG 2000 Image
.JAS Paint Shop Pro Compressed Graphic
.JB2 JBIG2 Image
.JBF Paint Shop Pro Browser Cache File
.JBG Joint Bi-level Image Group File
.JBIG Joint Bi-level Image Group File
.JBIG2 JBIG2
.JBMP JAmes OS Bitmap Image
.JBR Paint Shop Pro Brushes File
.JFI JPEG File Interchange Image
.JFIF JPEG File Interchange Format
.JIA Digital Photo Navigator Album
.JIF JPEG Image File
.JIFF JPEG Image File Format
.JNG JPEG Network Graphic
.JP2 JPEG 2000 Core Image File
.JPC JPEG 2000 Code Stream File
.JPD Joint PhotoDefiner Image
.JPE JPEG Image
.JPEG JPEG Image
.JPF JPEG 2000 Image
.JPG JPEG Image
.JPG2 JPEG 2000 Image
.JPS Stereo JPEG Image
.JPX JPEG 2000 Image File
.JTF JPEG Tagged Interchange Format
.JWL Roxio Jewel Case File
.JXR JPEG XR Image
.KDI KD Player Skin Image File
.KDK Kodak Proprietary Decimated TIFF File
.KFX Kofax Image File
.KIC Kodak Compressed Image File
.KODAK Kodak Photo CD File
.KPG Kai's Power Goo Graphic
.LB Lens Blur Project File
.LBM Deluxe Paint Bitmap Image
.LIF Leica Image File
.LIP Clip Studio Paint File
.LJP Lossless JPEG Image
.LRPREVIEW Adobe Photoshop Lightroom Preview File
.LZP LazPaint Image
.MAC MacPaint Image
.MAT Vue Material File
.MAX PaperPort Scanned Document
.MBM Multi Bitmap File
.MBM Kerbal Space Program Texture File
.MCS Mathcad Image
.MET Presentation Manager Metafile
.MIC Image Composer File
.MIFF Magick Image File
.MIP Multiple Image Print File
.MIX Microsoft Image Exchange File
.MNG Multiple Network Graphic
.MNR AutoCAD Menu Resource File
.MPF Microsoft Media Package File
.MPO Multi Picture Object File
.MRB Multiple Resolution Bitmap File
.MRXS MIRAX Virtual Slide File
.MSK Paint Shop Pro Mask File
.MSP Microsoft Paint Bitmap Image
.MXI Maxwell Image
.MYL MyLogo Maker Image
.NCD Nero CoverDesigner File
.NCR NCR Image
.NCT Nero CoverDesigner Template
.NEO NeoChrome Bitmap Image
.NLM Nokia Logo File
.NOL Nokia Operator Logo File
.OC3 openCanvas 3 Event File
.OC4 openCanvas 4 Event File
.OC5 openCanvas 5 Event File
.OCI openCanvas Image
.ODI OpenDocument Image
.OMF OMF Interchange Image File
.OPLC Nokia Operator Logo File
.ORA OpenRaster Image File
.OTA OTA Bitmap Image
.OTB Nokia Over The Air Bitmap Image
.OTI OpenDocument Image Template
.OZB MU Online Image File
.OZJ MU Online Image File
.OZT MU Online Image File
.PAC STAD Graphic File
.PAL Dr. Halo Color Palette File
.PANO Camera Panoramic Picture
.PAP PanoramaStudio Project File
.PAT Pattern File
.PBM Portable Bitmap Image
.PC1 Degas Elite Low Res Image File
.PC2 Degas Elite Medium Res Image File
.PC3 Degas Elite High Res Image File
.PCD Kodak Photo CD Image File
.PCX Paintbrush Bitmap Image File
.PDD Adobe PhotoDeluxe Image
.PDN paint.net Image File
.PE4 Photo Explorer Thumbnail Archive
.PE4 PhotoImpact Image Archive
.PFI PhotoFiltre Studio Image
.PFR Paint Shop Pro Picture Frame File
.PGF Progressive Graphics File
.PGM Portable Gray Map Image
.PI1 Degas Low Resolution Image File
.PI2 Portrait Innovations Photo
.PI2 Degas Medium Resolution Image File
.PI3 Degas High Resolution Image File
.PI4 DEGAS Image
.PI5 DEGAS Bitmap Image
.PI6 DEGAS Bitmap Image File
.PIC Houdini Raster Image
.PIC Generic Picture File
.PIC QuickTime PICT Image
.PICNC Houdini 3D Compositing Image
.PICT Picture File
.PICTCLIPPING Picture Clipping File
.PISKEL Piskel Sprite
.PIX BRL-CAD Raw Image File
.PIX Alias PIX Image
.PIXADEX Pixadex Icon
.PJPEG Progressive JPEG Image
.PJPG Progressive JPEG Image
.PM Unix XV Graphic File
.PM3 PageMaker 3 Document
.PMG Adobe Photoshop Photomerge Panoramic Composition File
.PNG Portable Network Graphic
.PNI Popnoggin Image File
.PNM Portable Any Map Image
.PNS PNG Stereo Image
.PNT MacPaint File
.PNTG MacPaint Graphic File
.POP Samsung Popcon Character File
.POV POV-Ray Raytracing Format
.POV Prolab Object File
.PP4 Picture Publisher Bitmap File
.PP5 Picture Publisher 5 Image File
.PPF Picture Publisher Image File
.PPM Portable Pixmap Image File
.PRW Artlantis Shader Preview File
.PSB Photoshop Large Document Format
.PSD Adobe Photoshop Document
.PSDX Photoshop Touch Document
.PSE Photoshop Elements Photo Project
.PSF PhotoStudio File
.PSP PaintShop Pro Image File
.PSPBRUSH Paint Shop Photo Pro Brush File
.PSPIMAGE PaintShop Pro Image
.PTG ArtRage Painting
.PTK Puntotek Embroidery Design File
.PTS PTGui Project File
.PTX Paint Shop Pro Texture File
.PTX Pentax RAW Image File
.PVR POWERVR Texture File
.PWP PhotoWorks Image File
.PX Pixel Image File
.PXD Pixlr Layered Image
.PXICON CandyBar Icon
.PXM Pixelmator Image File
.PXR Pixar Image File
.PYXEL Pyxel Image Document
.PZA PhotoSuite Album File
.PZP PhotoSuite Project File
.PZS PhotoSuite Slide Show File
.QIF QuickTime Image File
.QMG Samsung Theme Graphics File
.QTI QuickTime Image File
.QTIF QuickTime Image File
.RAS Sun Raster Graphic
.RCL Recolored Project File
.RCU RealWorld Layered Cursor Image
.RGB RGB Bitmap
.RGB Q0 Image File
.RGBA RGB Bitmap
.RGF LEGO MINDSTORMS EV3 Robot Graphics File
.RIC NXT Image File
.RIF Painter Raster Image File
.RIFF Painter Raster Image
.RIX ColoRIX Image
.RLE Run Length Encoded Bitmap
.RLI RealWorld Graphics Layered Image
.RPF Rich Pixel Format File
.RRI RealWorld Image
.RS Sun Raster Graphic
.RSB Red Storm Bitmap File
.RSR Poser Model Preview File
.RTL Raster Transfer Language File
.RVG RVG X-ray Image
.S2MV StarCraft 2 Map Preview File
.SAI PaintTool SAI Image
.SAR Saracen Paint Image
.SBP Sketchbook Express iCloud Image
.SCG ColoRIX Image File
.SCI ColoRIX Graphics Image
.SCN TreePaint Image
.SCP ColoRIX Bitmap Graphic File
.SCT Scitex Continuous Tone File
.SCU ColoRIX Bitmap Image
.SDR SmartDraw Drawing
.SEP Separated Image File
.SFC Motic Microscope Image
.SFF Structured Fax File
.SFW Seattle FilmWorks Image
.SGD GeneSnap Image File
.SGI Silicon Graphics Image File
.SHG Segmented Hyper-Graphic
.SID MrSID Image
.SID ScanIt Document
.SIG Broderbund Sign File
.SIG QuickTac SIG File
.SIM Aurora Image
.SKITCH Skitch Image
.SKM SketchUp Materials File
.SKYPEEMOTICONSET Skype Emoticon Set File
.SLD AutoCAD Slide File
.SMP Xionics SMP Image
.SOB OpenOffice.org Impress Bitmap Styles File
.SPA MikuMikuDance Sphere Mapping File
.SPC Spectrum 512 Compressed Image
.SPE WinSpec CCD Capture File
.SPH MikuMikuDance Sphere Mapping File
.SPIFF Still Picture Interchange File Format
.SPJ Microsoft ICE Panorama Project
.SPP Serif PhotoPlus Picture File
.SPR Half-Life Sprite File
.SPRITE Scratch Sprite File
.SPRITE2 Scratch 2.0 Sprite File
.SPU Spectrum 512 Image
.SR Sun Raster Image File
.STE Samsung IPOLIS Image File
.SUMO Sumo Paint Image
.SUN Sun Raster Graphic File
.SUNIFF Sun TAAC Image
.SUP Subtitle Bitmap File
.SVA Manga Browser Comic File
.SVM StarView Metafile
.T2B CyBook Thumbnail Image
.TAAC Sun TAAC Image File
.TARGA Targa Graphic
.TB0 Adobe Photoshop Thumbnail Cache File
.TBN Kodi Thumbnail Image
.TEX Texture File
.TFC Unreal Engine 3 Texture File Cache
.TG4 Tiled Group 4 Raster Image File
.TGA Targa Graphic
.THM Thumbnail Image File
.THM Video Thumbnail File
.THUMB JAlbum Thumbnail File
.TIF Tagged Image File
.TIF GeoTIFF Image
.TIFF Tagged Image File Format
.TJP Tiled JPEG File
.TM2 PlayStation 2 Graphic
.TN LG Phone Image
.TN1 Tiny Low Resolution Image
.TN2 Tiny Medium Resolution Image
.TN3 Tiny High Resolution Image
.TNY Atari Tiny Image
.TPF TexMod Package File
.TPI TrueVision Bitmap Image
.TPS TexturePacker Sprite Sheet File
.TRIF Tiled Raster Interchange Format
.TSR TIFF Simple Rare File
.TUB PaintShop Pro Picture Tube
.U Subsampled Raw YUV Image
.UFO Ulead File Object
.UGA Ulead GIF Animator File
.UGOIRA Ugoira Animated Image File
.URT Utah Raster Toolkit File
.USERTILE-MS Windows 8 User Tile File
.V Subsampled Raw YUV Image
.VDA Targa Bitmap Image File
.VFF Sun TAAC Graphic File
.VIC VICAR Image
.VICAR VICAR Image File
.VIFF Visualization Image File Format
.VNA JVC JLIP Image
.VPE Photoshop Vanishing Point Export File
.VRIMG V-Ray Image File
.VRPHOTO VR Photo Image
.VSS Visio Stencil File
.VST Targa Bitmap Image
.WB0 Webshots Picture File
.WB1 Webshots Picture File
.WB2 Webshots Picture File
.WBC Webshots Collection File
.WBD Webshots Picture File
.WBM Wireless Bitmap Image
.WBMP Wireless Bitmap Image File
.WBP Webshots Collection File
.WBZ Webshots Download Picture File
.WDP Windows Media Photo File
.WEBP WebP Image
.WI Wavelet Image
.WIC J Wavelet Image
.WMP Windows Media Photo File
.WPB openCanvas 1.1 Image
.WPE openCanvas 1.1 Event File
.WVL Wavelet Image
.XBM X11 Bitmap Graphic
.XCF GIMP Image File
.XPM X11 Pixmap Graphic
.XWD X Windows Dump Image
.Y Subsampled Raw YUV Image
.YSP BYOB Sprite File
.YUV YUV Encoded Image File
.ZIF Zooming Image Format File
"""
|
Mioced/mm-searcher
|
src/image_extentions.py
|
Python
|
mit
| 13,857
|
[
"Amber"
] |
147da56f3918edd53b8a5e3a03f7dfdbd51de28a4c410c8c6f44b5d8f5a54f74
|
#!/usr/bin/env python
from sys import exit
from random import randint
class Scene(object):
def enter(self):
print "This scene is not yet configured. Subclass it and implement enter()."
exit(1)
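# The Engine below drives a simple finite-state machine: Map.next_scene()
# looks up the next Scene by the name returned from the current scene's
# enter(), and the loop stops once the 'finished' scene is reached.
# Minimal usage sketch (mirroring the bottom of this file):
#   a_map = Map('central_corridor')
#   Engine(a_map).play()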
class Engine(object):
def __init__(self, scene_map):
self.scene_map = scene_map
def play(self):
current_scene = self.scene_map.opening_scene()
last_scene = self.scene_map.next_scene('finished')
while current_scene != last_scene:
next_scene_name = current_scene.enter()
current_scene = self.scene_map.next_scene(next_scene_name)
# be sure to print out the last scene
current_scene.enter()
class Death(Scene):
quips = ["You died. You kinda suck at this.",
"Your mom would be proud...if she were smarter.",
"Such a luser.",
"I have a small puppy that's better at this."]
def enter(self):
print Death.quips[randint(0,len(self.quips)-1)]
exit(1)
class CentralCorridor(Scene):
def enter(self):
print "Gothons of Planet Percal #25 have invaded your ship and destroyed"
print "your entire crew. You are the last surviving member and your last"
print "mission is to get the neutron destruct bomb from the Weapons Armory,"
print "put it in the bridge, and blow the ship up after getting into an "
print "escape pod."
print "\n"
print "You're running down the central corridor to the Weapons Armory when"
print "a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costume"
print "flowing around his hate filled body. He's blocking the door to the"
print "Armory and about to pull a weapon to blast you."
action = raw_input("> ")
if action == "shoot!":
print "Quick on the draw you yank out your blaster and fire it at the Gothon."
print "His clown costume is flowing and moving around his body, which throws"
print "off your aim. Your laser hits his costume but misses him entirely. This"
print "completely ruins his brand new costume his mother bought him, which"
print "make him fly into an insane rage and blast you repeatedly in the face until"
print "you are dead. Then he eats you."
return 'death'
elif action == "dodge!":
print "Like a world class boxer you dodge, weave, slip and slide right"
print "as the Gothon's blaster cranks a laser past your head."
print "In the middle of your artful dodge your foot slips and you"
print "bang your head on the metal wall and pass out."
print "You wake up shortly after only to die as the Gothon stomps on"
print "your head and eats you."
return 'death'
elif action == "tell a joke":
print "Lucky for you they made you learn Gothon insults in the academy."
print "You tell the one Gothon joke you know:"
print "Lbhe zbgure vf sng, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur ubhfr."
print "The Gothon stops, tries not to laugh, then busts out laughing and can't move."
print "While he's laughing you run up and shoot him square in the head"
print "putting him down, the jump through the Weapon Armory door."
return 'laser_weapon_armory'
else:
print "DOES NOT COMPUTE!"
return 'central_corridor'
class LaserWeaponArmory(Scene):
def enter(self):
print "You do a dive roll into the Weapon Armory, crouch and scan the room"
print "for more Gothons that might be hiding. It's dead quiet, too quiet."
print "You stand up and run to the far side of the room and find the"
print "neutron bomb in its container. There's a keypad lock on the box"
print "and you need the code to get the bomb out. If you get the code"
print "wrong 10 times then the lock closes forever and you can't"
print "get the bomb. The code is 3 digits."
code = "%d%d%d" % (randint(1,9),randint(1,9),randint(1,9))
print code
guess = raw_input("[keypad]> ")
guesses = 0
while guess != code and guesses < 10:
print "BZZZZEDDD!"
guesses += 1
guess = raw_input("[keypad]> ")
if guess == code:
print "The container clicks open and the seal breaks, letting gas out."
print "You grab the neutron bomb and run as fast as you can to the"
print "bridge where you must place it in the right spot."
return 'the_bridge'
else:
print "The lock buzzes one last time and then you hear a sickening"
print "melting sound as the mechanism fused together."
print "You decide to sit there, and finally the Gothons blow up the"
print "ship from their ship and you die."
return 'death'
class TheBridge(Scene):
def enter(self):
print "You burst onto the Bridge with the neutron destruct bomb"
print "under your arm and surprises 5 Gothons who are trying to"
print "take control of the ship. Each of them has an even uglier"
print "clown costume than the last. They haven't pulled their"
print "weapons out yet, as they see the active bomb under your"
print "arm and don't want to set it off."
action = raw_input("> ")
if action == "throw the bomb":
print "In a panic you throw the bomb at the group of Gothons"
print "and make a leap for the door. Right as you drop it a"
print "Gothon shoots you right in the back killing you."
print "As you die you see another Gothon frantically try to disarm"
print "the bomb. You die knowing they will probably blow up when"
print "in goes off."
return 'death'
elif action == "slowly place the bomb":
print "You point your blaster at the bomb under your arm"
print "and the Gothons put their hands up and start to sweat."
print "You inch backward to the door, open it, and then carefully"
print "place the bomb on the floor, pointing your blaster at it."
print "You then jump back through the door, punch the close button"
print "and blast the lock so the Gothons can't get out."
print "Now that the bomb is placed you run to the escape pod to"
print "get off this tin can."
return 'escape_pod'
else:
print "DOES NOT COMPUTE"
return "the_bridge"
class EscapePod(Scene):
def enter(self):
print "You rush through the ship desperately trying to make it to"
print "the escape pod before the whole ship explodes. It seems like"
print "hardly an Gothons are on the ship, so your run is clear of"
print "interference. You get to the chamber with the escape pods, and"
print "now need to pick one to take. Some of them could be damaged"
print "but you don't have time to look. There's 5 pods, which one"
print "do you take?"
good_pod = randint(1,5)
print good_pod
guess = raw_input("[pod #]> ")
if int(guess) != good_pod:
print "You jump into pod %s and hit the eject button." % guess
print "The pod escapes out into the void of space, then"
print "implodes as the hull ruptures, crushing your body"
print "into jam jelly."
return 'death'
else:
print "You jump into pod %s and hit the eject button." % guess
print "The pod easily slides out into space heading to"
print "the planet below. As it files to the planet, you look"
print "back and see your ship implode then explode like a"
print "bright star, taking out the Gothon ship at the same"
print "time. You won!"
return 'finished'
class Finished(Scene):
def enter(self):
print "You won! Good job."
return 'finished'
class Map(object):
scenes = {
'central_corridor': CentralCorridor(),
'laser_weapon_armory': LaserWeaponArmory(),
'the_bridge': TheBridge(),
'escape_pod': EscapePod(),
'death': Death(),
'finished': Finished()
}
def __init__(self, start_scene):
self.start_scene = start_scene
def next_scene(self, scene_name):
val = Map.scenes.get(scene_name)
return val
def opening_scene(self):
return self.next_scene(self.start_scene)
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
|
moralesjason/learnpythonthehardway
|
ex43basicobjectorientedanalysisanddesign.py
|
Python
|
gpl-3.0
| 7,718
|
[
"BLAST"
] |
7be74f4cb8895a2c564ed8e6d8b96c67aacf479bce9bbd108c538f6d253142eb
|
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
outputDir = '/Users/seb/Desktop/Geometry-can/'
inputFile = '/Users/seb/Downloads/ParaViewData-3.98.1/Data/can.ex2'
# -----------------------------------------------------------------------------
from paraview import simple
from tonic.paraview.dataset_builder import *
# -----------------------------------------------------------------------------
# Pipeline creation
# -----------------------------------------------------------------------------
can = simple.OpenDataFile(inputFile)
can.ElementVariables = ['EQPS']
can.PointVariables = ['DISPL', 'VEL', 'ACCL']
can.GlobalVariables = ['KE', 'XMOM', 'YMOM', 'ZMOM', 'NSTEPS', 'TMSTEP']
can.ElementBlocks = ['Unnamed block ID: 1 Type: HEX', 'Unnamed block ID: 2 Type: HEX']
anim = simple.GetAnimationScene()
anim.UpdateAnimationUsingDataTimeSteps()
timeValues = anim.TimeKeeper.TimestepValues
sceneDescription = {
'scene': [
{
'name': 'Can',
'source': can,
'colors': {
'DISPL': {'location': 'POINT_DATA' },
'VEL': {'location': 'POINT_DATA' },
'ACCL': {'location': 'POINT_DATA' }
}
}
]
}
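# The scene description above lists each ParaView source to export along with
# the point-data arrays ('DISPL', 'VEL', 'ACCL') that should be written out
# for coloring the geometry.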
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
# Create Geometry Dataset Builder
dsb = GeometryDataSetBuilder(outputDir, sceneDescription)
# Add time information
dsb.getDataHandler().registerArgument(priority=1, name='time', values=timeValues, ui='slider', loop='modulo')
dsb.start()
for time in dsb.getDataHandler().time:
anim.TimeKeeper.Time = time
dsb.writeData(time)
dsb.stop()
|
Kitware/tonic-data-generator
|
scripts/paraview/samples/Geometry-can.py
|
Python
|
bsd-3-clause
| 1,859
|
[
"ParaView"
] |
1a255f4522d1867d6c7c8fee50a4925e51ee2b0b2ccae8f6c80ae03f4df851aa
|
"""
test_SpikingTransmission.py
This file is part of ANNarchy.
Copyright (C) 2020 Helge Uelo Dinkelbach <helge.dinkelbach@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ANNarchy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
import numpy
from ANNarchy import Neuron, Population, Projection, Network, Monitor, Uniform
class test_LILConnectivity(unittest.TestCase):
"""
A pre-synaptic event should increase the conductance of the post-
synaptic neuron by the value *w* in default case.
"""
@classmethod
def setUpClass(cls):
"""
Build up the network
"""
simple_emit = Neuron(
spike = "t==1",
)
simple_recv = Neuron(
equations = """
g_exc1 = 0
g_exc2 = 0
g_exc3 = 0
""",
spike = "g_exc1>30"
)
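# The receiving neuron clamps every conductance back to 0 on each step, so a
# nonzero value recorded by the monitor is exactly the spike input summed in
# that step (w=1.0 per synapse, 5 presynaptic neurons -> at most 5.0).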
# simple in/out populations
in_pop = Population(5, neuron=simple_emit)
out_pop = Population(2, neuron=simple_recv)
# create the projections for the test cases (TC)
# TC: no delay
proj = Projection(pre=in_pop, post=out_pop, target="exc1")
proj.connect_all_to_all(weights=1.0)
# TC: uniform delay
proj_u = Projection(pre=in_pop, post=out_pop, target="exc2")
proj_u.connect_all_to_all(weights=1.0, delays=2.0)
# TC: non-uniform delay
proj_nu = Projection(pre=in_pop, post=out_pop, target="exc3")
proj_nu.connect_all_to_all(weights=1.0, delays=Uniform(2,10))
# Monitor to record the currents
m = Monitor(out_pop, ["g_exc1", "g_exc2", "g_exc3"])
# build network and store required object
# instances
net = Network()
net.add([in_pop, out_pop, proj, proj_u, proj_nu, m])
cls.test_net = net
cls.test_net.compile(silent=True)
cls.test_g_exc_m = net.get(m)
cls.test_proj = net.get(proj_nu)
def setUp(self):
"""
basic setUp() method to reset the network after every test
"""
# back to initial values
self.test_net.reset(populations=True, projections=True)
# clearing monitors must be done separately
self.test_g_exc_m.get()
def test_non_delay(self):
"""
The spikes are emitted at t==1 so the g_exc should be increased at
t == 2. And then again 0, as we reset g_exc.
"""
self.test_net.simulate(5)
g_exc_data = self.test_g_exc_m.get('g_exc1')
self.assertTrue( numpy.allclose( g_exc_data, [[0., 0.], [0., 0.], [5., 5.], [0., 0.], [0., 0.]] ) )
def test_uniform_delay(self):
"""
Test the receiving of spikes emitted at t == 1
"""
self.test_net.simulate(5)
g_exc_data = self.test_g_exc_m.get('g_exc2')
#The spikes are emitted at t==1 with a 2 ms delay, so g_exc should be increased at
#t == 3 (a minimal 1 ms delay always applies). And then again 0, as we reset g_exc.
self.assertTrue( numpy.allclose( g_exc_data, [[0., 0.], [0., 0.], [0., 0.], [5., 5.], [0., 0.]] ) )
def test_nonuniform_delay(self):
"""
Test the receiving of spikes emitted at t == 1
"""
self.test_proj._set_delay([[1.0, 2.0, 3.0, 2.0, 1.0], [3.0, 2.0, 1.0, 2.0, 3.0]])
self.test_net.simulate(5)
g_exc_data = self.test_g_exc_m.get('g_exc3')
# 1st neuron gets 2 events at t==2, 2 events at t==3 and 1 event at t==4
# 2nd neuron gets 1 event at t==2, 2 events at t==3, and 2 events at t==4
self.assertTrue( numpy.allclose( g_exc_data, [[0., 0.], [0., 0.], [2., 1.], [2., 2.], [1., 2.]] ) )
class test_CSRConnectivity(unittest.TestCase):
"""
A pre-synaptic event should increase the conductance of the post-
synaptic neuron by the value *w* in default case.
"""
@classmethod
def setUpClass(cls):
"""
Build up the network
"""
simple_emit = Neuron(
spike = "t==1",
)
simple_recv = Neuron(
equations = """
g_exc1 = 0
g_exc2 = 0
g_exc3 = 0
""",
spike = "g_exc1>30"
)
# simple in/out populations
in_pop = Population(5, neuron=simple_emit)
out_pop = Population(2, neuron=simple_recv)
# create the projections for the test cases (TC)
# TC: no delay
proj = Projection(pre=in_pop, post=out_pop, target="exc1")
proj.connect_all_to_all(weights=1.0, storage_format="csr")
# TC: uniform delay
proj_u = Projection(pre=in_pop, post=out_pop, target="exc2")
proj_u.connect_all_to_all(weights=1.0, delays=2.0, storage_format="csr")
# TC: non-uniform delay
proj_nu = Projection(pre=in_pop, post=out_pop, target="exc3")
proj_nu.connect_all_to_all(weights=1.0, delays=Uniform(2,10))
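# Note: unlike the two projections above, this one keeps the default (LIL)
# connectivity format; presumably non-uniform delays are tested without csr
# storage here (an assumption based on the omitted storage_format argument).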
# Monitor to record the currents
m = Monitor(out_pop, ["g_exc1", "g_exc2", "g_exc3"])
# build network and store required object
# instances
net = Network()
net.add([in_pop, out_pop, proj, proj_u, proj_nu, m])
cls.test_net = net
cls.test_net.compile(silent=True)
cls.test_g_exc_m = net.get(m)
cls.test_proj = net.get(proj_nu)
def setUp(self):
"""
basic setUp() method to reset the network after every test
"""
# back to initial values
self.test_net.reset(populations=True, projections=True)
# clearing monitors must be done separately
self.test_g_exc_m.get()
def test_non_delay(self):
"""
The spikes are emitted at t==1 so the g_exc should be increased at
t == 2. And then again 0, as we reset g_exc.
"""
self.test_net.simulate(5)
g_exc_data = self.test_g_exc_m.get('g_exc1')
self.assertTrue( numpy.allclose( g_exc_data, [[0., 0.], [0., 0.], [5., 5.], [0., 0.], [0., 0.]] ) )
def test_uniform_delay(self):
"""
Test the receiving of spikes emitted at t == 1
"""
self.test_net.simulate(5)
g_exc_data = self.test_g_exc_m.get('g_exc2')
#The spikes are emitted at t==1 with a 2 ms delay, so g_exc should be increased at
#t == 3 (a minimal 1 ms delay always applies). And then again 0, as we reset g_exc.
self.assertTrue( numpy.allclose( g_exc_data, [[0., 0.], [0., 0.], [0., 0.], [5., 5.], [0., 0.]] ) )
def test_nonuniform_delay(self):
"""
Test the receiving of spikes emitted at t == 1
"""
self.test_proj._set_delay([[1.0, 2.0, 3.0, 2.0, 1.0], [3.0, 2.0, 1.0, 2.0, 3.0]])
self.test_net.simulate(5)
g_exc_data = self.test_g_exc_m.get('g_exc3')
# 1st neuron gets 2 events at t==2, 2 events at t==3 and 1 event at t==4
# 2nd neuron gets 1 event at t==2, 2 events at t==3, and 2 events at t==4
self.assertTrue( numpy.allclose( g_exc_data, [[0., 0.], [0., 0.], [2., 1.], [2., 2.], [1., 2.]] ) )
|
vitay/ANNarchy
|
tests/Unittests/test_SpikingTransmission.py
|
Python
|
gpl-2.0
| 7,599
|
[
"NEURON"
] |
2fa6d43e6d4732b2da09ab14858e87ea388b39b780cd8f957e42706ee1128ff8
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Nodes for PPAPI IDL AST"""
#
# IDL Node
#
# IDL Node defines the IDLAttribute and IDLNode objects which are constructed
# by the parser as it processes the various 'productions'. The IDLAttribute
# objects are assigned to the IDLNode's property dictionary instead of being
# applied as children of The IDLNodes, so they do not exist in the final tree.
# The AST of IDLNodes is the output from the parsing state and will be used
# as the source data by the various generators.
#
import hashlib
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_propertynode import IDLPropertyNode
from idl_namespace import IDLNamespace
from idl_release import IDLRelease, IDLReleaseMap
# IDLAttribute
#
# A temporary object used by the parsing process to hold an Extended Attribute
# which will be passed as a child to a standard IDLNode.
#
class IDLAttribute(object):
def __init__(self, name, value):
self.cls = 'ExtAttribute'
self.name = name
self.value = value
def __str__(self):
return '%s=%s' % (self.name, self.value)
#
# IDLNode
#
# This class implements the AST tree, providing the associations between
# parents and children. It also contains a namespace and propertynode to
# allow for look-ups. IDLNode is derived from IDLRelease, so it is
# version aware.
#
class IDLNode(IDLRelease):
# Set of object IDLNode types which have a name and belong in the namespace.
NamedSet = set(['Enum', 'EnumItem', 'File', 'Function', 'Interface',
'Member', 'Param', 'Struct', 'Type', 'Typedef'])
show_versions = False
def __init__(self, cls, filename, lineno, pos, children=None):
# Initialize with no starting or ending Version
IDLRelease.__init__(self, None, None)
self.cls = cls
self.lineno = lineno
self.pos = pos
self.filename = filename
self.filenode = None
self.hashes = {}
self.deps = {}
self.errors = 0
self.namespace = None
self.typelist = None
self.parent = None
self.property_node = IDLPropertyNode()
# A list of unique releases for this node
self.releases = None
# A map from any release, to the first unique release
self.first_release = None
# self.children is a list of children ordered as defined
self.children = []
# Process the passed in list of children, placing ExtAttributes into the
# property dictionary, and nodes into the local child list in order. In
# addition, add nodes to the namespace if the class is in the NamedSet.
if not children: children = []
for child in children:
if child.cls == 'ExtAttribute':
self.SetProperty(child.name, child.value)
else:
self.AddChild(child)
#
# String related functions
#
#
# Return a string representation of this node
def __str__(self):
name = self.GetName()
ver = IDLRelease.__str__(self)
if name is None: name = ''
if not IDLNode.show_versions: ver = ''
return '%s(%s%s)' % (self.cls, name, ver)
# Return file and line number for where node was defined
def Location(self):
return '%s(%d)' % (self.filename, self.lineno)
# Log an error for this object
def Error(self, msg):
self.errors += 1
ErrOut.LogLine(self.filename, self.lineno, 0, ' %s %s' %
(str(self), msg))
if self.filenode:
errcnt = self.filenode.GetProperty('ERRORS', 0)
self.filenode.SetProperty('ERRORS', errcnt + 1)
# Log a warning for this object
def Warning(self, msg):
WarnOut.LogLine(self.filename, self.lineno, 0, ' %s %s' %
(str(self), msg))
def GetName(self):
return self.GetProperty('NAME')
def GetNameVersion(self):
name = self.GetProperty('NAME', default='')
ver = IDLRelease.__str__(self)
return '%s%s' % (name, ver)
# Dump this object and its children
def Dump(self, depth=0, comments=False, out=sys.stdout):
if self.cls in ['Comment', 'Copyright']:
is_comment = True
else:
is_comment = False
# Skip this node if it's a comment, and we are not printing comments
if not comments and is_comment: return
tab = ''.rjust(depth * 2)
if is_comment:
out.write('%sComment\n' % tab)
for line in self.GetName().split('\n'):
out.write('%s "%s"\n' % (tab, line))
else:
ver = IDLRelease.__str__(self)
if self.releases:
release_list = ': ' + ' '.join(self.releases)
else:
release_list = ': undefined'
out.write('%s%s%s%s\n' % (tab, self, ver, release_list))
if self.typelist:
out.write('%s Typelist: %s\n' % (tab, self.typelist.GetReleases()[0]))
properties = self.property_node.GetPropertyList()
if properties:
out.write('%s Properties\n' % tab)
for p in properties:
if is_comment and p == 'NAME':
# Skip printing the name for comments, since we printed above already
continue
out.write('%s %s : %s\n' % (tab, p, self.GetProperty(p)))
for child in self.children:
child.Dump(depth+1, comments=comments, out=out)
#
# Search related functions
#
# Check if node is of a given type
def IsA(self, *typelist):
if self.cls in typelist: return True
return False
# Get a list of objects for this key
def GetListOf(self, *keys):
out = []
for child in self.children:
if child.cls in keys: out.append(child)
return out
def GetOneOf(self, *keys):
out = self.GetListOf(*keys)
if out: return out[0]
return None
def SetParent(self, parent):
self.property_node.AddParent(parent)
self.parent = parent
def AddChild(self, node):
node.SetParent(self)
self.children.append(node)
# Get a list of all children
def GetChildren(self):
return self.children
# Get a list of all children of a given version
def GetChildrenVersion(self, version):
out = []
for child in self.children:
if child.IsVersion(version): out.append(child)
return out
# Get a list of all children in a given range
def GetChildrenRange(self, vmin, vmax):
out = []
for child in self.children:
if child.IsRange(vmin, vmax): out.append(child)
return out
def FindVersion(self, name, version):
node = self.namespace.FindNode(name, version)
if not node and self.parent:
node = self.parent.FindVersion(name, version)
return node
def FindRange(self, name, vmin, vmax):
nodes = self.namespace.FindNodes(name, vmin, vmax)
if not nodes and self.parent:
nodes = self.parent.FindRange(name, vmin, vmax)
return nodes
def GetType(self, release):
if not self.typelist: return None
return self.typelist.FindRelease(release)
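# Compute (and cache) a SHA-1 digest for this node at a given release. The
# hash covers the node's class, its properties, its type at that release and
# every child valid for the release, so it changes whenever any part of the
# subtree changes.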
def GetHash(self, release):
hashval = self.hashes.get(release, None)
if hashval is None:
hashval = hashlib.sha1()
hashval.update(self.cls)
for key in self.property_node.GetPropertyList():
val = self.GetProperty(key)
hashval.update('%s=%s' % (key, str(val)))
typeref = self.GetType(release)
if typeref:
hashval.update(typeref.GetHash(release))
for child in self.GetChildren():
if child.IsA('Copyright', 'Comment', 'Label'): continue
if not child.IsRelease(release):
continue
hashval.update( child.GetHash(release) )
self.hashes[release] = hashval
return hashval.hexdigest()
def GetDeps(self, release, visited=None):
visited = visited or set()
# If this release is not valid for this object, then done.
if not self.IsRelease(release) or self.IsA('Comment', 'Copyright'):
return set([])
# If we have cached the info for this release, return the cached value
deps = self.deps.get(release, None)
if deps is not None:
return deps
# If we are already visited, then return
if self in visited:
return set([self])
# Otherwise, build the dependency list
visited |= set([self])
deps = set([self])
# Get child deps
for child in self.GetChildren():
deps |= child.GetDeps(release, visited)
visited |= set(deps)
# Get type deps
typeref = self.GetType(release)
if typeref:
deps |= typeref.GetDeps(release, visited)
self.deps[release] = deps
return deps
def GetVersion(self, release):
filenode = self.GetProperty('FILE')
if not filenode:
return None
return filenode.release_map.GetVersion(release)
def GetUniqueReleases(self, releases):
"""Return the unique set of first releases corresponding to input
Since we are returning the corresponding 'first' version for a
release, we may return a release version prior to the one in the list."""
my_min, my_max = self.GetMinMax(releases)
if my_min > releases[-1] or my_max < releases[0]:
return []
out = set()
for rel in releases:
remapped = self.first_release[rel]
if not remapped: continue
out |= set([remapped])
out = sorted(out)
return out
def GetRelease(self, version):
filenode = self.GetProperty('FILE')
if not filenode:
return None
return filenode.release_map.GetRelease(version)
def _GetReleases(self, releases):
if not self.releases:
my_min, my_max = self.GetMinMax(releases)
my_releases = [my_min]
if my_max != releases[-1]:
my_releases.append(my_max)
my_releases = set(my_releases)
for child in self.GetChildren():
if child.IsA('Copyright', 'Comment', 'Label'):
continue
my_releases |= child.GetReleases(releases)
self.releases = my_releases
return self.releases
def _GetReleaseList(self, releases, visited=None):
visited = visited or set()
if not self.releases:
# If we are unversionable, then return first available release
if self.IsA('Comment', 'Copyright', 'Label'):
self.releases = []
return self.releases
# Generate the first and if deprecated within this subset, the
# last release for this node
my_min, my_max = self.GetMinMax(releases)
if my_max != releases[-1]:
my_releases = set([my_min, my_max])
else:
my_releases = set([my_min])
# Break cycle if we reference ourselves
if self in visited:
return [my_min]
visited |= set([self])
# Files inherit all their releases from items in the file
if self.IsA('AST', 'File'):
my_releases = set()
# Visit all children
child_releases = set()
# Exclude sibling results from parent visited set
cur_visits = visited
for child in self.children:
child_releases |= set(child._GetReleaseList(releases, cur_visits))
visited |= set(child_releases)
# Visit my type
type_releases = set()
if self.typelist:
type_list = self.typelist.GetReleases()
for typenode in type_list:
type_releases |= set(typenode._GetReleaseList(releases, cur_visits))
type_release_list = sorted(type_releases)
if my_min < type_release_list[0]:
type_node = type_list[0]
self.Error('requires %s in %s which is undefined at %s.' % (
type_node, type_node.filename, my_min))
for rel in child_releases | type_releases:
if rel >= my_min and rel <= my_max:
my_releases |= set([rel])
self.releases = sorted(my_releases)
return self.releases
def GetReleaseList(self):
return self.releases
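# Build the first_release map: for every release in the input list, record
# the most recent release (at or before it) in which this node actually
# changed, resetting once the node's last valid release has been passed.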
def BuildReleaseMap(self, releases):
unique_list = self._GetReleaseList(releases)
my_min, my_max = self.GetMinMax(releases)
self.first_release = {}
last_rel = None
for rel in releases:
if rel in unique_list:
last_rel = rel
self.first_release[rel] = last_rel
if rel == my_max:
last_rel = None
def SetProperty(self, name, val):
self.property_node.SetProperty(name, val)
def GetProperty(self, name, default=None):
return self.property_node.GetProperty(name, default)
def Traverse(self, data, func):
func(self, data)
for child in self.children:
child.Traverse(data, func)
#
# IDLFile
#
# A specialized version of IDLNode which tracks errors and warnings.
#
class IDLFile(IDLNode):
def __init__(self, name, children, errors=0):
attrs = [IDLAttribute('NAME', name),
IDLAttribute('ERRORS', errors)]
if not children: children = []
IDLNode.__init__(self, 'File', name, 1, 0, attrs + children)
self.release_map = IDLReleaseMap([('M13', 1.0)])
#
# Tests
#
def StringTest():
errors = 0
name_str = 'MyName'
text_str = 'MyNode(%s)' % name_str
name_node = IDLAttribute('NAME', name_str)
node = IDLNode('MyNode', 'no file', 1, 0, [name_node])
if node.GetName() != name_str:
ErrOut.Log('GetName returned >%s< not >%s<' % (node.GetName(), name_str))
errors += 1
if node.GetProperty('NAME') != name_str:
ErrOut.Log('Failed to get name property.')
errors += 1
if str(node) != text_str:
ErrOut.Log('str() returned >%s< not >%s<' % (str(node), text_str))
errors += 1
if not errors: InfoOut.Log('Passed StringTest')
return errors
def ChildTest():
errors = 0
child = IDLNode('child', 'no file', 1, 0)
parent = IDLNode('parent', 'no file', 1, 0, [child])
if child.parent != parent:
ErrOut.Log('Failed to connect parent.')
errors += 1
if [child] != parent.GetChildren():
ErrOut.Log('Failed GetChildren.')
errors += 1
if child != parent.GetOneOf('child'):
ErrOut.Log('Failed GetOneOf(child)')
errors += 1
if parent.GetOneOf('bogus'):
ErrOut.Log('Failed GetOneOf(bogus)')
errors += 1
if not parent.IsA('parent'):
ErrOut.Log('Expecting parent type')
errors += 1
parent = IDLNode('parent', 'no file', 1, 0, [child, child])
if [child, child] != parent.GetChildren():
ErrOut.Log('Failed GetChildren2.')
errors += 1
if not errors: InfoOut.Log('Passed ChildTest')
return errors
def Main():
errors = StringTest()
errors += ChildTest()
if errors:
ErrOut.Log('IDLNode failed with %d errors.' % errors)
return -1
return 0
if __name__ == '__main__':
sys.exit(Main())
|
plxaye/chromium
|
src/ppapi/generators/idl_node.py
|
Python
|
apache-2.0
| 14,337
|
[
"VisIt"
] |
b4e8faa47e7fad265471756f630de2acc3de227652d66752aa3aad7e675bf42b
|
from galaxy.test.base.twilltestcase import TwillTestCase
#from twilltestcase import TwillTestCase
class EncodeTests(TwillTestCase):
def test_00_first(self): # will run first due to its name
"""3B_GetEncodeData: Clearing history"""
self.clear_history()
def test_10_Encode_Data(self):
"""3B_GetEncodeData: Getting encode data"""
self.run_tool('encode_import_chromatin_and_chromosomes1', hg17=['cc.EarlyRepSeg.20051216.bed'] )
# hg17=[ "cc.EarlyRepSeg.20051216.bed", "cc.EarlyRepSeg.20051216.gencode_partitioned.bed", "cc.LateRepSeg.20051216.bed", "cc.LateRepSeg.20051216.gencode_partitioned.bed", "cc.MidRepSeg.20051216.bed", "cc.MidRepSeg.20051216.gencode_partitioned.bed" ] )
self.wait()
self.check_data('cc.EarlyRepSeg.20051216.bed', hid=1)
# self.check_data('cc.EarlyRepSeg.20051216.gencode_partitioned.bed', hid=2)
# self.check_data('cc.LateRepSeg.20051216.bed', hid=3)
# self.check_data('cc.LateRepSeg.20051216.gencode_partitioned.bed', hid=4)
# self.check_data('cc.MidRepSeg.20051216.bed', hid=5)
# self.check_data('cc.MidRepSeg.20051216.gencode_partitioned.bed', hid=6)
|
jmchilton/galaxy-central
|
galaxy/test/functional/test_3B_GetEncodeData.py
|
Python
|
mit
| 1,185
|
[
"Galaxy"
] |
2c3c256f58ceabe102717818b92e529d033f2736b17ca3a6ecb3e6f5885859a3
|
###########################################################################
# TaskToKmz
# Copyright 2013 by Tonino Tarsi <tony.tarsi@gmail.com>
#
# Please refer to the LICENSE file for conditions
# Visit http://www.vololiberomontecucco.it
#
##########################################################################
import os
import string
import random
################################# modify below #####################################
inIGCDir = "./igc"
outFile = "./task1.kml"
taskName = "Task"
modelscale = "5"
startTime = "1214" # or use "none" remenber this is UTC
endTime = "1800"
createFlyForAllPilots = True
pilots2follow = ["CHRISTIAN CIECH","ALESSANDRO PLONER","EDOARDO GIUDICEANDREA"] # Ignored if createFlyForAllPilots
flySpeed = 8
tilt = 75
min_distance = 1500
max_distance = 2500
split_tout_time = 1
height_offset = 0
################################# Do not modify below #####################################
def isAfterStart(theTime,startTime):
#print theTime,startTime,theTime[0:2],theTime[2:4]
r = ( int(theTime[0:2])*60+int(theTime[2:4]) ) - ( int(startTime[0:2])*60+int(startTime[2:4]) )
if ( r >= 0):
return True
else:
return False
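# IGC B-record fields are fixed width: latitude is DDMMmmm followed by N/S,
# longitude is DDDMMmmm followed by E/W. GetPoints() below slices the
# longitude as line[16:24], dropping the leading hundreds digit, so the
# parser implicitly assumes longitudes below 100 degrees.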
class gpsPoint(object):
def __init__(self, t,y,x,h):
code = x[7]
deg = x[0:2]
pri = x[2:4]
dec = x[4:7]
dx = float(deg) + (float(pri)+ float(dec)/1000) /60
if ( code == "W"):
dx = -dx
self.x = dx
code = y[7]
deg = y[0:2]
pri = y[2:4]
dec = y[4:7]
dy = float(deg) + (float(pri)+ float(dec)/1000) /60
if ( code == "S"):
dy = -dy # fix: southern latitudes negate the latitude (dy), not the longitude (dx)
self.y = dy
self.h = float(h)+height_offset
self.t = t
class igcClass(object):
def __init__(self, filename):
self.filename = filename
self.PilotName = None
self.date = None
self.day = None
self.year = None
fo = open(filename, "r")
while ( True ):
line = fo.readline()
#print line
if ( line.split(" ")[0] == "HOPLTPILOT:" or line.split(" ")[0] == "HFPLTPILOT:") :
self.PilotName = line[12:len(line)-1]
if line[0:5] == "HFDTE":
self.date = line[5:]
self.day = line[5:7]
self.month = line[7:9]
self.year = line[9:11]
if ( line.split(" ")[0] == "HOSITSite:" or line.split(" ")[0][0] == 'B'):
break
fo.close()
def GetPoints(self):
fo = open(self.filename, "r")
listGPS = []
for line in fo:
code = line[0]
if ( code == "B" ):
theTime = line[1:7]
if isAfterStart(theTime,startTime):
listGPS.append(gpsPoint(line[1:7],line[7:15],line[16:24],line[26:30]))
fo.close()
return listGPS
whitelist = ['.igc']
contents = os.listdir(inIGCDir)
igcfilelist = []
for filename in contents:
if os.path.splitext( filename )[1] in whitelist:
igcfilelist.append(inIGCDir + "/" +filename)
listigc = []
for igcfile in igcfilelist:
igc = igcClass(igcfile)
listigc.append(igc)
outfo = open(outFile,"w")
template1 = string.Template("""<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2" xmlns:gx="http://www.google.com/kml/ext/2.2" xmlns:kml="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom">
<Document>
<name>$TaskName</name>
<open>1</open>
""")
d = dict(TaskName=taskName)
outfo.write(template1.safe_substitute(d))
i = 0
for pilot in listigc:
x = random.randint(0, 16777215)
labelcolor = "FF%x" % x
trackcolor = "7F%x" % x
templateStyle = string.Template("""<StyleMap id="StyleMapID$i">
<Pair>
<key>normal</key>
<styleUrl>#style$i</styleUrl>
</Pair>
<Pair>
<key>highlight</key>
<styleUrl>#hl</styleUrl>
</Pair>
</StyleMap>
<Style id="style$i">
<IconStyle>
<Icon>
</Icon>
</IconStyle>
<LabelStyle>
<color>$labelcolor</color>
<scale>0.4</scale>
</LabelStyle>
<LineStyle>
<color>$trackcolor</color>
<width>0.5</width>
</LineStyle>
</Style>
""")
d = dict(i=i)
d.update(labelcolor=labelcolor)
d.update(trackcolor=trackcolor)
outfo.write(templateStyle.safe_substitute(d))
i = i + 1
outfo.write(""" <Folder>
<name>Pilots</name>
<open>1</open>
""")
i = 0
for pilot in listigc:
print pilot.PilotName
points = pilot.GetPoints()
if ( len(points) == 0) :
continue
template = string.Template("""<Placemark>
<name>$pilotName</name>
<styleUrl>#style$i</styleUrl>
""")
d = dict(i=i)
d.update(pilotName=pilot.PilotName)
outfo.write(template.safe_substitute(d))
outfo.write(""" <gx:Track>
<altitudeMode>absolute</altitudeMode>""")
for point in points:
when = " <when>20"+pilot.year+"-"+pilot.month+"-"+pilot.day+"T"+point.t[0:2]+":"+point.t[2:4]+":"+point.t[4:6]+"Z</when>"
outfo.write(when+"\n")
for point in points:
when = " <gx:coord>%.6f %.6f %.0f</gx:coord>" % (point.x,point.y,point.h)
outfo.write(when+"\n")
template = string.Template(""" <Model id="model_3">
<altitudeMode>relativeToGround</altitudeMode>
<Location id="model_3">
<longitude>$x</longitude>
<latitude>$y</latitude>
<altitude>$h</altitude>
</Location>
<Orientation>
<heading>360</heading>
<tilt>0</tilt>
<roll>0</roll>
</Orientation>
<Scale>
<x>$s</x>
<y>$s</y>
<z>$s</z>
</Scale>
<Link>
<href>files/hg.dae</href>
</Link>
<ResourceMap>
</ResourceMap>
</Model>
</gx:Track>
</Placemark>
""")
try:
d = dict(s=modelscale)
d.update(x=points[0].x)
d.update(y=points[0].y)
d.update(h=points[0].h)
outfo.write(template.safe_substitute(d))
except:
print len(points)
i = i + 1
outfo.write("</Folder>")
outfo.write(""" <Folder>
<name>fly</name>
<open>1</open>
""")
# ######################### Create tour
for pilot in listigc:
points = pilot.GetPoints()
if ( len(points) == 0) :
continue
if pilot.PilotName in pilots2follow or createFlyForAllPilots:
print "Creating tour for" + pilot.PilotName
outfo.write("""<gx:Tour>
<name>"""+pilot.PilotName+"""</name>
<gx:Playlist>""")
i = 0
hp = 0
heading = random.randint(0, 360)
distance = random.randint(min_distance,max_distance)
distance_step = 50
for point in points:
if ( i % 10 == 0 ):
if ( i % 20 or True) :
distance = distance + distance_step
if ( distance > max_distance ) :
distance_step = - distance_step
if ( distance < min_distance ) :
distance_step = - distance_step
if ( i % 5 == 0 ):
heading = heading + 5
if ( heading > 360):
heading = 0
bgn="20"+listigc[0].year+"-"+listigc[0].month+"-"+listigc[0].day+"T"+points[0].t[0:2]+":"+points[0].t[2:4]+":"+points[0].t[4:6]+"Z"
end="20"+pilot.year+"-"+pilot.month+"-"+pilot.day+"T"+point.t[0:2]+":"+point.t[2:4]+":"+point.t[4:6]+"Z"
h = int(point.t[0:2]) * 3600 + int(point.t[2:4])*60 + int(point.t[4:6])
if ( hp == 0):
timetofly = 1
else:
timetofly = ( h - hp ) / flySpeed
hp = h
template = string.Template(""" <gx:FlyTo>
<gx:duration>$timetofly</gx:duration>
<gx:flyToMode>smooth</gx:flyToMode>
<LookAt>
<TimeSpan>
<begin>$bgn</begin>
<end>$end</end>
</TimeSpan>
<longitude>$x</longitude>
<latitude>$y</latitude>
<altitude>$h</altitude>
<heading>$heading</heading>
<tilt>$tilt</tilt>
<range>$distance</range>
<altitudeMode>absolute</altitudeMode>
</LookAt>
</gx:FlyTo>""")
d = dict(bgn=bgn)
d.update(end=end)
d.update(x=point.x)
d.update(y=point.y)
d.update(h=point.h)
d.update(timetofly=timetofly)
d.update(heading=heading)
d.update(tilt=tilt)
d.update(distance=distance)
outfo.write(template.safe_substitute(d))
i = i + 1
outfo.write("""</gx:Playlist>
</gx:Tour>""")
outfo.write("""</Folder>
</Document>
</kml>
""")
outfo.close()
print "Done"
|
ToninoTarsi/TaskToKmz
|
TaskToKmz.py
|
Python
|
gpl-3.0
| 7,882
|
[
"VisIt"
] |
788598b6c7df8bda93d72a669423c38bf20c43300ded6efff3711274a0310aa7
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
from pymatgen.core.structure import Structure
from pymatgen.io.atat import Mcsqs
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "mcsqs")
class AtatTest(PymatgenTest):
def test_mcsqs_import(self):
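# mcsqs structure files list the coordinate system first (three axis rows),
# then three lattice vectors expressed in those coordinates, then one row
# per site: coordinates followed by the occupying species.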
test_string = """1.000000 0.000000 0.000000
0.000000 1.000000 0.000000
0.000000 0.000000 1.000000
0.000000 -1.000000 -2.000000
2.000000 -1.000000 0.000000
-1.000000 -1.000000 1.000000
0.000000 -2.000000 -1.000000 Mn
1.000000 -2.000000 -1.000000 Mn
0.000000 -1.000000 -1.000000 Mn
-0.000000 -2.000000 0.000000 Mn
1.000000 -2.000000 0.000000 Mn
0.000000 -1.000000 0.000000 Mn
1.000000 -1.000000 0.000000 Fe
1.000000 -3.000000 -1.000000 Mn
0.500000 -1.500000 -0.500000 Sr
1.500000 -1.500000 -0.500000 Ca
-0.500000 -1.500000 0.500000 Ca
0.500000 -1.500000 0.500000 Ca
1.500000 -2.500000 -1.500000 Ca
0.500000 -1.500000 -1.500000 Sr
0.500000 -2.500000 -0.500000 Sr
-0.500000 -1.500000 -0.500000 Ca
0.000000 -1.500000 -1.000000 O
1.000000 -1.500000 -1.000000 O
1.000000 -2.500000 0.000000 O
-0.000000 -1.500000 0.000000 O
1.000000 -1.500000 0.000000 O
0.000000 -0.500000 0.000000 O
0.000000 -2.500000 -1.000000 O
1.000000 -2.500000 -1.000000 O
0.500000 -2.000000 -1.000000 O
1.500000 -2.000000 -1.000000 O
0.500000 -1.000000 -1.000000 O
0.500000 -2.000000 0.000000 O
-0.500000 -1.000000 0.000000 O
0.500000 -1.000000 0.000000 O
1.500000 -1.000000 0.000000 O
-0.500000 -2.000000 -1.000000 O
0.000000 -2.000000 -0.500000 O
1.000000 -2.000000 -0.500000 O
0.000000 -1.000000 -0.500000 O
1.000000 -1.000000 -0.500000 O
1.000000 -2.000000 0.500000 O
0.000000 -1.000000 0.500000 O
1.000000 -2.000000 -1.500000 O
0.000000 -1.000000 -1.500000 O
"""
s = Mcsqs.structure_from_string(test_string)
self.assertEqual(s.composition.formula, "Sr3 Ca5 Mn7 Fe1 O24")
self.assertAlmostEqual(s.lattice.a, 2.2360679775)
self.assertAlmostEqual(s.lattice.b, 2.2360679775)
self.assertAlmostEqual(s.lattice.c, 1.73205080757)
def test_mcsqs_export(self):
s = self.get_structure("SrTiO3")
s.replace_species({"Sr2+": {"Sr2+": 0.5, "Ca2+": 0.5}})
ref_string = """3.905000 0.000000 0.000000
-0.000000 3.905000 0.000000
0.000000 0.000000 3.905000
1.0 0.0 0.0
0.0 1.0 0.0
0.0 0.0 1.0
0.500000 0.500000 0.500000 Sr2+=0.5,Ca2+=0.5
0.000000 0.000000 0.000000 Ti4+=1.0
0.000000 0.000000 0.500000 O2-=1.0
0.000000 0.500000 0.000000 O2-=1.0
0.500000 0.000000 0.000000 O2-=1.0"""
self.assertEqual(Mcsqs(s).to_string(), ref_string)
def test_mcsqs_cif_nacl(self):
# cif file from str2cif (utility distributed with atat)
struc_from_cif = Structure.from_file(os.path.join(test_dir, "bestsqs_nacl.cif"))
# output file directly from mcsqs
struc_from_out = Structure.from_file(os.path.join(test_dir, "bestsqs_nacl.out"))
self.assertTrue(struc_from_cif.matches(struc_from_out))
self.assertArrayAlmostEqual(
struc_from_out.lattice.parameters,
struc_from_cif.lattice.parameters,
decimal=4,
)
def test_mcsqs_cif_pzt(self):
# cif file from str2cif (utility distributed with atat)
struc_from_cif = Structure.from_file(os.path.join(test_dir, "bestsqs_pzt.cif"))
# output file directly from mcsqs
struc_from_out = Structure.from_file(os.path.join(test_dir, "bestsqs_pzt.out"))
self.assertTrue(struc_from_cif.matches(struc_from_out))
self.assertArrayAlmostEqual(
struc_from_out.lattice.parameters,
struc_from_cif.lattice.parameters,
decimal=4,
)
|
gmatteo/pymatgen
|
pymatgen/io/tests/test_atat.py
|
Python
|
mit
| 3,797
|
[
"pymatgen"
] |
81f49159a38787263d7cc3716435478065324e4e18c6cab6a19c0af046d759d8
|
import numpy as np
from pyiid.experiments.elasticscatter.kernels.master_kernel import get_rw, \
get_chi_sq, get_grad_rw, \
get_grad_chi_sq
from ase.calculators.calculator import Calculator
__author__ = 'christopher'
def wrap_rw(gcalc, gobs):
"""
Generate the Rw value
Parameters
-----------
gcalc: np.ndarray
The calculated 1D data
gobs: np.ndarray
The observed 1D data
Returns
-------
rw: float
The Rw value in percent
scale: float
The scale factor between the observed and calculated PDF
"""
rw, scale = get_rw(gobs, gcalc, weight=None)
return rw, scale
def wrap_chi_sq(gcalc, gobs):
"""
Generate the chi squared value
Parameters
-----------
gcalc: np.ndarray
The calculated 1D data
gobs: np.ndarray
The observed 1D data
Returns
-------
chi_sq: float
The chi squared value
scale: float
The scale factor between the observed and calculated PDF
"""
chi_sq, scale = get_chi_sq(gobs, gcalc)
return chi_sq, scale
def wrap_grad_rw(grad_gcalc, gcalc, gobs):
"""
Generate the Rw value gradient
Parameters
-----------
grad_gcalc: ndarray
The gradient of the 1D data
gcalc: np.ndarray
The calculated 1D data
gobs: np.ndarray
The observed 1D data
Returns
-------
grad_rw: ndarray
The gradient of the Rw value with respect to the atomic positions,
in percent
"""
rw, scale = wrap_rw(gcalc, gobs)
grad_rw = np.zeros((len(grad_gcalc), 3))
get_grad_rw(grad_rw, grad_gcalc, gcalc, gobs, rw, scale)
return grad_rw
def wrap_grad_chi_sq(grad_gcalc, gcalc, gobs):
"""
Generate the chi squared value gradient
Parameters
-----------
grad_gcalc: ndarray
The gradient of the 1D data
gcalc: np.ndarray
The calculated 1D data
gobs: np.ndarray
The observed 1D data
Returns
-------
grad_chi_sq: ndarray
The gradient of the chi squared value with respect to the atomic
positions
"""
chi_sq, scale = wrap_chi_sq(gcalc, gobs)
grad_chi_sq = np.zeros((len(grad_gcalc), 3))
get_grad_chi_sq(grad_chi_sq, grad_gcalc, gcalc, gobs, scale)
return grad_chi_sq
class Null(Calculator):
"""
Returns zero forces or energy
"""
implemented_properties = ['energy', 'forces']
def __init__(self, restart=None, ignore_bad_restart_file=False, label=None,
atoms=None, **kwargs):
Calculator.__init__(self, restart, ignore_bad_restart_file,
label, atoms, **kwargs)
def calculate(self, atoms=None, properties=None,
system_changes=None):
"""PDF Calculator
Parameters
----------
atoms: Atoms object
Contains positions, unit-cell, ...
properties: list of str
List of what needs to be calculated. Can be any combination
of 'energy', 'forces'
system_changes: list of str
List of what has changed since last calculation. Can be
any combination of these five: 'positions', 'numbers', 'cell',
'pbc', 'charges' and 'magmoms'.
"""
if system_changes is None:
system_changes = ['positions', 'numbers', 'cell',
'pbc', 'charges', 'magmoms']
if properties is None:
properties = ['energy']
Calculator.calculate(self, atoms, properties, system_changes)
# we shouldn't really recalc if charges or magmoms change
if len(system_changes) > 0: # something wrong with this way
if 'energy' in properties:
self.calculate_energy(self.atoms)
if 'forces' in properties:
self.calculate_forces(self.atoms)
for calc_property in properties:
if calc_property not in self.results:
if calc_property == 'energy': # '==' not 'is': identity checks on str literals are unreliable
self.calculate_energy(self.atoms)
if calc_property == 'forces':
self.calculate_forces(self.atoms)
def calculate_energy(self, atoms):
"""
Calculate energy
:param atoms:
:return:
"""
self.results['energy'] = 0.0
def calculate_forces(self, atoms):
forces = np.zeros((len(atoms), 3))
self.results['forces'] = forces
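# Minimal usage sketch (hypothetical, standard ASE calculator workflow):
#   from ase import Atoms
#   atoms = Atoms('H2', positions=[[0, 0, 0], [0, 0, 0.74]])
#   atoms.calc = Null()
#   atoms.get_potential_energy()  # -> 0.0
#   atoms.get_forces()            # -> zeros((2, 3))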
|
CJ-Wright/pyIID
|
pyiid/calc/__init__.py
|
Python
|
bsd-3-clause
| 4,468
|
[
"ASE"
] |
aea190d75394e263cb70c7fe3fbc039aa4500802e32a13987c00a10fec346ab0
|