text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python3
"""Advent of Code day 3, part 2: Santa and Robo-Santa alternate moves.

Counts the number of distinct houses that receive at least one present
when the two deliverers take turns following the '<>^v' instructions.
"""
from collections import namedtuple

# Read the challenge input
with open("input.txt", 'r') as input_file:
    puzzle_input = input_file.read()

# Hashable tuple to use as dictionary key
Location = namedtuple('Location', ['x', 'y'])

# Both deliverers start at the origin, which already holds one present
presents_delivered = {Location(0, 0): 1}

# Index 0 tracks Santa's position, index 1 Robo-Santa's
current_x = [0, 0]
current_y = [0, 0]
santas_turn = True

for c in puzzle_input:
    # Pick whose position to update this move, then swap turns
    offset = 0 if santas_turn else 1
    santas_turn = not santas_turn
    # Move the current deliverer based on the character
    if c == '<': current_x[offset] -= 1
    elif c == '>': current_x[offset] += 1
    elif c == '^': current_y[offset] += 1
    elif c == 'v': current_y[offset] -= 1
    # Count the present at the new location (0 if not visited yet)
    loc = Location(current_x[offset], current_y[offset])
    presents_delivered[loc] = presents_delivered.get(loc, 0) + 1

# Total houses visited is the number of distinct keys in the dictionary
print('Santa and Robo-Santa managed to visit', len(presents_delivered), 'houses!')
|
joseph-roque/advent-of-code
|
day_03/p2_robo.py
|
Python
|
mit
| 1,197
|
[
"VisIt"
] |
fdea909e370130253b7fa9232e15638bd792086ff15d5ab1563298f4635674e3
|
import itertools
import numpy as np
from functools import reduce
from pyscf import lib
from pyscf.pbc.lib import kpts_helper
einsum = lib.einsum
#FIXME: the dtype of each intermediates. When the khf is at gamma point, the
# dtype is inconsistent between intermediates and t amplitudes
def make_tau(cc, t2, t1, t1p, fac=1.):
    """Build antisymmetrized tau amplitudes: tau = t2 + fac * (t1 x t1p) terms.

    t2 is the (t2aa, t2ab, t2bb) triple of k-point spin blocks; t1 and t1p
    are (alpha, beta) pairs.  Returns (tauaa, tauab, taubb) with the same
    k-point layout as t2.  Momentum conservation fixes the third k index,
    so only the [ki,kj,ki] / [ki,kj,kj] slices receive t1*t1p terms.
    """
    t2aa, t2ab, t2bb = t2
    nkpts = len(t2aa)
    tauaa = t2aa.copy()
    tauab = t2ab.copy()
    taubb = t2bb.copy()
    for ki in range(nkpts):
        for kj in range(nkpts):
            # alpha-alpha block: antisymmetrized in i<->j and a<->b
            tauaa[ki,kj,ki] += einsum('ia,jb->ijab', fac*.5*t1[0][ki], t1p[0][kj])
            tauaa[ki,kj,kj] -= einsum('ib,ja->ijab', fac*.5*t1[0][ki], t1p[0][kj])
            tauaa[ki,kj,kj] -= einsum('ja,ib->ijab', fac*.5*t1[0][kj], t1p[0][ki])
            tauaa[ki,kj,ki] += einsum('jb,ia->ijab', fac*.5*t1[0][kj], t1p[0][ki])
            # beta-beta block: same antisymmetrization with beta t1
            taubb[ki,kj,ki] += einsum('ia,jb->ijab', fac*.5*t1[1][ki], t1p[1][kj])
            taubb[ki,kj,kj] -= einsum('ib,ja->ijab', fac*.5*t1[1][ki], t1p[1][kj])
            taubb[ki,kj,kj] -= einsum('ja,ib->ijab', fac*.5*t1[1][kj], t1p[1][ki])
            taubb[ki,kj,ki] += einsum('jb,ia->ijab', fac*.5*t1[1][kj], t1p[1][ki])
            # alpha-beta block: no antisymmetrization between the two spins
            tauab[ki,kj,ki] += einsum('ia,jb->ijab', fac*.5*t1[0][ki], t1p[1][kj])
            tauab[ki,kj,ki] += einsum('jb,ia->ijab', fac*.5*t1[1][kj], t1p[0][ki])
    return tauaa, tauab, taubb
def make_tau2(cc, t2, t1, t1p, fac=1.):
    """Tau-like amplitudes t2 + fac * symmetrized t1*t1p (no antisymmetrization).

    Unlike make_tau, only the symmetric [ki,kj,ki] slices are dressed;
    the exchange (antisymmetrized) t1*t1p terms are omitted.
    """
    t2aa, t2ab, t2bb = t2
    nkpts = len(t2aa)
    out_aa = t2aa.copy()
    out_ab = t2ab.copy()
    out_bb = t2bb.copy()
    for ki, kj in itertools.product(range(nkpts), repeat=2):
        out_aa[ki,kj,ki] += einsum('ia,jb->ijab', fac*.5*t1[0][ki], t1p[0][kj])
        out_aa[ki,kj,ki] += einsum('jb,ia->ijab', fac*.5*t1[0][kj], t1p[0][ki])
        out_bb[ki,kj,ki] += einsum('ia,jb->ijab', fac*.5*t1[1][ki], t1p[1][kj])
        out_bb[ki,kj,ki] += einsum('jb,ia->ijab', fac*.5*t1[1][kj], t1p[1][ki])
        out_ab[ki,kj,ki] += einsum('ia,jb->ijab', fac*.5*t1[0][ki], t1p[1][kj])
        out_ab[ki,kj,ki] += einsum('jb,ia->ijab', fac*.5*t1[1][kj], t1p[0][ki])
    return out_aa, out_ab, out_bb
def cc_Fvv(cc, t1, t2, eris):
    """Virtual-virtual (Fae) one-particle intermediate of k-point UCCSD.

    Returns the (alpha, beta) pair of (nkpts, nvir, nvir) arrays, built from
    the fock vv blocks, t1-dressed vovv integrals and the tau_tilde * ovov
    contraction (make_tau with fac=0.5).
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocc_a, nvir_a = t1a.shape
    nocc_b, nvir_b = t1b.shape[1:]
    kconserv = cc.khelper.kconserv
    fa = np.zeros((nkpts,nvir_a,nvir_a), dtype=np.complex128)
    fb = np.zeros((nkpts,nvir_b,nvir_b), dtype=np.complex128)
    tau_tildeaa,tau_tildeab,tau_tildebb=make_tau(cc,t2,t1,t1,fac=0.5)
    # Fock-matrix slices for each k point: occupied-virtual and virtual-virtual
    fov = eris.fock[0][:,:nocc_a,nocc_a:]
    fOV = eris.fock[1][:,:nocc_b,nocc_b:]
    fvv = eris.fock[0][:,nocc_a:,nocc_a:]
    fVV = eris.fock[1][:,nocc_b:,nocc_b:]
    for ka in range(nkpts):
        fa[ka]+=fvv[ka]
        fb[ka]+=fVV[ka]
        # -1/2 f(me) t1(ma) dressing
        fa[ka]-=0.5*einsum('me,ma->ae',fov[ka],t1a[ka])
        fb[ka]-=0.5*einsum('me,ma->ae',fOV[ka],t1b[ka])
        for km in range(nkpts):
            # t1-dressed vovv terms: same-spin antisymmetrized pair plus the
            # opposite-spin contribution
            fa[ka]+=einsum('mf,fmea->ae',t1a[km], eris.vovv[km,km,ka].conj())
            fa[ka]-=einsum('mf,emfa->ae',t1a[km], eris.vovv[ka,km,km].conj())
            fa[ka]+=einsum('mf,fmea->ae',t1b[km], eris.VOvv[km,km,ka].conj())
            fb[ka]+=einsum('mf,fmea->ae',t1b[km], eris.VOVV[km,km,ka].conj())
            fb[ka]-=einsum('mf,emfa->ae',t1b[km], eris.VOVV[ka,km,km].conj())
            fb[ka]+=einsum('mf,fmea->ae',t1a[km], eris.voVV[km,km,ka].conj())
            for kn in range(nkpts):
                # kf fixed by momentum conservation for the exchange integral
                kf = kconserv[km,ka,kn]
                tmp = eris.ovov[km,ka,kn] - eris.ovov[km,kf,kn].transpose(0,3,2,1)
                fa[ka] -= einsum('mnaf,menf->ae', tau_tildeaa[km,kn,ka], tmp) * .5
                fa[ka] -= einsum('mNaF,meNF->ae', tau_tildeab[km,kn,ka], eris.ovOV[km,ka,kn])
                tmp = eris.OVOV[km,ka,kn] - eris.OVOV[km,kf,kn].transpose(0,3,2,1)
                fb[ka] -= einsum('mnaf,menf->ae', tau_tildebb[km,kn,ka], tmp) * .5
                fb[ka] -= einsum('MnFa,MFne->ae', tau_tildeab[km,kn,kf], eris.ovOV[km,kf,kn])
    return fa,fb
def cc_Foo(cc, t1, t2, eris):
    """Occupied-occupied (Fmi) one-particle intermediate of k-point UCCSD.

    Returns the (alpha, beta) pair of (nkpts, nocc, nocc) arrays, built from
    the fock oo blocks, t1-dressed ooov integrals and the tau_tilde * ovov
    contraction (make_tau with fac=0.5).
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocc_a, nvir_a = t1a.shape
    nocc_b, nvir_b = t1b.shape[1:]
    kconserv = cc.khelper.kconserv
    fa = np.zeros((nkpts,nocc_a,nocc_a), dtype=np.complex128)
    fb = np.zeros((nkpts,nocc_b,nocc_b), dtype=np.complex128)
    tau_tildeaa,tau_tildeab,tau_tildebb=make_tau(cc,t2,t1,t1,fac=0.5)
    # Fock-matrix slices: occupied-virtual and occupied-occupied blocks
    fov = eris.fock[0][:,:nocc_a,nocc_a:]
    fOV = eris.fock[1][:,:nocc_b,nocc_b:]
    foo = eris.fock[0][:,:nocc_a,:nocc_a]
    fOO = eris.fock[1][:,:nocc_b,:nocc_b]
    for ka in range(nkpts):
        fa[ka]+=foo[ka]
        fb[ka]+=fOO[ka]
        # +1/2 f(me) t1(ne) dressing
        fa[ka]+=0.5*einsum('me,ne->mn',fov[ka],t1a[ka])
        fb[ka]+=0.5*einsum('me,ne->mn',fOV[ka],t1b[ka])
        for km in range(nkpts):
            # t1-dressed ooov terms (same-spin pair plus opposite-spin term)
            fa[ka]+=einsum('oa,mnoa->mn',t1a[km],eris.ooov[ka,ka,km])
            fa[ka]+=einsum('oa,mnoa->mn',t1b[km],eris.ooOV[ka,ka,km])
            fa[ka]-=einsum('oa,onma->mn',t1a[km],eris.ooov[km,ka,ka])
            fb[ka]+=einsum('oa,mnoa->mn',t1b[km],eris.OOOV[ka,ka,km])
            fb[ka]+=einsum('oa,mnoa->mn',t1a[km],eris.OOov[ka,ka,km])
            fb[ka]-=einsum('oa,onma->mn',t1b[km],eris.OOOV[km,ka,ka])
    # tau_tilde * ovov contraction: accumulates into fa[km]/fb[km], so this
    # nest is independent of the loop above
    for km in range(nkpts):
        for kn in range(nkpts):
            for ke in range(nkpts):
                kf = kconserv[km,ke,kn]
                tmp = eris.ovov[km,ke,kn] - eris.ovov[km,kf,kn].transpose(0,3,2,1)
                fa[km] += einsum('inef,menf->mi', tau_tildeaa[km,kn,ke], tmp) * .5
                fa[km] += einsum('iNeF,meNF->mi',tau_tildeab[km,kn,ke],eris.ovOV[km,ke,kn])
                tmp = eris.OVOV[km,ke,kn] - eris.OVOV[km,kf,kn].transpose(0,3,2,1)
                fb[km] += einsum('INEF,MENF->MI',tau_tildebb[km,kn,ke], tmp) * .5
                fb[km] += einsum('nIeF,neMF->MI',tau_tildeab[kn,km,ke],eris.ovOV[kn,ke,km])
    return fa,fb
def cc_Fov(cc, t1, t2, eris):
    """Occupied-virtual (Fme) one-particle intermediate of k-point UCCSD.

    The fock ov blocks dressed with t1 contracted against the (anti)symmetrized
    ovov integrals.  Returns the (alpha, beta) pair of (nkpts, nocc, nvir)
    arrays.
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocc_a, nvir_a = t1a.shape
    nocc_b, nvir_b = t1b.shape[1:]
    kconserv = cc.khelper.kconserv
    # Fock ov slices for each k point
    fov = eris.fock[0][:,:nocc_a,nocc_a:]
    fOV = eris.fock[1][:,:nocc_b,nocc_b:]
    Fme_a = np.zeros((nkpts,nocc_a,nvir_a), dtype=np.complex128)
    Fme_b = np.zeros((nkpts,nocc_b,nvir_b), dtype=np.complex128)
    for km in range(nkpts):
        Fme_a[km] += fov[km]
        Fme_b[km] += fOV[km]
        for kn in range(nkpts):
            # alpha: same-spin Coulomb + opposite-spin - same-spin exchange
            Fme_a[km] += einsum('nf,menf->me', t1a[kn], eris.ovov[km,km,kn])
            Fme_a[km] += einsum('nf,menf->me', t1b[kn], eris.ovOV[km,km,kn])
            Fme_a[km] -= einsum('nf,mfne->me', t1a[kn], eris.ovov[km,kn,kn])
            # beta: same structure with the spin roles swapped
            Fme_b[km] += einsum('nf,menf->me', t1b[kn], eris.OVOV[km,km,kn])
            Fme_b[km] += einsum('nf,nfme->me', t1a[kn], eris.ovOV[kn,kn,km])
            Fme_b[km] -= einsum('nf,mfne->me', t1b[kn], eris.OVOV[km,kn,kn])
    return Fme_a, Fme_b
def cc_Woooo(cc, t1, t2, eris):
    """Four-occupied intermediates (Woooo, WooOO, WOOOO) of k-point UCCSD.

    Combines bare oooo integrals, t1-dressed ooov terms and the tau * ovov
    contraction; the same-spin blocks are antisymmetrized at the end via the
    combined k-point/orbital transpose.
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocca, nvira = t1a.shape
    noccb, nvirb = t1b.shape[1:]
    dtype = np.result_type(t1a, t1b, t2aa, t2ab, t2bb)
    Woooo = np.zeros(eris.oooo.shape, dtype=dtype)
    WooOO = np.zeros(eris.ooOO.shape, dtype=dtype)
    WOOOO = np.zeros(eris.OOOO.shape, dtype=dtype)
    kconserv = cc.khelper.kconserv
    tau_aa, tau_ab, tau_bb = make_tau(cc, t2, t1, t1)
    for km in range(nkpts):
        for kn in range(nkpts):
            # t1 * ooov terms; x/y label the remaining free k indices
            tmp_aaaaJ = einsum('xje, ymine->yxminj', t1a, eris.ooov[km,:,kn])
            tmp_aaaaJ -= tmp_aaaaJ.transpose((1,0,2,5,4,3))
            tmp_bbbbJ = einsum('xje, ymine->yxminj', t1b, eris.OOOV[km,:,kn])
            tmp_bbbbJ -= tmp_bbbbJ.transpose((1,0,2,5,4,3))
            tmp_aabbJ = einsum('xje, ymine->yxminj', t1b, eris.ooOV[km,:,kn])
            tmp_baabJ = -einsum('yie,xmjne->yxminj', t1a, eris.OOov[km,:,kn])
            Woooo[km,:,kn] += eris.oooo[km,:,kn]
            WooOO[km,:,kn] += eris.ooOO[km,:,kn]
            WOOOO[km,:,kn] += eris.OOOO[km,:,kn]
            # fancy-index over all ki at once; kj fixed by momentum conservation
            ki = range(nkpts)
            kj = kconserv[km,ki,kn]
            Woooo[km,ki,kn] += tmp_aaaaJ[ki,kj]
            WOOOO[km,ki,kn] += tmp_bbbbJ[ki,kj]
            WooOO[km,ki,kn] += tmp_aabbJ[ki,kj]
            WooOO[kn,ki,km] -= tmp_baabJ[ki,kj].transpose(0,3,2,1,4)
            # tau * ovov contraction (0.25 same-spin, 0.5 mixed-spin weight)
            Woooo[km,ki,kn] += 0.25*einsum('yxijef,xmenf->yminj', tau_aa[ki,kj], eris.ovov[km,ki,kn])
            WOOOO[km,ki,kn] += 0.25*einsum('yxijef,xmenf->yminj', tau_bb[ki,kj], eris.OVOV[km,ki,kn])
            WooOO[km,ki,kn] += 0.5*einsum('yxijef,xmenf->yminj', tau_ab[ki,kj], eris.ovOV[km,ki,kn])
    # antisymmetrize the same-spin blocks over (km,m) <-> (kn,n)
    Woooo = Woooo - Woooo.transpose(2,1,0,5,4,3,6)
    WOOOO = WOOOO - WOOOO.transpose(2,1,0,5,4,3,6)
    return Woooo, WooOO, WOOOO
def cc_Wvvvv(cc, t1, t2, eris):
    """t1-dressed four-virtual intermediates (Wvvvv, WvvVV, WVVVV).

    The same-spin blocks are antisymmetrized in a<->b; the mixed-spin block
    is not.  The commented-out einsum expressions document the equivalent
    dense formulas implemented by the k-point loops below.
    """
    t1a, t1b = t1
    nkpts = cc.nkpts
    kconserv = cc.khelper.kconserv
    #:wvvvv = eris.vvvv.copy()
    #:Wvvvv += np.einsum('ymb,zyxemfa,zxyw->wzyaebf', t1a, eris.vovv.conj(), P)
    #:Wvvvv -= np.einsum('ymb,xyzfmea,xzyw->wzyaebf', t1a, eris.vovv.conj(), P)
    #:Wvvvv = Wvvvv - Wvvvv.transpose(2,1,0,5,4,3,6)
    Wvvvv = np.zeros_like(eris.vvvv)
    for ka, kb, ke in kpts_helper.loop_kkk(cc.nkpts):
        kf = kconserv[ka,ke,kb]
        aebf = eris.vvvv[ka,ke,kb].copy()
        aebf += einsum('mb,emfa->aebf', t1a[kb], eris.vovv[ke,kb,kf].conj())
        aebf -= einsum('mb,fmea->aebf', t1a[kb], eris.vovv[kf,kb,ke].conj())
        Wvvvv[ka,ke,kb] += aebf
        # antisymmetric partner: a<->b swap with a sign change
        Wvvvv[kb,ke,ka] -= aebf.transpose(2,1,0,3)
    #:WvvVV = eris.vvVV.copy()
    #:WvvVV -= np.einsum('xma,zxwemFB,zwxy->xzyaeBF', t1a, eris.voVV.conj(), P)
    #:WvvVV -= np.einsum('yMB,wyzFMea,wzyx->xzyaeBF', t1b, eris.VOvv.conj(), P)
    WvvVV = np.empty_like(eris.vvVV)
    for ka, kb, ke in kpts_helper.loop_kkk(cc.nkpts):
        kf = kconserv[ka,ke,kb]
        aebf = eris.vvVV[ka,ke,kb].copy()
        aebf -= einsum('ma,emfb->aebf', t1a[ka], eris.voVV[ke,ka,kf].conj())
        aebf -= einsum('mb,fmea->aebf', t1b[kb], eris.VOvv[kf,kb,ke].conj())
        WvvVV[ka,ke,kb] = aebf
    #:WVVVV = eris.VVVV.copy()
    #:WVVVV += np.einsum('ymb,zyxemfa,zxyw->wzyaebf', t1b, eris.VOVV.conj(), P)
    #:WVVVV -= np.einsum('ymb,xyzfmea,xzyw->wzyaebf', t1b, eris.VOVV.conj(), P)
    #:WVVVV = WVVVV - WVVVV.transpose(2,1,0,5,4,3,6)
    WVVVV = np.zeros_like(eris.VVVV)
    for ka, kb, ke in kpts_helper.loop_kkk(cc.nkpts):
        kf = kconserv[ka,ke,kb]
        aebf = eris.VVVV[ka,ke,kb].copy()
        aebf += einsum('mb,emfa->aebf', t1b[kb], eris.VOVV[ke,kb,kf].conj())
        aebf -= einsum('mb,fmea->aebf', t1b[kb], eris.VOVV[kf,kb,ke].conj())
        WVVVV[ka,ke,kb] += aebf
        # antisymmetric partner for the beta-beta block
        WVVVV[kb,ke,ka] -= aebf.transpose(2,1,0,3)
    return Wvvvv, WvvVV, WVVVV
#TODO: merge cc_Wvvvv_half and cc_Wvvvv
def cc_Wvvvv_half(cc, t1, t2, eris):
    '''Similar to cc_Wvvvv, without anti-symmetrization of the same-spin blocks.

    Only the direct [ka,ke,kb] slice is accumulated for the aaaa/bbbb cases;
    the a<->b partner term is deliberately omitted.
    '''
    t1a, t1b = t1
    nkpts = cc.nkpts
    kconserv = cc.khelper.kconserv
    #:wvvvv = eris.vvvv.copy()
    #:Wvvvv += np.einsum('ymb,zyxemfa,zxyw->wzyaebf', t1a, eris.vovv.conj(), P)
    #:Wvvvv -= np.einsum('ymb,xyzfmea,xzyw->wzyaebf', t1a, eris.vovv.conj(), P)
    #:Wvvvv = Wvvvv - Wvvvv.transpose(2,1,0,5,4,3,6)
    Wvvvv = np.zeros_like(eris.vvvv)
    for ka, kb, ke in kpts_helper.loop_kkk(cc.nkpts):
        kf = kconserv[ka,ke,kb]
        aebf = eris.vvvv[ka,ke,kb].copy()
        aebf += einsum('mb,emfa->aebf', t1a[kb], eris.vovv[ke,kb,kf].conj())
        aebf -= einsum('mb,fmea->aebf', t1a[kb], eris.vovv[kf,kb,ke].conj())
        Wvvvv[ka,ke,kb] += aebf
    #:WvvVV = eris.vvVV.copy()
    #:WvvVV -= np.einsum('xma,zxwemFB,zwxy->xzyaeBF', t1a, eris.voVV.conj(), P)
    #:WvvVV -= np.einsum('yMB,wyzFMea,wzyx->xzyaeBF', t1b, eris.VOvv.conj(), P)
    WvvVV = np.empty_like(eris.vvVV)
    for ka, kb, ke in kpts_helper.loop_kkk(cc.nkpts):
        kf = kconserv[ka,ke,kb]
        aebf = eris.vvVV[ka,ke,kb].copy()
        aebf -= einsum('ma,emfb->aebf', t1a[ka], eris.voVV[ke,ka,kf].conj())
        aebf -= einsum('mb,fmea->aebf', t1b[kb], eris.VOvv[kf,kb,ke].conj())
        WvvVV[ka,ke,kb] = aebf
    #:WVVVV = eris.VVVV.copy()
    #:WVVVV += np.einsum('ymb,zyxemfa,zxyw->wzyaebf', t1b, eris.VOVV.conj(), P)
    #:WVVVV -= np.einsum('ymb,xyzfmea,xzyw->wzyaebf', t1b, eris.VOVV.conj(), P)
    #:WVVVV = WVVVV - WVVVV.transpose(2,1,0,5,4,3,6)
    WVVVV = np.zeros_like(eris.VVVV)
    for ka, kb, ke in kpts_helper.loop_kkk(cc.nkpts):
        kf = kconserv[ka,ke,kb]
        aebf = eris.VVVV[ka,ke,kb].copy()
        aebf += einsum('mb,emfa->aebf', t1b[kb], eris.VOVV[ke,kb,kf].conj())
        aebf -= einsum('mb,fmea->aebf', t1b[kb], eris.VOVV[kf,kb,ke].conj())
        WVVVV[ka,ke,kb] += aebf
    return Wvvvv, WvvVV, WVVVV
def Wvvvv(cc, t1, t2, eris):
    """Full four-virtual intermediates: cc_Wvvvv plus the tau * ovov terms."""
    nkpts = cc.nkpts
    kconserv = cc.khelper.kconserv
    tau_aa, tau_ab, tau_bb = make_tau(cc, t2, t1, t1)
    # start from the t1-dressed blocks, then add the quadratic contributions
    W_aa, W_ab, W_bb = cc_Wvvvv(cc, t1, t2, eris)
    for ka, kb, ke in kpts_helper.loop_kkk(cc.nkpts):
        kf = kconserv[ka, ke, kb]
        for km in range(nkpts):
            # kn fixed by momentum conservation of the tau amplitudes
            kn = kconserv[ka, km, kb]
            W_aa[ka,ke,kb] += einsum('mnab,menf->aebf', tau_aa[km,kn,ka], eris.ovov[km,ke,kn])
            W_ab[ka,ke,kb] += einsum('mNaB,meNF->aeBF', tau_ab[km,kn,ka], eris.ovOV[km,ke,kn])
            W_bb[ka,ke,kb] += einsum('mnab,menf->aebf', tau_bb[km,kn,ka], eris.OVOV[km,ke,kn])
    return W_aa, W_ab, W_bb
def get_Wvvvv(cc, t1, t2, eris, ka, kb, kc):
    """Build one (ka, kb, kc) block of the Wvvvv intermediates on the fly.

    Returns (vvvv, vvVV, VVVV) for the requested k-point triple; kd is fixed
    by momentum conservation.  When the eris object carries GDF 3-center
    tensors (Lpv/LPV) the t1-dressed part is assembled from those; otherwise
    from the stored 4-index integrals.  The t2 * ovov contribution is added
    in either case.
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nocca, noccb = cc.nocc
    nkpts = cc.nkpts
    kconserv = cc.khelper.kconserv
    kd = kconserv[ka, kc, kb]
    if getattr(eris, 'Lpv', None) is not None:
        # Using GDF to generate Wvvvv on the fly
        Lpv = eris.Lpv
        LPV = eris.LPV
        # t1-dress the 3-center tensors: vv slice minus t1-contracted ov slice.
        # NOTE(review): the einsum subscript labels below are reused across
        # blocks; only the array arguments distinguish e.g. Lbc from Lac.
        Lac = (Lpv[ka,kc][:,nocca:] -
               einsum('Lkc,ka->Lac', Lpv[ka,kc][:,:nocca], t1a[ka]))
        Lbd = (Lpv[kb,kd][:,nocca:] -
               einsum('Lkd,kb->Lbd', Lpv[kb,kd][:,:nocca], t1a[kb]))
        Lbc = (Lpv[kb,kc][:,nocca:] -
               einsum('Lkc,ka->Lac', Lpv[kb,kc][:,:nocca], t1a[kb]))
        Lad = (Lpv[ka,kd][:,nocca:] -
               einsum('Lkd,kb->Lbd', Lpv[ka,kd][:,:nocca], t1a[ka]))
        LAC = (LPV[ka,kc][:,noccb:] -
               einsum('Lkd,kb->Lbd', LPV[ka,kc][:,:noccb], t1b[ka]))
        LBD = (LPV[kb,kd][:,noccb:] -
               einsum('Lkd,kb->Lbd', LPV[kb,kd][:,:noccb], t1b[kb]))
        LBC = (LPV[kb,kc][:,noccb:] -
               einsum('Lkc,ka->Lac', LPV[kb,kc][:,:noccb], t1b[kb]))
        LAD = (LPV[ka,kd][:,noccb:] -
               einsum('Lkd,kb->Lbd', LPV[ka,kd][:,:noccb], t1b[ka]))
        # contract 3-center tensors; direct minus exchange for same spin
        vvvv = einsum('Lac,Lbd->acbd', Lac, Lbd)
        vvvv-= einsum('Lbc,Lad->acbd', Lbc, Lad)
        vvVV = einsum('Lac,Lbd->acbd', Lac, LBD)
        VVVV = einsum('Lac,Lbd->acbd', LAC, LBD)
        VVVV-= einsum('Lbc,Lad->acbd', LBC, LAD)
        # GDF contraction carries a 1/nkpts normalization
        vvvv *= (1./nkpts)
        vvVV *= (1./nkpts)
        VVVV *= (1./nkpts)
    else:
        # dense path: t1-dressed vovv terms plus bare vvvv and t1^2 * ovov
        vvvv = einsum('emfa,mb->aebf', eris.vovv[kc,kb,kd].conj(), t1a[kb])
        vvvv -= einsum('fmea,mb->aebf', eris.vovv[kd,kb,kc].conj(), t1a[kb])
        vvvv -= einsum('emfb,ma->aebf', eris.vovv[kc,ka,kd].conj(), t1a[ka])
        vvvv += einsum('fmeb,ma->aebf', eris.vovv[kd,ka,kc].conj(), t1a[ka])
        vvvv += eris.vvvv[ka,kc,kb]
        vvvv -= eris.vvvv[kb,kc,ka].transpose(2,1,0,3)
        vvvv += einsum('mcnf,ma,nb->acbf', eris.ovov[ka,kc,kb], t1a[ka], t1a[kb])
        vvvv -= einsum('mcnf,mb,na->acbf', eris.ovov[kb,kc,ka], t1a[kb], t1a[ka])
        vvVV = einsum('emfb,ma->aebf', eris.voVV[kc,ka,kd].conj(),-t1a[ka])
        vvVV += einsum('fmea,mb->aebf', eris.VOvv[kd,kb,kc].conj(),-t1b[kb])
        vvVV += einsum('mcnf,ma,nb->acbf', eris.ovOV[ka,kc,kb], t1a[ka], t1b[kb])
        vvVV += eris.vvVV[ka,kc,kb]
        VVVV = einsum('emfa,mb->aebf', eris.VOVV[kc,kb,kd].conj(), t1b[kb])
        VVVV -= einsum('fmea,mb->aebf', eris.VOVV[kd,kb,kc].conj(), t1b[kb])
        VVVV -= einsum('emfb,ma->aebf', eris.VOVV[kc,ka,kd].conj(), t1b[ka])
        VVVV += einsum('fmeb,ma->aebf', eris.VOVV[kd,ka,kc].conj(), t1b[ka])
        VVVV += eris.VVVV[ka,kc,kb]
        VVVV -= eris.VVVV[kb,kc,ka].transpose(2,1,0,3)
        VVVV += einsum('mcnf,ma,nb->acbf', eris.OVOV[ka,kc,kb], t1b[ka], t1b[kb])
        VVVV -= einsum('mcnf,mb,na->acbf', eris.OVOV[kb,kc,ka], t1b[kb], t1b[ka])
    # t2 * ovov contribution, common to both branches
    for km in range(nkpts):
        kn = kconserv[ka,km,kb]
        vvvv += einsum('mnab,mcnf->acbf', t2aa[km,kn,ka], eris.ovov[km,kc,kn])
        vvVV += einsum('mNaB,mcNF->acBF', t2ab[km,kn,ka], eris.ovOV[km,kc,kn])
        VVVV += einsum('mnab,mcnf->acbf', t2bb[km,kn,ka], eris.OVOV[km,kc,kn])
    return vvvv, vvVV, VVVV
def cc_Wovvo(cc, t1, t2, eris):
    """Wovvo (mebj-type) intermediates for all six spin cases.

    Returns (Wovvo, WovVO, WOVvo, WOVVO, WoVVo, WOvvO).  Starts from the
    (conjugated/transposed) voov and oovv integrals, adds t1-dressed vovv and
    ooov terms, then the tau/t2 * ovov contractions; make_tau2 with fac=2.0
    supplies tau = t2 + 2*(t1 t1)/2 without antisymmetrization.
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocca, nvira = t1a.shape
    noccb, nvirb = t1b.shape[1:]
    kconserv = kpts_helper.get_kconserv(cc._scf.cell, cc.kpts)
    dtype = np.result_type(*t2)
    Wovvo = np.zeros((nkpts,nkpts,nkpts,nocca,nvira,nvira,nocca), dtype)
    WovVO = np.zeros((nkpts,nkpts,nkpts,nocca,nvira,nvirb,noccb), dtype)
    WOVvo = np.zeros((nkpts,nkpts,nkpts,noccb,nvirb,nvira,nocca), dtype)
    WOVVO = np.zeros((nkpts,nkpts,nkpts,noccb,nvirb,nvirb,noccb), dtype)
    WoVVo = np.zeros((nkpts,nkpts,nkpts,nocca,nvirb,nvirb,nocca), dtype)
    WOvvO = np.zeros((nkpts,nkpts,nkpts,noccb,nvira,nvira,noccb), dtype)
    # bare integral contributions
    for ka, ki, kj in kpts_helper.loop_kkk(nkpts):
        kb = kconserv[ka,ki,kj]
        Wovvo[ki,ka,kb] += eris.voov[ka,ki,kj].conj().transpose(1,0,3,2)
        WovVO[ki,ka,kb] += eris.voOV[ka,ki,kj].conj().transpose(1,0,3,2)
        WOVvo[ki,ka,kb] += eris.voOV[kb,kj,ki].transpose(2,3,0,1)
        WOVVO[ki,ka,kb] += eris.VOOV[ka,ki,kj].conj().transpose(1,0,3,2)
        kb = kconserv[ki,kj,ka]
        Wovvo[ki,kb,ka] -= eris.oovv[ki,kj,ka].transpose(0,3,2,1)
        WOVVO[ki,kb,ka] -= eris.OOVV[ki,kj,ka].transpose(0,3,2,1)
        WoVVo[ki,kb,ka] -= eris.ooVV[ki,kj,ka].transpose(0,3,2,1)
        WOvvO[ki,kb,ka] -= eris.OOvv[ki,kj,ka].transpose(0,3,2,1)
    tauaa, tauab, taubb = make_tau2(cc, t2, t1, t1,fac=2.0)
    for km in range(nkpts):
        for kb in range(nkpts):
            for ke in range(nkpts):
                kj = kconserv[km,ke,kb]
                vovv = eris.vovv[ke,km,kj].conj()
                VOVV = eris.VOVV[ke,km,kj].conj()
                voVV = eris.voVV[ke,km,kj].conj()
                VOvv = eris.VOvv[ke,km,kj].conj()
                # + t1(jf) * vovv terms
                Wovvo[km,ke,kb] += einsum('jf, emfb->mebj', t1a[kj], vovv)
                WOVVO[km,ke,kb] += einsum('jf, emfb->mebj', t1b[kj], VOVV)
                WovVO[km,ke,kb] += einsum('jf, emfb->mebj', t1b[kj], voVV)
                WOVvo[km,ke,kb] += einsum('jf, emfb->mebj', t1a[kj], VOvv)
                ##### warnings for Ks
                # exchange-like partners: note these accumulate into [km,kj,kb]
                Wovvo[km,kj,kb] -= einsum('je, emfb->mfbj', t1a[ke], vovv)
                WOVVO[km,kj,kb] -= einsum('je, emfb->mfbj', t1b[ke], VOVV)
                WOvvO[km,kj,kb] -= einsum('je, emfb->mfbj', t1b[ke], VOvv)
                WoVVo[km,kj,kb] -= einsum('je, emfb->mfbj', t1a[ke], voVV)
                # - t1(nb) * ooov terms
                WOVvo[km,ke,kb] -= einsum('nb, njme->mebj', t1a[kb], eris.ooOV[kb,kj,km])
                WovVO[km,ke,kb] -= einsum('nb, njme->mebj', t1b[kb], eris.OOov[kb,kj,km])
                WOvvO[km,ke,kb] += einsum('nb, mjne->mebj', t1a[kb], eris.OOov[km,kj,kb])
                WoVVo[km,ke,kb] += einsum('nb, mjne->mebj', t1b[kb], eris.ooOV[km,kj,kb])
                ooov_temp = eris.ooov[kb,kj,km] - eris.ooov[km,kj,kb].transpose((2,1,0,3))
                Wovvo[km,ke,kb] -= einsum('nb, njme->mebj', t1a[kb], ooov_temp)
                ooov_temp = None
                OOOV_temp = eris.OOOV[kb,kj,km] - eris.OOOV[km,kj,kb].transpose((2,1,0,3))
                WOVVO[km,ke,kb] -= einsum('nb, njme->mebj', t1b[kb], OOOV_temp)
                OOOV_temp = None
                # tau / t2 * ovov contractions; 'x' runs over the summed k index
                Wovvo[km,ke,kb] += 0.5*einsum('xjnbf,xmenf->mebj', t2ab[kj,:,kb], eris.ovOV[km,ke,:])
                WOvvO[km,ke,kb] += 0.5*einsum('xnjbf,xnemf->mebj', tauab[:,kj,kb], eris.ovOV[:,ke,km])
                WovVO[km,ke,kb] -= 0.5*einsum('xnjbf,xmenf->mebj', taubb[:,kj,kb], eris.ovOV[km,ke,:])
                # gather ovOV slices with kf fixed by momentum conservation
                temp_ovOV_1 = np.zeros([nkpts, nocca, nvira, noccb, nvirb], dtype=dtype)
                temp_ovOV_2 = np.zeros([nkpts, nocca, nvira, noccb, nvirb], dtype=dtype)
                for kn in range(nkpts):
                    kf = kconserv[km,ke,kn]
                    temp_ovOV_1[kn] += eris.ovOV[kn,kf,km].copy()
                    temp_ovOV_2[kn] += eris.ovOV[km,kf,kn].copy()
                # fancy indices: kn runs over all k points, kf is the conjugate array
                kn = range(nkpts)
                kf = kconserv[km,ke][kn]
                WOVVO[km,ke,kb] += 0.5*einsum('xnjfb,xnfme->mebj', t2ab[kn,kj,kf], temp_ovOV_1)
                WOVvo[km,ke,kb] -= 0.5*einsum('xnjbf,xnfme->mebj', tauaa[:,kj,kb], temp_ovOV_1)
                WoVVo[km,ke,kb] += 0.5*einsum('xjnfb,xmfne->mebj', tauab[kj,kn,kf], temp_ovOV_2)
                temp_OVOV = eris.OVOV[km,ke,:] - eris.OVOV[:,ke,km].transpose((0,3,2,1,4))
                WOVVO[km,ke,kb] -= 0.5*einsum('xnjbf,xmenf->mebj', taubb[:,kj,kb], temp_OVOV)
                WOVvo[km,ke,kb] += 0.5*einsum('xjnbf,xmenf->mebj', t2ab[kj,:,kb], temp_OVOV)
                temp_OVOV = None
                temp_ovov = eris.ovov[:,ke,km] - eris.ovov[km,ke,:].transpose((0,3,2,1,4))
                Wovvo[km,ke,kb] += 0.5*einsum('xnjbf,xnemf->mebj', tauaa[:,kj,kb], temp_ovov)
                WovVO[km,ke,kb] -= 0.5*einsum('xnjfb,xnemf->mebj', t2ab[kn,kj,kf], temp_ovov)
                temp_ovov = None
    return Wovvo, WovVO, WOVvo, WOVVO, WoVVo, WOvvO
def _cc_Wovvo_k0k2(cc, t1, t2, eris, k0, k2):
    """Wovvo intermediates with two k indices fixed at (k0, k2).

    Same physics as cc_Wovvo, but only the slices for the given (k0, k2)
    pair are built, each of shape (nkpts, nocc, nvir, nvir, nocc), to limit
    memory.  The commented-out einsum block documents the dense formulas.
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocca, nvira = t1a.shape
    noccb, nvirb = t1b.shape[1:]
    kconserv = kpts_helper.get_kconserv(cc._scf.cell, cc.kpts)
    dtype = np.result_type(*t2)
    Wovvo = np.zeros((nkpts,nocca,nvira,nvira,nocca), dtype)
    WovVO = np.zeros((nkpts,nocca,nvira,nvirb,noccb), dtype)
    WOVvo = np.zeros((nkpts,noccb,nvirb,nvira,nocca), dtype)
    WOVVO = np.zeros((nkpts,noccb,nvirb,nvirb,noccb), dtype)
    WoVVo = np.zeros((nkpts,nocca,nvirb,nvirb,nocca), dtype)
    WOvvO = np.zeros((nkpts,noccb,nvira,nvira,noccb), dtype)
    #:P = kconserv_mat(cc.nkpts, kconserv)
    #:Wovvo = np.einsum('xyzaijb,xzyw->yxwiabj', eris.voov, P).conj()
    #:WovVO = np.einsum('xyzaijb,xzyw->yxwiabj', eris.voOV, P).conj()
    #:WOVvo = np.einsum('wzybjia,xzyw->yxwiabj', eris.voOV, P)
    #:WOVVO = np.einsum('xyzaijb,xzyw->yxwiabj', eris.VOOV, P).conj()
    #:Wovvo-= np.einsum('xyzijab,xzyw->xwzibaj', eris.oovv, P)
    #:WOVVO-= np.einsum('xyzijab,xzyw->xwzibaj', eris.OOVV, P)
    #:WoVVo = np.einsum('xyzijab,xzyw->xwzibaj', eris.ooVV, -P)
    #:WOvvO = np.einsum('xyzijab,xzyw->xwzibaj', eris.OOvv, -P)
    # bare voov-type contributions
    for kj in range(nkpts):
        ka = kconserv[k2,kj,k0]
        Wovvo[ka] += eris.voov[ka,k0,kj].conj().transpose(1,0,3,2)
        WovVO[ka] += eris.voOV[ka,k0,kj].conj().transpose(1,0,3,2)
        WOVvo[ka] += eris.voOV[k2,kj,k0].transpose(2,3,0,1)
        WOVVO[ka] += eris.VOOV[ka,k0,kj].conj().transpose(1,0,3,2)
    # bare oovv-type contributions (opposite sign)
    for kj in range(nkpts):
        kb = kconserv[k0,kj,k2]
        Wovvo[kb] -= eris.oovv[k0,kj,k2].transpose(0,3,2,1)
        WOVVO[kb] -= eris.OOVV[k0,kj,k2].transpose(0,3,2,1)
        WoVVo[kb] -= eris.ooVV[k0,kj,k2].transpose(0,3,2,1)
        WOvvO[kb] -= eris.OOvv[k0,kj,k2].transpose(0,3,2,1)
    for ke in range(nkpts):
        kj = kconserv[k0,ke,k2]
        vovv = eris.vovv[ke,k0,kj].conj()
        VOVV = eris.VOVV[ke,k0,kj].conj()
        voVV = eris.voVV[ke,k0,kj].conj()
        VOvv = eris.VOvv[ke,k0,kj].conj()
        # + t1(jf) * vovv terms and their exchange partners (into slice kj)
        Wovvo[ke] += einsum('jf, emfb->mebj', t1a[kj], vovv)
        WOVVO[ke] += einsum('jf, emfb->mebj', t1b[kj], VOVV)
        WovVO[ke] += einsum('jf, emfb->mebj', t1b[kj], voVV)
        WOVvo[ke] += einsum('jf, emfb->mebj', t1a[kj], VOvv)
        Wovvo[kj] -= einsum('je, emfb->mfbj', t1a[ke], vovv)
        WOVVO[kj] -= einsum('je, emfb->mfbj', t1b[ke], VOVV)
        WOvvO[kj] -= einsum('je, emfb->mfbj', t1b[ke], VOvv)
        WoVVo[kj] -= einsum('je, emfb->mfbj', t1a[ke], voVV)
        # t1(nb) * ooov terms; n is pinned to the fixed index k2
        Wovvo[ke] -= einsum('nb, njme->mebj', t1a[k2], eris.ooov[k2,kj,k0])
        WOVvo[ke] -= einsum('nb, njme->mebj', t1a[k2], eris.ooOV[k2,kj,k0])
        WOVVO[ke] -= einsum('nb, njme->mebj', t1b[k2], eris.OOOV[k2,kj,k0])
        WovVO[ke] -= einsum('nb, njme->mebj', t1b[k2], eris.OOov[k2,kj,k0])
        Wovvo[ke] += einsum('nb, mjne->mebj', t1a[k2], eris.ooov[k0,kj,k2])
        WOVVO[ke] += einsum('nb, mjne->mebj', t1b[k2], eris.OOOV[k0,kj,k2])
        WoVVo[ke] += einsum('nb, mjne->mebj', t1b[k2], eris.ooOV[k0,kj,k2])
        WOvvO[ke] += einsum('nb, mjne->mebj', t1a[k2], eris.OOov[k0,kj,k2])
        for kn in range(nkpts):
            kf = kconserv[k0,ke,kn]
            # t2 * ovov contractions (antisymmetrized same-spin pieces)
            tmp = eris.ovov[k0,ke,kn] - eris.ovov[kn,ke,k0].transpose(2,1,0,3)
            Wovvo[ke] -= 0.5*einsum('jnfb,menf->mebj', t2aa[kj,kn,kf], tmp)
            Wovvo[ke] += 0.5*einsum('jnbf,menf->mebj', t2ab[kj,kn,k2], eris.ovOV[k0,ke,kn])
            tmp = eris.OVOV[k0,ke,kn] - eris.OVOV[kn,ke,k0].transpose(2,1,0,3)
            WOVVO[ke] -= 0.5*einsum('jnfb,menf->mebj', t2bb[kj,kn,kf], tmp)
            WOVVO[ke] += 0.5*einsum('njfb,nfme->mebj', t2ab[kn,kj,kf], eris.ovOV[kn,kf,k0])
            tmp = eris.ovov[k0,ke,kn] - eris.ovov[kn,ke,k0].transpose(2,1,0,3)
            WovVO[ke] += 0.5*einsum('njfb,menf->mebj', t2ab[kn,kj,kf], tmp)
            WovVO[ke] -= 0.5*einsum('jnfb,menf->mebj', t2bb[kj,kn,kf], eris.ovOV[k0,ke,kn])
            tmp = eris.OVOV[k0,ke,kn] - eris.OVOV[kn,ke,k0].transpose(2,1,0,3)
            WOVvo[ke] += 0.5*einsum('jnbf,menf->mebj', t2ab[kj,kn,k2], tmp)
            WOVvo[ke] -= 0.5*einsum('jnfb,nfme->mebj', t2aa[kj,kn,kf], eris.ovOV[kn,kf,k0])
            WoVVo[ke] += 0.5*einsum('jnfb,mfne->mebj', t2ab[kj,kn,kf], eris.ovOV[k0,kf,kn])
            WOvvO[ke] += 0.5*einsum('njbf,nemf->mebj', t2ab[kn,kj,k2], eris.ovOV[kn,ke,k0])
            # quadratic t1*t1 terms only survive when momentum conservation
            # places both indices on this slice
            if kn == k2 and kf == kj:
                tmp = einsum('menf,jf->menj', eris.ovov[k0,ke,kn], t1a[kj])
                tmp-= einsum('nemf,jf->menj', eris.ovov[kn,ke,k0], t1a[kj])
                Wovvo[ke] -= einsum('nb,menj->mebj', t1a[kn], tmp)
                tmp = einsum('menf,jf->menj', eris.OVOV[k0,ke,kn], t1b[kj])
                tmp-= einsum('nemf,jf->menj', eris.OVOV[kn,ke,k0], t1b[kj])
                WOVVO[ke] -= einsum('nb,menj->mebj', t1b[kn], tmp)
                WovVO[ke] -= einsum('jf,nb,menf->mebj',t1b[kj],t1b[kn], eris.ovOV[k0,ke,kn])
                WOVvo[ke] -= einsum('jf,nb,nfme->mebj',t1a[kj],t1a[kn], eris.ovOV[kn,kf,k0])
                WoVVo[ke] += einsum('jf,nb,mfne->mebj',t1a[kj],t1b[kn], eris.ovOV[k0,kf,kn])
                WOvvO[ke] += einsum('jf,nb,nemf->mebj',t1b[kj],t1a[kn], eris.ovOV[kn,ke,k0])
    return Wovvo, WovVO, WOVvo, WOVVO, WoVVo, WOvvO
def kconserv_mat(nkpts, kconserv):
    """Return a 0/1 indicator tensor for crystal-momentum conservation.

    P[ki, kj, ka, kb] is 1 exactly when kb == kconserv[ki, ka, kj], and 0
    otherwise.
    """
    P = np.zeros((nkpts,) * 4)
    for ki, kj, ka in itertools.product(range(nkpts), repeat=3):
        P[ki, kj, ka, kconserv[ki, ka, kj]] = 1
    return P
def Foo(cc, t1, t2, eris):
    """Occupied-occupied intermediate: cc_Foo plus the 0.5 * t1 * Fov dressing."""
    kconserv = cc.khelper.kconserv
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocca, noccb, nvira, nvirb = t2ab.shape[2:]
    Fme_a, Fme_b = cc_Fov(cc, t1, t2, eris)
    Fmi_a, Fmi_b = cc_Foo(cc, t1, t2, eris)
    for kk in range(nkpts):
        Fmi_a[kk] += 0.5 * einsum('ie,me->mi', t1a[kk], Fme_a[kk])
        Fmi_b[kk] += 0.5 * einsum('ie,me->mi', t1b[kk], Fme_b[kk])
    return Fmi_a, Fmi_b
def Fvv(cc, t1, t2, eris):
    """Virtual-virtual intermediate: cc_Fvv minus the 0.5 * Fov * t1 dressing."""
    kconserv = cc.khelper.kconserv
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocca, noccb, nvira, nvirb = t2ab.shape[2:]
    Fme_a, Fme_b = cc_Fov(cc, t1, t2, eris)
    Fae_a, Fae_b = cc_Fvv(cc, t1, t2, eris)
    for kk in range(nkpts):
        Fae_a[kk] -= 0.5 * lib.einsum('me,ma->ae', Fme_a[kk], t1a[kk])
        Fae_b[kk] -= 0.5 * lib.einsum('me,ma->ae', Fme_b[kk], t1b[kk])
    return Fae_a, Fae_b
def Fov(cc, t1, t2, eris):
    """Occupied-virtual intermediate; identical to cc_Fov (no extra dressing)."""
    return cc_Fov(cc, t1, t2, eris)
def Wvvov(cc,t1,t2,eris):
    """Wvvov (aemf-type) intermediates for the four spin cases.

    Each block is the conjugated/transposed vovv integral (antisymmetrized
    for same spin) minus a t1 * ovov dressing.  Momentum conservation ties
    ka to kn, so results are stored at [ka, ke, km] with ka = kn.
    """
    kconserv = cc.khelper.kconserv
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocca, noccb, nvira, nvirb = t2ab.shape[2:]
    Wvvov = np.zeros((nkpts,nkpts,nkpts,nvira,nvira,nocca,nvira),dtype=t1a.dtype)
    WvvOV = np.zeros((nkpts,nkpts,nkpts,nvira,nvira,noccb,nvirb),dtype=t1a.dtype)
    WVVov = np.zeros((nkpts,nkpts,nkpts,nvirb,nvirb,nocca,nvira),dtype=t1a.dtype)
    WVVOV = np.zeros((nkpts,nkpts,nkpts,nvirb,nvirb,noccb,nvirb),dtype=t1a.dtype)
    for kn, km, ke in itertools.product(range(nkpts),repeat=3):
        kf = kconserv[kn, ke, km]
        ka = kn
        # integral part, reordered into (a, e, m, f)
        Wvvov[ka,ke,km] += eris.vovv[kf,km,ke].transpose(3,2,1,0).conj() - eris.vovv[ke,km,kf].transpose(3,0,1,2).conj()
        WVVov[ka,ke,km] += eris.voVV[kf,km,ke].transpose(3,2,1,0).conj()
        WvvOV[ka,ke,km] += eris.VOvv[kf,km,ke].transpose(3,2,1,0).conj()
        WVVOV[ka,ke,km] += eris.VOVV[kf,km,ke].transpose(3,2,1,0).conj() - eris.VOVV[ke,km,kf].transpose(3,0,1,2).conj()
        # antisymmetrized same-spin ovov integrals for the t1 dressing
        ovov = eris.ovov[kn, ke, km] - eris.ovov[kn, kf, km].transpose(0,3,2,1)
        OVOV = eris.OVOV[kn, ke, km] - eris.OVOV[kn, kf, km].transpose(0,3,2,1)
        Wvvov[ka,ke,km] += -lib.einsum('na,nemf->aemf',t1a[kn],ovov)
        WvvOV[ka,ke,km] += -lib.einsum('na,neMF->aeMF',t1a[kn],eris.ovOV[kn,ke,km])
        WVVov[ka,ke,km] += -lib.einsum('NA,NEmf->AEmf',t1b[kn],eris.OVov[kn,ke,km])
        WVVOV[ka,ke,km] += -lib.einsum('NA,NEMF->AEMF',t1b[kn],OVOV)
    return Wvvov, WvvOV, WVVov, WVVOV
def Wvvvo(cc,t1,t2,eris):
    """Wvvvo (aebi-type) intermediates for the four spin cases.

    Assembled in three passes over (ka, ke, kb) with ki fixed by momentum
    conservation: (1) the -<mb||ef> t2 and -t1(ma)(<mb||ei> - t2 ovov) terms
    plus their same-spin P(ab) partners; (2) the remaining P(ab) pieces for
    the mixed-spin blocks; (3) bare integrals, -Fov * t2, t1 * Wvvvv and the
    tau * ovoo contributions.
    """
    kconserv = cc.khelper.kconserv
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocca, noccb, nvira, nvirb = t2ab.shape[2:]
    fova, fovb = cc_Fov(cc, t1, t2, eris)
    tauaa, tauab, taubb = make_tau(cc, t2, t1, t1)
    Wvvvo = np.zeros((nkpts,nkpts,nkpts,nvira,nvira,nvira,nocca),dtype=t1a.dtype)
    WvvVO = np.zeros((nkpts,nkpts,nkpts,nvira,nvira,nvirb,noccb),dtype=t1a.dtype)
    WVVvo = np.zeros((nkpts,nkpts,nkpts,nvirb,nvirb,nvira,nocca),dtype=t1a.dtype)
    WVVVO = np.zeros((nkpts,nkpts,nkpts,nvirb,nvirb,nvirb,noccb),dtype=t1a.dtype)
    for ka, ke, kb in itertools.product(range(nkpts),repeat=3):
        ki = kconserv[ka, ke, kb]
        # - <mb||ef> t2(miaf)
        for km in range(nkpts):
            kf = kconserv[km,ke,kb]
            ovvv = eris.vovv[ke,km,kf].transpose(1,0,3,2).conj() - eris.vovv[kf,km,ke].transpose(1,2,3,0).conj()
            OVvv = eris.VOvv[ke,km,kf].transpose(1,0,3,2).conj()
            ovVV = eris.voVV[ke,km,kf].transpose(1,0,3,2).conj()
            OVVV = eris.VOVV[ke,km,kf].transpose(1,0,3,2).conj() - eris.VOVV[kf,km,ke].transpose(1,2,3,0).conj()
            aebi = lib.einsum('mebf,miaf->aebi',ovvv,t2aa[km,ki,ka])
            aebi += lib.einsum('MFbe,iMaF->aebi',eris.VOvv[kf,km,ke].transpose(1,0,3,2).conj(),t2ab[ki,km,ka])
            Wvvvo[ka,ke,kb] -= aebi
            # P(ab) for all alpha spin
            Wvvvo[kb,ke,ka] += aebi.transpose(2,1,0,3)
            WVVvo[ka,ke,kb] -= lib.einsum('MEbf,iMfA->AEbi',OVvv,t2ab[ki,km,kf])
            WvvVO[ka,ke,kb] -= lib.einsum('meBF,mIaF->aeBI',ovVV,t2ab[km,ki,ka])
            AEBI = lib.einsum('MEBF,MIAF->AEBI',OVVV,t2bb[km,ki,ka])
            AEBI += lib.einsum('mfBE,mIfA->AEBI',eris.voVV[kf,km,ke].transpose(1,0,3,2).conj(),t2ab[km,ki,kf])
            WVVVO[ka,ke,kb] -= AEBI
            # P(ab) for all beta spin
            WVVVO[kb,ke,ka] += AEBI.transpose(2,1,0,3)
        # - t1(ma) (<mb||ei> - t2(nibf) <mn||ef>)
        km = ka
        ovvo = eris.voov[ke,km,ki].transpose(1,0,3,2).conj() - eris.oovv[km,ki,kb].transpose(0,3,2,1)
        OVvo = eris.VOov[ke,km,ki].transpose(1,0,3,2).conj()
        ovVO = eris.voOV[ke,km,ki].transpose(1,0,3,2).conj()
        OVVO = eris.VOOV[ke,km,ki].transpose(1,0,3,2).conj() - eris.OOVV[km,ki,kb].transpose(0,3,2,1)
        tmp1aa = np.zeros((nocca, nvira, nvira, nocca),dtype=t1a.dtype)
        tmp1ab = np.zeros((nocca, nvira, nvirb, noccb),dtype=t1a.dtype)
        tmp1ba = np.zeros((noccb, nvirb, nvira, nocca),dtype=t1a.dtype)
        tmp1bb = np.zeros((noccb, nvirb, nvirb, noccb),dtype=t1a.dtype)
        for kn in range(nkpts):
            kf = kconserv[km,ke,kn]
            ovov = eris.ovov[km,ke,kn] - eris.ovov[km,kf,kn].transpose(0,3,2,1)
            OVov = eris.OVov[km,ke,kn]
            ovOV = eris.ovOV[km,ke,kn]
            OVOV = eris.OVOV[km,ke,kn] - eris.OVOV[km,kf,kn].transpose(0,3,2,1)
            tmp1aa -= einsum('nibf,menf->mebi',t2aa[kn,ki,kb], ovov)
            tmp1aa += einsum('iNbF,meNF->mebi',t2ab[ki,kn,kb], ovOV)
            tmp1ab += einsum('nIfB,menf->meBI',t2ab[kn,ki,kf], ovov)
            tmp1ab -= einsum('NIBF,meNF->meBI',t2bb[kn,ki,kb], ovOV)
            tmp1ba += einsum('iNbF,MENF->MEbi',t2ab[ki,kn,kb], OVOV)
            tmp1ba -= einsum('nibf,MEnf->MEbi',t2aa[kn,ki,kb], OVov)
            tmp1bb -= einsum('NIBF,MENF->MEBI',t2bb[kn,ki,kb], OVOV)
            tmp1bb += einsum('nIfB,MEnf->MEBI',t2ab[kn,ki,kf], OVov)
        aebi = einsum('ma,mebi->aebi',t1a[km],(ovvo+tmp1aa))
        Wvvvo[ka,ke,kb] -= aebi
        WVVvo[ka,ke,kb] -= einsum('MA,MEbi->AEbi',t1b[km],OVvo+tmp1ba)
        WvvVO[ka,ke,kb] -= einsum('ma,meBI->aeBI',t1a[km],ovVO+tmp1ab)
        AEBI = einsum('MA,MEBI->AEBI',t1b[km],(OVVO+tmp1bb))
        WVVVO[ka,ke,kb] -= AEBI
    for ka, ke, kb in itertools.product(range(nkpts),repeat=3):
        ki = kconserv[ka, ke, kb]
        # P(ab) <mb||ef> t2(miaf) (alpha alpha beta beta) and (beta beta alpha alpha)
        for km in range(nkpts):
            kf = kconserv[km,ke,ka]
            OVVV = eris.VOVV[ke,km,kf].transpose(1,0,3,2).conj() - eris.VOVV[kf,km,ke].transpose(1,2,3,0).conj()
            ovvv = eris.vovv[ke,km,kf].transpose(1,0,3,2).conj() - eris.vovv[kf,km,ke].transpose(1,2,3,0).conj()
            WVVvo[ka,ke,kb] -= lib.einsum('mfAE,mibf->AEbi', eris.voVV[kf,km,ke].transpose(1,0,3,2).conj(), t2aa[km,ki,kb])
            WVVvo[ka,ke,kb] -= lib.einsum('MEAF,iMbF->AEbi', OVVV, t2ab[ki,km,kb])
            WvvVO[ka,ke,kb] -= lib.einsum('MFae,MIBF->aeBI', eris.VOvv[kf,km,ke].transpose(1,0,3,2).conj(), t2bb[km,ki,kb])
            WvvVO[ka,ke,kb] -= lib.einsum('meaf,mIfB->aeBI', ovvv, t2ab[km,ki,kf])
        # P(ab) -t1(ma) (<mb||ei> - t2(nibf) <mn||ef>) for all spin configurations
        km = kb
        ovvo = eris.voov[ke,km,ki].transpose(1,0,3,2).conj() - eris.oovv[km,ki,ka].transpose(0,3,2,1)
        OVVO = eris.VOOV[ke,km,ki].transpose(1,0,3,2).conj() - eris.OOVV[km,ki,ka].transpose(0,3,2,1)
        tmp1aa = np.zeros((nocca, nvira, nvira, nocca),dtype=t1a.dtype)
        tmp1ab = np.zeros((noccb, nvira, nvira, noccb),dtype=t1a.dtype)
        tmp1ba = np.zeros((nocca, nvirb, nvirb, nocca),dtype=t1a.dtype)
        tmp1bb = np.zeros((noccb, nvirb, nvirb, noccb),dtype=t1a.dtype)
        for kn in range(nkpts):
            kf = kconserv[km,ke,kn]
            ovov = eris.ovov[km,ke,kn] - eris.ovov[km,kf,kn].transpose(0,3,2,1)
            OVov = eris.OVov[km,ke,kn]
            ovOV = eris.ovOV[km,ke,kn]
            OVOV = eris.OVOV[km,ke,kn] - eris.OVOV[km,kf,kn].transpose(0,3,2,1)
            tmp1aa -= einsum('niaf,menf->meai',t2aa[kn,ki,ka], ovov)
            tmp1aa += einsum('iNaF,meNF->meai',t2ab[ki,kn,ka], ovOV)
            tmp1ab += einsum('nIaF,MFne->MeaI',t2ab[kn,ki,ka], eris.OVov[km,kf,kn])
            tmp1ba += einsum('iNfA,mfNE->mEAi',t2ab[ki,kn,kf], eris.ovOV[km,kf,kn])
            tmp1bb -= einsum('NIAF,MENF->MEAI',t2bb[kn,ki,ka], OVOV)
            tmp1bb += einsum('nIfA,MEnf->MEAI',t2ab[kn,ki,kf], OVov)
        aebi = einsum('mb,meai->aebi',t1a[km],(ovvo+tmp1aa))
        Wvvvo[ka,ke,kb] += aebi
        WVVvo[ka,ke,kb] += einsum('mb,mEAi->AEbi',t1a[km], -eris.ooVV[km,ki,ka].transpose(0,3,2,1)+tmp1ba)
        WvvVO[ka,ke,kb] += einsum('MB,MeaI->aeBI',t1b[km], -eris.OOvv[km,ki,ka].transpose(0,3,2,1)+tmp1ab)
        AEBI = einsum('MB,MEAI->AEBI',t1b[km],(OVVO+tmp1bb))
        WVVVO[ka,ke,kb] += AEBI
    # Remaining terms
    for ka, ke, kb in itertools.product(range(nkpts),repeat=3):
        ki = kf = kconserv[ka, ke, kb]
        # bare vovv-type integrals
        Wvvvo[ka,ke,kb] += eris.vovv[kb,ki,ka].transpose(2,3,0,1) - eris.vovv[ka,ki,kb].transpose(0,3,2,1)
        WVVvo[ka,ke,kb] += eris.voVV[kb,ki,ka].transpose(2,3,0,1)
        WvvVO[ka,ke,kb] += eris.VOvv[kb,ki,ka].transpose(2,3,0,1)
        WVVVO[ka,ke,kb] += eris.VOVV[kb,ki,ka].transpose(2,3,0,1) - eris.VOVV[ka,ki,kb].transpose(0,3,2,1)
        # - Fov * t2
        Wvvvo[ka,ke,kb] -= lib.einsum('me,miab->aebi',fova[ke],t2aa[ke,ki,ka])
        WVVvo[ka,ke,kb] -= lib.einsum('ME,iMbA->AEbi',fovb[ke],t2ab[ki,ke,kb])
        WvvVO[ka,ke,kb] -= lib.einsum('me,mIaB->aeBI',fova[ke],t2ab[ke,ki,ka])
        WVVVO[ka,ke,kb] -= lib.einsum('ME,MIAB->AEBI',fovb[ke],t2bb[ke,ki,ka])
        # t1 * Wvvvv, building the needed (ka,kb,ke) block on the fly
        Wvvvv, WvvVV, WVVVV = get_Wvvvv(cc, t1, t2, eris, ka, kb, ke)
        Wvvvo[ka,ke,kb] += lib.einsum('if,aebf->aebi', t1a[ki], Wvvvv)
        WVVvo[kb,kf,ka] += lib.einsum('ie,aeBF->BFai', t1a[ke], WvvVV)
        WvvVO[ka,ke,kb] += lib.einsum('IF,aeBF->aeBI', t1b[ki], WvvVV)
        WVVVO[ka,ke,kb] += lib.einsum('IF,AEBF->AEBI', t1b[ki], WVVVV)
        # 0.5 * tau * ovoo-type contractions
        for km in range(nkpts):
            kn = kconserv[ka,km,kb]
            ovoo = eris.ooov[kn,ki,km].transpose(2,3,0,1) - eris.ooov[km,ki,kn].transpose(0,3,2,1)
            ovOO = eris.OOov[kn,ki,km].transpose(2,3,0,1)
            ooOV = eris.ooOV[km,ki,kn]
            OOov = eris.OOov[km,ki,kn]
            OVoo = eris.ooOV[kn,ki,km].transpose(2,3,0,1)
            OVOO = eris.OOOV[kn,ki,km].transpose(2,3,0,1) - eris.OOOV[km,ki,kn].transpose(0,3,2,1)
            Wvvvo[ka,ke,kb] += 0.5*lib.einsum('meni,mnab->aebi',ovoo,tauaa[km,kn,ka])
            WVVvo[ka,ke,kb] += 0.5*lib.einsum('MEni,nMbA->AEbi',OVoo,tauab[kn,km,kb])
            WVVvo[ka,ke,kb] += 0.5*lib.einsum('miNE,mNbA->AEbi',ooOV,tauab[km,kn,kb])
            WvvVO[ka,ke,kb] += 0.5*lib.einsum('meNI,mNaB->aeBI',ovOO,tauab[km,kn,ka])
            WvvVO[ka,ke,kb] += 0.5*lib.einsum('MIne,nMaB->aeBI',OOov,tauab[kn,km,ka])
            WVVVO[ka,ke,kb] += 0.5*lib.einsum('MENI,MNAB->AEBI',OVOO,taubb[km,kn,ka])
    return Wvvvo, WvvVO, WVVvo, WVVVO
def Woooo(cc,t1,t2,eris):
    """Build the occ-occ-occ-occ (Woooo) CCSD intermediates for the three
    spin blocks (aaaa, aabb, bbbb) over the k-point mesh.

    Returns (Woooo, WooOO, WOOoo, WOOOO); WOOoo is returned as None since
    it is recoverable from WooOO by permutation symmetry.
    """
    kconserv = cc.khelper.kconserv
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    _, _, nkpts, nocca, noccb, nvira, nvirb = t2ab.shape
    dtype = np.result_type(*t2)
    Woooo = np.zeros(eris.oooo.shape, dtype=dtype)
    WooOO = np.zeros(eris.ooOO.shape, dtype=dtype)
    WOOOO = np.zeros(eris.OOOO.shape, dtype=dtype)
    tau_aa, tau_ab, tau_bb = make_tau(cc, t2, t1, t1)
    for km in range(nkpts):
        for kn in range(nkpts):
            # t1 * <mi||ne> contractions; 'x'/'y' run over the free k-point axes
            tmp_aaaaJ = einsum('xje, ymine->yxminj', t1a, eris.ooov[km,:,kn])
            tmp_aaaaJ-= einsum('yie, xmjne->yxminj', t1a, eris.ooov[km,:,kn])
            tmp_bbbbJ = einsum('xje, ymine->yxminj', t1b, eris.OOOV[km,:,kn])
            tmp_bbbbJ-= einsum('yie, xmjne->yxminj', t1b, eris.OOOV[km,:,kn])
            #tmp_aabbJ = einsum('xje, ymine->yxminj', t1b, eris.ooOV[km,:,kn])
            #tmp_bbaaJ = einsum('xje, ymine->yxminj', t1a, eris.OOov[km,:,kn])
            #tmp_abbaJ = -einsum('yie,xmjne->yxminj', t1b, eris.ooOV[km,:,kn])
            tmp_baabJ = -einsum('yie,xmjne->yxminj', t1a, eris.OOov[km,:,kn])
            tmp_aabbJ = einsum('xje, ymine->yxminj', t1b, eris.ooOV[km,:,kn])
            for ki in range(nkpts):
                # momentum conservation fixes kj from (km, ki, kn)
                kj = kconserv[km,ki,kn]
                Woooo[km,ki,kn] += tmp_aaaaJ[ki,kj]
                WOOOO[km,ki,kn] += tmp_bbbbJ[ki,kj]
                WooOO[km,ki,kn] += tmp_aabbJ[ki,kj]
                WooOO[kn,ki,km] -= tmp_baabJ[ki,kj].transpose(2,1,0,3)
                Woooo[km,ki,kn] += eris.oooo[km,ki,kn]
                WooOO[km,ki,kn] += eris.ooOO[km,ki,kn]
                WOOOO[km,ki,kn] += eris.OOOO[km,ki,kn]
    # Antisymmetrize the same-spin blocks: W(minj) -> W(minj) - W(nimj)
    Woooo = Woooo - Woooo.transpose(2,1,0,5,4,3,6)
    WOOOO = WOOOO - WOOOO.transpose(2,1,0,5,4,3,6)
    # tau * <me||nf> contributions
    for km, ki, kn in itertools.product(range(nkpts), repeat=3):
        kj = kconserv[km, ki, kn]
        for ke in range(nkpts):
            kf = kconserv[km, ke, kn]
            ovov = eris.ovov[km, ke, kn] - eris.ovov[km, kf, kn].transpose(0,3,2,1)
            OVOV = eris.OVOV[km, ke, kn] - eris.OVOV[km, kf, kn].transpose(0,3,2,1)
            Woooo[km, ki, kn] += 0.5*lib.einsum('ijef,menf->minj', tau_aa[ki, kj, ke], ovov)
            WOOOO[km, ki, kn] += 0.5*lib.einsum('IJEF,MENF->MINJ', tau_bb[ki, kj, ke], OVOV)
            WooOO[km, ki, kn] += lib.einsum('iJeF,meNF->miNJ', tau_ab[ki, kj, ke], eris.ovOV[km, ke, kn])
    WOOoo = None  # not built explicitly; obtainable from WooOO by symmetry
    return Woooo, WooOO, WOOoo, WOOOO
def Woovo(cc,t1,t2,eris):
    """Build the oovo-type W intermediates for the four spin blocks
    (Woovo, WooVO, WOOvo, WOOVO) over the k-point mesh.
    """
    kconserv = cc.khelper.kconserv
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocca, noccb, nvira, nvirb = t2ab.shape[2:]
    # NOTE(review): P appears unused in this function — confirm before removing
    P = kconserv_mat(nkpts, kconserv)
    dtype = np.result_type(*t2)
    Woovo = np.zeros((nkpts, nkpts, nkpts, nocca, nocca, nvira, nocca), dtype=dtype)
    WooVO = np.zeros((nkpts, nkpts, nkpts, nocca, nocca, nvirb, noccb), dtype=dtype)
    WOOvo = np.zeros((nkpts, nkpts, nkpts, noccb, noccb, nvira, nocca), dtype=dtype)
    WOOVO = np.zeros((nkpts, nkpts, nkpts, noccb, noccb, nvirb, noccb), dtype=dtype)
    for km, kb, ki in kpts_helper.loop_kkk(nkpts):
        kj = kconserv[km, ki, kb]
        # Bare integral contributions
        Woovo[km,ki,kb] += eris.ooov[ki,km,kj].transpose(1,0,3,2).conj() - eris.ooov[kj,km,ki].transpose(1,2,3,0).conj()
        WooVO[km,ki,kb] += eris.ooOV[ki,km,kj].transpose(1,0,3,2).conj()
        WOOvo[km,ki,kb] += eris.OOov[ki,km,kj].transpose(1,0,3,2).conj()
        WOOVO[km,ki,kb] += eris.OOOV[ki,km,kj].transpose(1,0,3,2).conj() - eris.OOOV[kj,km,ki].transpose(1,2,3,0).conj()
        for kn in range(nkpts):
            ke = kconserv[km,ki,kn]
            ooov = eris.ooov[km,ki,kn] - eris.ooov[kn,ki,km].transpose(2,1,0,3)
            OOOV = eris.OOOV[km,ki,kn] - eris.OOOV[kn,ki,km].transpose(2,1,0,3)
            Woovo[km,ki,kb] += einsum('mine,jnbe->mibj', ooov, t2aa[kj,kn,kb]) + einsum('miNE,jNbE->mibj', eris.ooOV[km,ki,kn], t2ab[kj,kn,kb])
            WooVO[km,ki,kb] += einsum('mine,nJeB->miBJ', ooov, t2ab[kn,kj,ke]) + einsum('miNE,JNBE->miBJ', eris.ooOV[km,ki,kn], t2bb[kj,kn,kb])
            WOOvo[km,ki,kb] += einsum('MINE,jNbE->MIbj', OOOV, t2ab[kj,kn,kb]) + einsum('MIne,jnbe->MIbj', eris.OOov[km,ki,kn], t2aa[kj,kn,kb])
            WOOVO[km,ki,kb] += einsum('MINE,JNBE->MIBJ', OOOV, t2bb[kj,kn,kb]) + einsum('MIne,nJeB->MIBJ', eris.OOov[km,ki,kn], t2ab[kn,kj,ke])
            # P(ij) permutation of the block above
            ke = kconserv[km,kj,kn]
            ooov = eris.ooov[km,kj,kn] - eris.ooov[kn,kj,km].transpose(2,1,0,3)
            OOOV = eris.OOOV[km,kj,kn] - eris.OOOV[kn,kj,km].transpose(2,1,0,3)
            Woovo[km,ki,kb] -= einsum('mjne,inbe->mibj', ooov, t2aa[ki,kn,kb]) + einsum('mjNE,iNbE->mibj', eris.ooOV[km,kj,kn], t2ab[ki,kn,kb])
            WooVO[km,ki,kb] -= einsum('NJme,iNeB->miBJ', eris.OOov[kn,kj,km], t2ab[ki,kn,ke])
            WOOvo[km,ki,kb] -= einsum('njME,nIbE->MIbj', eris.ooOV[kn,kj,km], t2ab[kn,ki,kb])
            WOOVO[km,ki,kb] -= einsum('MJNE,INBE->MIBJ', OOOV, t2bb[ki,kn,kb]) + einsum('MJne,nIeB->MIBJ', eris.OOov[km,kj,kn], t2ab[kn,ki,ke])
        # t1 * <me||bj> contributions
        ovvo = eris.voov[ki,km,kj].transpose(1,0,3,2).conj() - eris.oovv[km,kj,kb].transpose(0,3,2,1)
        OVVO = eris.VOOV[ki,km,kj].transpose(1,0,3,2).conj() - eris.OOVV[km,kj,kb].transpose(0,3,2,1)
        ovVO = eris.voOV[ki,km,kj].transpose(1,0,3,2).conj()
        OVvo = eris.VOov[ki,km,kj].transpose(1,0,3,2).conj()
        Woovo[km,ki,kb] += einsum('ie,mebj->mibj', t1a[ki], ovvo)
        WooVO[km,ki,kb] += einsum('ie,meBJ->miBJ', t1a[ki], ovVO)
        WOOvo[km,ki,kb] += einsum('IE,MEbj->MIbj', t1b[ki], OVvo)
        WOOVO[km,ki,kb] += einsum('IE,MEBJ->MIBJ', t1b[ki], OVVO)
        #P(ij)
        ovvo = eris.voov[kj,km,ki].transpose(1,0,3,2).conj() - eris.oovv[km,ki,kb].transpose(0,3,2,1)
        OVVO = eris.VOOV[kj,km,ki].transpose(1,0,3,2).conj() - eris.OOVV[km,ki,kb].transpose(0,3,2,1)
        Woovo[km,ki,kb] -= einsum('je,mebi->mibj', t1a[kj], ovvo)
        WooVO[km,ki,kb] -= -einsum('JE,miBE->miBJ', t1b[kj], eris.ooVV[km,ki,kb])
        WOOvo[km,ki,kb] -= -einsum('je,MIbe->MIbj', t1a[kj], eris.OOvv[km,ki,kb])
        WOOVO[km,ki,kb] -= einsum('JE,MEBI->MIBJ', t1b[kj], OVVO)
        # t1 * t2 * <me||nf> contributions
        for kf in range(nkpts):
            kn = kconserv[kb, kj, kf]
            ovov = eris.ovov[km,ki,kn] - eris.ovov[km,kf,kn].transpose(0,3,2,1)
            OVOV = eris.OVOV[km,ki,kn] - eris.OVOV[km,kf,kn].transpose(0,3,2,1)
            Woovo[km,ki,kb] -= einsum('ie,njbf,menf->mibj', t1a[ki], t2aa[kn,kj,kb], ovov) - einsum('ie,jNbF,meNF->mibj', t1a[ki], t2ab[kj,kn,kb], eris.ovOV[km,ki,kn])
            WooVO[km,ki,kb] -= -einsum('ie,nJfB,menf->miBJ', t1a[ki], t2ab[kn,kj,kf], ovov) + einsum('ie,NJBF,meNF->miBJ', t1a[ki], t2bb[kn,kj,kb], eris.ovOV[km,ki,kn])
            WOOvo[km,ki,kb] -= -einsum('IE,jNbF,MENF->MIbj', t1b[ki], t2ab[kj,kn,kb], OVOV) + einsum('IE,njbf,MEnf->MIbj', t1b[ki], t2aa[kn,kj,kb], eris.OVov[km,ki,kn])
            WOOVO[km,ki,kb] -= einsum('IE,NJBF,MENF->MIBJ', t1b[ki], t2bb[kn,kj,kb], OVOV) - einsum('IE,nJfB,MEnf->MIBJ', t1b[ki], t2ab[kn,kj,kf], eris.OVov[km,ki,kn])
            #P(ij)
            kn = kconserv[kb, ki, kf]
            ovov = eris.ovov[km,kj,kn] - eris.ovov[km,kf,kn].transpose(0,3,2,1)
            OVOV = eris.OVOV[km,kj,kn] - eris.OVOV[km,kf,kn].transpose(0,3,2,1)
            Woovo[km,ki,kb] += einsum('je,nibf,menf->mibj', t1a[kj], t2aa[kn,ki,kb], ovov) - einsum('je,iNbF,meNF->mibj', t1a[kj], t2ab[ki,kn,kb], eris.ovOV[km,kj,kn])
            WooVO[km,ki,kb] += -einsum('JE,iNfB,mfNE->miBJ', t1b[kj], t2ab[ki,kn,kf], eris.ovOV[km, kf, kn])
            WOOvo[km,ki,kb] += -einsum('je,nIbF,MFne->MIbj', t1a[kj], t2ab[kn,ki,kb], eris.OVov[km, kf, kn])
            WOOVO[km,ki,kb] += einsum('JE,NIBF,MENF->MIBJ', t1b[kj], t2bb[kn,ki,kb], OVOV) - einsum('JE,nIfB,MEnf->MIBJ', t1b[kj], t2ab[kn,ki,kf], eris.OVov[km,kj,kn])
    # Fov and Woooo dressed contributions
    Fme, FME = Fov(cc, t1, t2, eris)
    Wminj, WmiNJ, WMInj, WMINJ = Woooo(cc,t1,t2,eris)
    tauaa, tauab, taubb = make_tau(cc, t2, t1, t1, fac=1.)
    for km, kb, ki in kpts_helper.loop_kkk(nkpts):
        kj = kconserv[km, ki, kb]
        Woovo[km,ki,kb] -= einsum('me,ijbe->mibj', Fme[km], t2aa[ki,kj,kb])
        WooVO[km,ki,kb] -= -einsum('me,iJeB->miBJ', Fme[km], t2ab[ki,kj,km])
        WOOvo[km,ki,kb] -= -einsum('ME,jIbE->MIbj', FME[km], t2ab[kj,ki,kb])
        WOOVO[km,ki,kb] -= einsum('ME,IJBE->MIBJ', FME[km], t2bb[ki,kj,kb])
        Woovo[km,ki,kb] -= einsum('nb, minj->mibj', t1a[kb], Wminj[km, ki, kb])
        WooVO[km,ki,kb] -= einsum('NB, miNJ->miBJ', t1b[kb], WmiNJ[km, ki, kb])
        WOOvo[km,ki,kb] -= einsum('nb, njMI->MIbj', t1a[kb], WmiNJ[kb, kj, km])
        WOOVO[km,ki,kb] -= einsum('NB, MINJ->MIBJ', t1b[kb], WMINJ[km, ki, kb])
    # tau * <me||bf> contributions
    for km, kb, ki in kpts_helper.loop_kkk(nkpts):
        kj = kconserv[km, ki, kb]
        for ke in range(nkpts):
            kf = kconserv[km,ke,kb]
            ovvv = eris.vovv[ke,km,kf].transpose(1,0,3,2).conj() - eris.vovv[kf,km,ke].transpose(1,2,3,0).conj()
            OVVV = eris.VOVV[ke,km,kf].transpose(1,0,3,2).conj() - eris.VOVV[kf,km,ke].transpose(1,2,3,0).conj()
            ovVV = eris.voVV[ke,km,kf].transpose(1,0,3,2).conj()
            OVvv = eris.VOvv[ke,km,kf].transpose(1,0,3,2).conj()
            Woovo[km,ki,kb] += 0.5 * einsum('mebf,ijef->mibj', ovvv, tauaa[ki,kj,ke])
            WooVO[km,ki,kb] += einsum('meBF,iJeF->miBJ', ovVV, tauab[ki,kj,ke])
            WOOvo[km,ki,kb] += einsum('MEbf,jIfE->MIbj', OVvv, tauab[kj,ki,kf])
            WOOVO[km,ki,kb] += 0.5 * einsum('MEBF,IJEF->MIBJ', OVVV, taubb[ki,kj,ke])
    return Woovo, WooVO, WOOvo, WOOVO
def Wooov(cc, t1, t2, eris, kconserv):
    """Build the ooov-type W intermediates (Wooov, WooOV, WOOov, WOOOV).

    Same-spin blocks are antisymmetrized combinations of the bare ooov
    integrals plus t1-dressed ovov contractions; opposite-spin blocks take
    only the direct terms.

    Note: `kconserv` is retained in the signature for interface
    compatibility but is not needed by the current implementation.
    """
    t1a, t1b = t1
    nkpts = t1a.shape[0]
    # Bare integrals; same-spin blocks are antisymmetrized over (m <-> n)
    Wooov = eris.ooov - np.asarray(eris.ooov).transpose(2,1,0,5,4,3,6)
    WooOV = np.array(eris.ooOV, copy=True)
    WOOov = np.array(eris.OOov, copy=True)
    WOOOV = eris.OOOV - np.asarray(eris.OOOV).transpose(2,1,0,5,4,3,6)
    # t1 * <mf|ne> dressing; 'x'/'y'/'z' run over the k-point axes
    Wooov += einsum('yif,xyzmfne->xyzmine', t1a, eris.ovov) - einsum('yif, zyxnfme->xyzmine', t1a, eris.ovov)
    WooOV += einsum('yif,xyzmfNE->xyzmiNE', t1a, eris.ovOV)
    WOOov += einsum('yIF,xyzMFne->xyzMIne', t1b, eris.OVov)
    WOOOV += einsum('yIF,xyzMFNE->xyzMINE', t1b, eris.OVOV) - einsum('yIF, zyxNFME->xyzMINE', t1b, eris.OVOV)
    return Wooov, WooOV, WOOov, WOOOV
def Wovvo(cc, t1, t2, eris):
    """Build the ovvo-type W intermediates by adding the remaining 0.5 * t2
    * <me||nf> terms on top of the CCSD-level cc_Wovvo intermediates.

    Returns (Wovvo, WovVO, WOVvo, WOVVO); the WoVVo/WOvvO pieces from
    cc_Wovvo are not propagated.
    """
    kconserv = cc.khelper.kconserv
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocca, noccb, nvira, nvirb = t2ab.shape[2:]
    Wovvo, WovVO, WOVvo, WOVVO, WoVVo, WOvvO = cc_Wovvo(cc,t1,t2,eris)
    for km, kb, ke in kpts_helper.loop_kkk(nkpts):
        kj = kconserv[km, ke, kb]
        for kn in range(nkpts):
            kf = kconserv[km, ke, kn]
            Wovvo[km,ke,kb] += 0.5 * einsum('jnbf,menf->mebj', t2aa[kj,kn,kb], eris.ovov[km,ke,kn])
            Wovvo[km,ke,kb] -= 0.5 * einsum('jnbf,mfne->mebj', t2aa[kj,kn,kb], eris.ovov[km,kf,kn])
            Wovvo[km,ke,kb] += 0.5 * einsum('jNbF,meNF->mebj', t2ab[kj,kn,kb], eris.ovOV[km,ke,kn])
            WOVvo[km,ke,kb] += 0.5 * einsum('jNbF,MENF->MEbj', t2ab[kj,kn,kb], eris.OVOV[km,ke,kn])
            WOVvo[km,ke,kb] -= 0.5 * einsum('jNbF,MFNE->MEbj', t2ab[kj,kn,kb], eris.OVOV[km,kf,kn])
            WOVvo[km,ke,kb] += 0.5 * einsum('jnbf,MEnf->MEbj', t2aa[kj,kn,kb], eris.OVov[km,ke,kn])
            WovVO[km,ke,kb] += 0.5 * einsum('nJfB,menf->meBJ', t2ab[kn,kj,kf], eris.ovov[km,ke,kn])
            WovVO[km,ke,kb] -= 0.5 * einsum('nJfB,mfne->meBJ', t2ab[kn,kj,kf], eris.ovov[km,kf,kn])
            WovVO[km,ke,kb] += 0.5 * einsum('JNBF,meNF->meBJ', t2bb[kj,kn,kb], eris.ovOV[km,ke,kn])
            WOVVO[km,ke,kb] += 0.5 * einsum('JNBF,MENF->MEBJ', t2bb[kj,kn,kb], eris.OVOV[km,ke,kn])
            WOVVO[km,ke,kb] -= 0.5 * einsum('JNBF,MFNE->MEBJ', t2bb[kj,kn,kb], eris.OVOV[km,kf,kn])
            WOVVO[km,ke,kb] += 0.5 * einsum('nJfB,MEnf->MEBJ', t2ab[kn,kj,kf], eris.OVov[km,ke,kn])
    return Wovvo, WovVO, WOVvo, WOVVO
def W1oovv(cc, t1, t2, eris):
    """First part of the oovv-type W intermediates: bare integrals plus
    t2 * <lc||kd> contractions (no t1 dressing; see W2oovv).
    """
    kconserv = cc.khelper.kconserv
    dtype = np.result_type(*t1)
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocc, nvir = t1a.shape
    Woovv = np.zeros(eris.oovv.shape, dtype=dtype)
    WooVV = np.zeros(eris.ooVV.shape, dtype=dtype)
    WOOvv = np.zeros(eris.OOvv.shape, dtype=dtype)
    WOOVV = np.zeros(eris.OOVV.shape, dtype=dtype)
    for kk in range(nkpts):
        for ki in range(nkpts):
            for kb in range(nkpts):
                kd = kconserv[kk,ki,kb]
                # Bare integrals; same-spin blocks get the exchange term
                Woovv[kk,ki,kb] += eris.oovv[kk,ki,kb]
                Woovv[kk,ki,kb] -= eris.voov[kb,ki,kk].transpose(2,1,0,3)
                WooVV[kk,ki,kb] += eris.ooVV[kk,ki,kb]
                WOOvv[kk,ki,kb] += eris.OOvv[kk,ki,kb]
                WOOVV[kk,ki,kb] += eris.OOVV[kk,ki,kb]
                WOOVV[kk,ki,kb] -= eris.VOOV[kb,ki,kk].transpose(2,1,0,3)
                # t2 * ovov contractions
                for kl in range(nkpts):
                    kc = kconserv[ki,kb,kl]
                    Woovv[kk,ki,kb] -= einsum('lckd,ilbc->kibd', eris.ovov[kl,kc,kk], t2aa[ki,kl,kb])
                    Woovv[kk,ki,kb] += einsum('ldkc,ilbc->kibd', eris.ovov[kl,kd,kk], t2aa[ki,kl,kb])
                    Woovv[kk,ki,kb] -= einsum('LCkd,iLbC->kibd', eris.OVov[kl,kc,kk], t2ab[ki,kl,kb])
                    WooVV[kk,ki,kb] -= einsum('kcLD,iLcB->kiBD', eris.ovOV[kk,kc,kl], t2ab[ki,kl,kc])
                    WOOvv[kk,ki,kb] -= einsum('KCld,lIbC->KIbd', eris.OVov[kk,kc,kl], t2ab[kl,ki,kb])
                    WOOVV[kk,ki,kb] -= einsum('LCKD,ILBC->KIBD', eris.OVOV[kl,kc,kk], t2bb[ki,kl,kb])
                    WOOVV[kk,ki,kb] += einsum('LDKC,ILBC->KIBD', eris.OVOV[kl,kd,kk], t2bb[ki,kl,kb])
                    WOOVV[kk,ki,kb] -= einsum('lcKD,lIcB->KIBD', eris.ovOV[kl,kc,kk], t2ab[kl,ki,kc])
    return Woovv, WooVV, WOOvv, WOOVV
def W2oovv(cc, t1, t2, eris):
    """Second part of the oovv-type W intermediates: t1-dressed Wooov and
    vovv contributions (combined with W1oovv in Woovv).
    """
    kconserv = cc.khelper.kconserv
    dtype = np.result_type(*t1)
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nkpts, nocc, nvir = t1a.shape
    Woovv = np.zeros(eris.oovv.shape, dtype=dtype)
    WooVV = np.zeros(eris.ooVV.shape, dtype=dtype)
    WOOvv = np.zeros(eris.OOvv.shape, dtype=dtype)
    WOOVV = np.zeros(eris.OOVV.shape, dtype=dtype)
    WWooov, WWooOV, WWOOov, WWOOOV = Wooov(cc, t1, t2, eris, kconserv)
    for kk in range(nkpts):
        for ki in range(nkpts):
            for kb in range(nkpts):
                kd = kconserv[kk,ki,kb]
                # -t1 * Wooov contributions
                Woovv[kk,ki,kb] += einsum('kild,lb->kibd',WWooov[kk,ki,kb],-t1a[kb])
                WooVV[kk,ki,kb] += einsum('kiLD,LB->kiBD',WWooOV[kk,ki,kb],-t1b[kb])
                WOOvv[kk,ki,kb] += einsum('KIld,lb->KIbd',WWOOov[kk,ki,kb],-t1a[kb])
                WOOVV[kk,ki,kb] += einsum('KILD,LB->KIBD',WWOOOV[kk,ki,kb],-t1b[kb])
                # t1 * vovv contributions (same-spin blocks antisymmetrized)
                Woovv[kk,ki,kb] += einsum('ckdb,ic->kibd', eris.vovv[ki,kk,kd].conj(), t1a[ki])
                Woovv[kk,ki,kb] -= einsum('dkcb,ic->kibd', eris.vovv[kd,kk,ki].conj(), t1a[ki])
                WooVV[kk,ki,kb] += einsum('ckDB,ic->kiBD', eris.voVV[ki,kk,kd].conj(), t1a[ki])
                WOOvv[kk,ki,kb] += einsum('CKdb,IC->KIbd', eris.VOvv[ki,kk,kd].conj(), t1b[ki])
                WOOVV[kk,ki,kb] += einsum('CKDB,IC->KIBD', eris.VOVV[ki,kk,kd].conj(), t1b[ki])
                WOOVV[kk,ki,kb] -= einsum('DKCB,IC->KIBD', eris.VOVV[kd,kk,ki].conj(), t1b[ki])
    return Woovv, WooVV, WOOvv, WOOVV
def Woovv(cc, t1, t2, eris):
    """Assemble the full oovv-type W intermediates as the elementwise sum
    of the W1oovv and W2oovv pieces for every (kk, ki, kb) k-point triple.
    """
    t1a, t1b = t1
    nkpts, nocc, nvir = t1a.shape
    part1 = W1oovv(cc, t1, t2, eris)
    part2 = W2oovv(cc, t1, t2, eris)
    combined = []
    for base, extra in zip(part1, part2):
        # Accumulate the second piece into the first, k-block by k-block
        for kk, ki, kb in itertools.product(range(nkpts), repeat=3):
            base[kk, ki, kb] = base[kk, ki, kb] + extra[kk, ki, kb]
        combined.append(base)
    return tuple(combined)
# vvvv is a string, ('oooo', 'ooov', ..., 'vvvv')
# orbspin can be accessed through general spin-orbital kintermediates eris
# orbspin = eris.mo_coeff.orbspin
# vvvv is a string, ('oooo', 'ooov', ..., 'vvvv')
# orbspin can be accessed through general spin-orbital kintermediates eris
#     orbspin = eris.mo_coeff.orbspin
def _eri_spin2spatial(chemist_eri_spin, vvvv, eris, nocc, orbspin, cross_ab=False):
    """Split a spin-orbital integral tensor into its spatial-orbital spin
    blocks (aaaa, aabb, bbaa, bbbb and, optionally, abba/baab).

    Parameters
    ----------
    chemist_eri_spin : ndarray
        Spin-orbital integrals in chemist notation, k-point resolved.
    vvvv : str
        Two- or four-character occ/vir label (e.g. 'oooo', 'ovov') that
        selects which index ranges to extract.
    eris : _ERIs object
        Provides the cell and k-points for momentum conservation.
    nocc : tuple of int
        (nocc_alpha, nocc_beta).
    orbspin : list of ndarray
        Per-k-point spin labels (0 = alpha, 1 = beta) of the spin orbitals.
    cross_ab : bool
        If True, also return the abba and baab blocks.
    """
    nocc_a, nocc_b = nocc
    nocc = nocc_a + nocc_b
    nkpts = len(orbspin)
    # Positions of alpha/beta orbitals within the occupied and virtual ranges
    idxoa = [np.where(orbspin[k][:nocc] == 0)[0] for k in range(nkpts)]
    idxob = [np.where(orbspin[k][:nocc] == 1)[0] for k in range(nkpts)]
    idxva = [np.where(orbspin[k][nocc:] == 0)[0] for k in range(nkpts)]
    idxvb = [np.where(orbspin[k][nocc:] == 1)[0] for k in range(nkpts)]

    def select_idx(s):
        # Map an 'o'/'v' label to the (alpha, beta) index lists
        if s.lower() == 'o':
            return idxoa, idxob
        else:
            return idxva, idxvb

    if len(vvvv) == 2:
        # Two-index quantity (e.g. Fock-like): only alpha and beta blocks
        idx1a, idx1b = select_idx(vvvv[0])
        idx2a, idx2b = select_idx(vvvv[1])
        fa = np.zeros((nkpts,len(idx1a[0]),len(idx2a[0])), dtype=np.complex128)
        fb = np.zeros((nkpts,len(idx1b[0]),len(idx2b[0])), dtype=np.complex128)
        for k in range(nkpts):
            fa[k] = chemist_eri_spin[k, idx1a[k][:,None],idx2a[k]]
            fb[k] = chemist_eri_spin[k, idx1b[k][:,None],idx2b[k]]
        return fa, fb

    idx1a, idx1b = select_idx(vvvv[0])
    idx2a, idx2b = select_idx(vvvv[1])
    idx3a, idx3b = select_idx(vvvv[2])
    idx4a, idx4b = select_idx(vvvv[3])
    eri_aaaa = np.zeros((nkpts,nkpts,nkpts,len(idx1a[0]),len(idx2a[0]),len(idx3a[0]),len(idx4a[0])), dtype=np.complex128)
    eri_aabb = np.zeros((nkpts,nkpts,nkpts,len(idx1a[0]),len(idx2a[0]),len(idx3b[0]),len(idx4b[0])), dtype=np.complex128)
    eri_bbaa = np.zeros((nkpts,nkpts,nkpts,len(idx1b[0]),len(idx2b[0]),len(idx3a[0]),len(idx4a[0])), dtype=np.complex128)
    eri_bbbb = np.zeros((nkpts,nkpts,nkpts,len(idx1b[0]),len(idx2b[0]),len(idx3b[0]),len(idx4b[0])), dtype=np.complex128)
    if cross_ab:
        eri_abba = np.zeros((nkpts,nkpts,nkpts,len(idx1a[0]),len(idx2b[0]),len(idx3b[0]),len(idx4a[0])), dtype=np.complex128)
        eri_baab = np.zeros((nkpts,nkpts,nkpts,len(idx1b[0]),len(idx2a[0]),len(idx3a[0]),len(idx4b[0])), dtype=np.complex128)
    kconserv = kpts_helper.get_kconserv(eris.cell, eris.kpts)
    for ki, kj, kk in kpts_helper.loop_kkk(nkpts):
        kl = kconserv[ki, kj, kk]
        eri_aaaa[ki,kj,kk] = chemist_eri_spin[ki,kj,kk, idx1a[ki][:,None,None,None],idx2a[kj][:,None,None],idx3a[kk][:,None],idx4a[kl]]
        eri_aabb[ki,kj,kk] = chemist_eri_spin[ki,kj,kk, idx1a[ki][:,None,None,None],idx2a[kj][:,None,None],idx3b[kk][:,None],idx4b[kl]]
        eri_bbaa[ki,kj,kk] = chemist_eri_spin[ki,kj,kk, idx1b[ki][:,None,None,None],idx2b[kj][:,None,None],idx3a[kk][:,None],idx4a[kl]]
        eri_bbbb[ki,kj,kk] = chemist_eri_spin[ki,kj,kk, idx1b[ki][:,None,None,None],idx2b[kj][:,None,None],idx3b[kk][:,None],idx4b[kl]]
        if cross_ab:
            eri_abba[ki,kj,kk] = chemist_eri_spin[ki,kj,kk, idx1a[ki][:,None,None,None],idx2b[kj][:,None,None],idx3b[kk][:,None],idx4a[kl]]
            eri_baab[ki,kj,kk] = chemist_eri_spin[ki,kj,kk, idx1b[ki][:,None,None,None],idx2a[kj][:,None,None],idx3a[kk][:,None],idx4b[kl]]
    if cross_ab:
        return eri_aaaa, eri_aabb, eri_bbaa, eri_bbbb, eri_abba, eri_baab
    else:
        return eri_aaaa, eri_aabb, eri_bbaa, eri_bbbb
def _eri_spatial2spin(eri_aa_ab_ba_bb, vvvv, eris, orbspin, cross_ab=False):
    """Inverse of _eri_spin2spatial: scatter spatial-orbital spin blocks
    back into a single spin-orbital integral tensor.

    Parameters
    ----------
    eri_aa_ab_ba_bb : tuple of ndarray
        (aaaa, aabb, bbaa, bbbb) blocks, plus (abba, baab) if cross_ab.
    vvvv : str
        Two- or four-character occ/vir label selecting the index ranges.
    eris : _ERIs object
        Provides nocc, cell and k-points.
    orbspin : list of ndarray
        Per-k-point spin labels (0 = alpha, 1 = beta) of the spin orbitals.
    cross_ab : bool
        If True, the input also contains the abba and baab blocks.
    """
    nocc_a, nocc_b = eris.nocc
    nocc = nocc_a + nocc_b
    nkpts = len(orbspin)
    # Positions of alpha/beta orbitals within the occupied and virtual ranges
    idxoa = [np.where(orbspin[k][:nocc] == 0)[0] for k in range(nkpts)]
    idxob = [np.where(orbspin[k][:nocc] == 1)[0] for k in range(nkpts)]
    idxva = [np.where(orbspin[k][nocc:] == 0)[0] for k in range(nkpts)]
    idxvb = [np.where(orbspin[k][nocc:] == 1)[0] for k in range(nkpts)]

    def select_idx(s):
        # Map an 'o'/'v' label to the (alpha, beta) index lists
        if s.lower() == 'o':
            return idxoa, idxob
        else:
            return idxva, idxvb

    if len(vvvv) == 2:
        # Two-index quantity: merge alpha and beta blocks
        idx1a, idx1b = select_idx(vvvv[0])
        idx2a, idx2b = select_idx(vvvv[1])
        fa, fb = eri_aa_ab_ba_bb
        f = np.zeros((nkpts, len(idx1a[0])+len(idx1b[0]),
                      len(idx2a[0])+len(idx2b[0])), dtype=np.complex128)
        for k in range(nkpts):
            f[k, idx1a[k][:,None],idx2a[k]] = fa[k]
            f[k, idx1b[k][:,None],idx2b[k]] = fb[k]
        return f

    idx1a, idx1b = select_idx(vvvv[0])
    idx2a, idx2b = select_idx(vvvv[1])
    idx3a, idx3b = select_idx(vvvv[2])
    idx4a, idx4b = select_idx(vvvv[3])
    if cross_ab:
        eri_aaaa, eri_aabb, eri_bbaa, eri_bbbb, eri_abba, eri_baab = eri_aa_ab_ba_bb
    else:
        eri_aaaa, eri_aabb, eri_bbaa, eri_bbbb = eri_aa_ab_ba_bb
    eri = np.zeros((nkpts,nkpts,nkpts, len(idx1a[0])+len(idx1b[0]),
                    len(idx2a[0])+len(idx2b[0]),
                    len(idx3a[0])+len(idx3b[0]),
                    len(idx4a[0])+len(idx4b[0])), dtype=np.complex128)
    kconserv = kpts_helper.get_kconserv(eris.cell, eris.kpts)
    for ki, kj, kk in kpts_helper.loop_kkk(nkpts):
        kl = kconserv[ki, kj, kk]
        eri[ki,kj,kk, idx1a[ki][:,None,None,None],idx2a[kj][:,None,None],idx3a[kk][:,None],idx4a[kl]] = eri_aaaa[ki,kj,kk]
        eri[ki,kj,kk, idx1a[ki][:,None,None,None],idx2a[kj][:,None,None],idx3b[kk][:,None],idx4b[kl]] = eri_aabb[ki,kj,kk]
        eri[ki,kj,kk, idx1b[ki][:,None,None,None],idx2b[kj][:,None,None],idx3a[kk][:,None],idx4a[kl]] = eri_bbaa[ki,kj,kk]
        eri[ki,kj,kk, idx1b[ki][:,None,None,None],idx2b[kj][:,None,None],idx3b[kk][:,None],idx4b[kl]] = eri_bbbb[ki,kj,kk]
        if cross_ab:
            eri[ki,kj,kk, idx1a[ki][:,None,None,None],idx2b[kj][:,None,None],idx3b[kk][:,None],idx4a[kl]] = eri_abba[ki,kj,kk]
            eri[ki,kj,kk, idx1b[ki][:,None,None,None],idx2a[kj][:,None,None],idx3a[kk][:,None],idx4b[kl]] = eri_baab[ki,kj,kk]
    return eri
|
gkc1000/pyscf
|
pyscf/pbc/cc/kintermediates_uhf.py
|
Python
|
apache-2.0
| 59,610
|
[
"PySCF"
] |
2ba167c5b9afb5f5eb5aeb8916c43637292938a68f22f294aed68a6785faf338
|
# -*- coding: utf-8 -*-
"""
http://www.astroml.org/sklearn_tutorial/dimensionality_reduction.html
"""
print (__doc__)
import numpy as np
import copy
import matplotlib
import matplotlib.mlab
import matplotlib.pyplot as plt
from matplotlib import gridspec
import nslkdd.preprocessing as preprocessing
import sugarbee.reduction as reduction
import sugarbee.distance as distance
if __name__ == '__main__':
    # Load a small preprocessed sample of the NSL-KDD dataset
    datasize = 2
    df, headers = preprocessing.get_preprocessed_data(datasize)

    # Drop the label columns before dimensionality reduction
    df_train = copy.deepcopy(df)
    df_train.drop('attack', 1, inplace=True)
    df_train.drop('difficulty', 1, inplace=True)

    # Project the features onto 2 components and inspect the result.
    # NOTE: the original used Python-2 print statements; converted to the
    # print() function for consistency with print(__doc__) above and for
    # Python 3 compatibility.
    proj = reduction.reduction(df_train, n_components=2)
    print(proj)
    print(proj[0])
    print(proj[1])

    # Gaussian similarity between the first two projected points
    dist = distance.gaussian(proj[0], proj[1])
    print(dist)
|
zedoul/AnomalyDetection
|
test_discretization/test_distance.py
|
Python
|
mit
| 782
|
[
"Gaussian"
] |
9fc6771f02091fd3d1dad77fd0eed3631f350fe527854e1db8bd35bf06e9ca76
|
"""
Dispersed Phases
================
Create several objects and functions to manipulate dispersed phase particles
The `single_bubble_model`, `stratified_plume_model`, and `bent_plume_model`
all must handle dispersed phase particles in several different ways. This
module defines several particle classes that provide seamless interfaces to
the `dbm` module. It also defines several functions that aid in manipulating
common input data to create the inputs needed to initialize these particle
classes. These classes and functions originated in the older versions of
the `single_bubble_model` and `stratified_plume_model`. This module is a
re-factorization of these modules during creation of the `bent_plume_model`,
which allows all particle manipulations to reside in one place.
Notes
-----
These class objects and helper functions are used throughout the TAMOC
modeling suite.
See Also
--------
single_bubble_model, stratified_plume_model, bent_plume_model
"""
# S. Socolofsky, October 2014, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function)
from tamoc import seawater
from tamoc import dbm
import numpy as np
from scipy.optimize import fsolve
import unicodedata
from copy import copy
# ----------------------------------------------------------------------------
# Define the Particle objects for the multiphase behavior in the TAMOC models
# ----------------------------------------------------------------------------
class SingleParticle(object):
    """
    Interface to the `dbm` module and container for model parameters

    Provides a uniform interface to `dbm.FluidParticle` and
    `dbm.InsolubleParticle` objects (whose methods differ in inputs and
    outputs) and stores the particle-specific model parameters: mass and
    heat transfer reduction factors, the dissolution criterion, and the
    hydrate film formation time.

    Parameters
    ----------
    dbm_particle : `dbm.FluidParticle` or `dbm.InsolubleParticle` object
        Object describing the particle properties and behavior
    m0 : ndarray
        Initial masses of the components of the `dbm` particle object (kg)
    T0 : float
        Initial temperature of the of `dbm` particle object (K)
    K : float, default = 1.
        Mass transfer reduction factor (--).
    K_T : float, default = 1.
        Heat transfer reduction factor (--).
    fdis : float, default = 1e-6
        Fraction of the initial total mass (--) remaining when the particle
        should be considered dissolved.
    t_hyd : float, default = 0.
        Hydrate film formation time (s).  Mass transfer is computed by clean
        bubble methods for t less than t_hyd and by dirty bubble methods
        thereafter.  The default behavior is to assume the particle is dirty
        or hydrate covered from the release.
    lag_time : bool, default = True.
        Flag that indicates whether (True) or not (False) to use the
        biodegradation lag times data.

    Attributes
    ----------
    particle : `dbm.FluidParticle` or `dbm.InsolubleParticle` object
        Stores the `dbm_particle` object passed to `__init__()`.
    composition : str list
        Copy of the `composition` attribute of the `dbm_particle` object.
    m0 : ndarray
        Initial masses (kg) of the particle components
    T0 : float
        Initial temperature (K) of the particle
    cp : float
        Heat capacity at constant pressure (J/(kg K)) of the particle.
    K : float
        Mass transfer reduction factor (--)
    K_T : float
        Heat transfer reduction factor (--)
    fdis : float
        Fraction of initial mass remaining as total dissolution (--)
    diss_indices : ndarray bool
        Indices of m0 that are non-zero.

    Notes
    -----
    This object only provides an interface to the `return_all` and
    `diameter` methods of the `dbm` module objects.  Dissolution is turned
    off component by component as each component's mass becomes fdis times
    smaller than its initial mass.  Once all of the initial components have
    dissolved, the particle is assumed to have the density of the ambient
    water and a slip velocity of zero.

    Heat transfer is turned off once the particle comes within 0.5 K of the
    ambient temperature (see `properties`).  Thereafter, the temperature is
    forced to track the ambient temperature.
    """
    def __init__(self, dbm_particle, m0, T0, K=1., K_T=1., fdis=1.e-6,
                 t_hyd=0., lag_time=True):
        super(SingleParticle, self).__init__()

        # Make sure the masses are in a numpy array
        if not isinstance(m0, np.ndarray):
            if not isinstance(m0, list):
                m0 = np.array([m0])
            else:
                m0 = np.array(m0)

        # Store the input parameters
        self.particle = dbm_particle
        self.composition = dbm_particle.composition
        self.m0 = m0
        self.T0 = T0
        # NOTE(review): particle heat capacity approximated as half the
        # seawater value -- confirm this is the intended model assumption.
        self.cp = seawater.cp() * 0.5

        # Store the particle-specific model parameters
        # (the original assigned lag_time twice; assign it once here)
        self.K = K
        self.K_T = K_T
        self.fdis = fdis
        self.t_hyd = t_hyd
        self.lag_time = lag_time

        # Store parameters to track the dissolution of the initial masses
        self.diss_indices = self.m0 > 0

    def properties(self, m, T, P, Sa, Ta, t):
        """
        Return the particle properties from the discrete bubble model

        Provides a single interface to the `return_all` methods of the fluid
        and insoluble particle objects defined in the `dbm` module, applying
        the particle-specific model parameters to adjust the mass and heat
        transfer and determine the dissolution state.

        Parameters
        ----------
        m : float
            mass of the particle (kg)
        T : float
            particle temperature (K)
        P : float
            particle pressure (Pa)
        Sa : float
            salinity of ambient seawater (psu)
        Ta : float
            temperature of ambient seawater (K)
        t : float
            age of the particle--time since it was released into the water
            column (s)

        Returns
        -------
        A tuple containing:

            us : float
                slip velocity (m/s)
            rho_p : float
                particle density (kg/m^3)
            A : float
                surface area (m^2)
            Cs : ndarray, size (nc)
                solubility (kg/m^3)
            K * beta : ndarray, size (nc)
                effective mass transfer coefficient(s) (m/s)
            K_T * beta_T : float
                effective heat transfer coefficient (m/s)
            T : float
                temperature of the particle (K)

        Notes
        -----
        For insoluble particles, `Cs` and `beta` are undefined.  This method
        returns empty arrays for these variables, which results in no
        dissolution and protects against undefined mathematical operations
        (e.g., divide by zero).
        """
        # Turn off heat transfer when within 0.5 K of equilibrium.  This is
        # a persistent change, so it only has to happen once.
        if self.K_T > 0. and np.abs(Ta - T) < 0.5:
            self.K_T = 0.

        # Use the right temperature
        if self.K_T == 0.:
            T = Ta

        # Decide which slip velocity and mass and heat transfer to use:
        # clean bubble before the hydrate film forms, dirty thereafter
        if t < self.t_hyd:
            status = 1
        else:
            status = -1

        # Distinguish between soluble and insoluble particles
        if self.particle.issoluble:
            # Get the DBM results
            m[m<0] = 0.   # stop oscillations at small mass
            shape, de, rho_p, us, A, Cs, beta, beta_T = \
                self.particle.return_all(m, T, P, Sa, Ta, status)

            # Turn off dissolution for "dissolved" components
            frac_diss = np.ones(np.size(m))
            frac_diss[self.diss_indices] = \
                m[self.diss_indices] / self.m0[self.diss_indices]
            beta[frac_diss < self.fdis] = 0.

            # Shut down bubble forces when particles fully dissolve
            if np.sum(beta[self.diss_indices]) == 0.:
                # Injected chemicals have dissolved
                if np.sum(m[self.diss_indices]) > \
                    np.sum(m[~self.diss_indices]):
                    # The whole particle has dissolved
                    us = 0.0
                    rho_p = seawater.density(Ta, Sa, P)

        else:
            # Get the particle properties
            shape, de, rho_p, us, A, beta_T = \
                self.particle.return_all(m[0], T, P, Sa, Ta, status)
            beta = np.array([])
            Cs = np.array([])

        # Return the particle properties
        return (us, rho_p, A, Cs, self.K * beta, self.K_T * beta_T, T)

    def diameter(self, m, T, P, Sa, Ta):
        """
        Compute the diameter of a particle from mass and density

        Computes the diameter of a particle using the methods in the `dbm`
        module.  This method is used in the post-processor of the `Model`
        object, but not in the actual simulation.

        Parameters
        ----------
        m : float
            mass of the particle (kg)
        T : float
            particle temperature (K)
        P : float
            particle pressure (Pa)
        Sa : float
            salinity of ambient seawater (psu)
        Ta : float
            temperature of ambient seawater (K)

        Returns
        -------
        de : float
            diameter of the particle (m)
        """
        # Distinguish between soluble and insoluble particles
        if self.particle.issoluble:
            de = self.particle.diameter(m, T, P)
        else:
            de = self.particle.diameter(m, T, P, Sa, Ta)

        # Return the diameter
        return de

    def biodegradation_rate(self, t):
        """
        Compute the biodegradation rate constants

        Computes the biodegradation rate constants using the method in the
        `dbm` module.

        Parameters
        ----------
        t : float
            current simulation time (s)

        Returns
        -------
        k_bio : ndarray, size (nc)
            first-order biodegradation rate constants (1/s)
        """
        return self.particle.biodegradation_rate(t, self.lag_time)
class PlumeParticle(SingleParticle):
    """
    Interface to the `dbm` module and container for the model parameters

    As in the `single_bubble_model.Particle` class, this object provides a
    uniform interface to the `dbm` module objects and captures the
    particle-specific model parameters.

    Parameters
    ----------
    dbm_particle : `dbm.FluidParticle` or `dbm.InsolubleParticle` object
        Object describing the particle properties and behavior
    m0 : ndarray
        Initial masses of one particle for the components of the
        `dbm_particle` object (kg)
    T0 : float
        Initial temperature of the of `dbm` particle object (K)
    nb0 : float
        Initial number flux of particles at the release (--)
    lambda_1 : float
        spreading rate of the dispersed phase in a plume (--)
    P : float
        Local pressure (Pa)
    Sa : float
        Local salinity surrounding the particle (psu)
    Ta : float
        Local temperature surrounding the particle (K)
    K : float, default = 1.
        Mass transfer reduction factor (--).
    K_T : float, default = 1.
        Heat transfer reduction factor (--).
    fdis : float, default = 1e-6
        Fraction of the initial total mass (--) remaining when the particle
        should be considered dissolved.
    t_hyd : float, default = 0.
        Hydrate film formation time (s).  Mass transfer is computed by clean
        bubble methods for t less than t_hyd and by dirty bubble methods
        thereafter.  The default behavior is to assume the particle is dirty
        or hydrate covered from the release.
    lag_time : bool, default = True.
        Flag that indicates whether (True) or not (False) to use the
        biodegradation lag times data.

    Attributes
    ----------
    particle : `dbm.FluidParticle` or `dbm.InsolubleParticle` object
        Stores the `dbm_particle` object passed to `__init__()`.
    composition : str list
        Copy of the `composition` attribute of the `dbm_particle` object.
    m0 : ndarray
        Initial masses (kg) of the particle components
    T0 : float
        Initial temperature (K) of the particle
    cp : float
        Heat capacity at constant pressure (J/(kg K)) of the particle.
    K : float
        Mass transfer reduction factor (--)
    K_T : float
        Heat transfer reduction factor (--)
    fdis : float
        Fraction of initial mass remaining as total dissolution (--)
    diss_indices : ndarray bool
        Indices of m0 that are non-zero.
    nb0 : float
        Initial number flux of particles at the release (--)
    lambda_1 : float
        Spreading rate of the dispersed phase in a plume (--)
    m : ndarray
        Current masses of the particle components (kg)
    T : float
        Current temperature of the particle (K)
    us : float
        Slip velocity (m/s)
    rho_p : float
        Particle density (kg/m^3)
    A : float
        Particle surface area (m^2)
    Cs : ndarray
        Solubility of each dissolving component in the particle (kg/m^3)
    beta : ndarray
        Mass transfer coefficients (m/s)
    beta_T : float
        Heat transfer coefficient (m/s)

    See Also
    --------
    single_bubble_model.Particle

    Notes
    -----
    This object inherits the `single_bubble_model.Particle` object, which
    defines the attributes: `particle`, `composition`, `m0`, `T0`, `cp`,
    `K`, `K_T`, `fdis`, and `diss_indices` and the methods
    `single_bubble_model.Particle.properties`, and
    `single_bubble_model.Particle.diameter`.
    """
    def __init__(self, dbm_particle, m0, T0, nb0, lambda_1, P, Sa, Ta,
                 K=1., K_T=1., fdis=1.e-6, t_hyd=0., lag_time=True):
        super(PlumeParticle, self).__init__(dbm_particle, m0, T0, K, K_T,
                                            fdis, t_hyd, lag_time)

        # Store the input variables related to the particle description
        self.nb0 = nb0

        # Store the model parameters
        self.lambda_1 = lambda_1

        # Set the local masses and temperature to their initial values.  The
        # particle age is zero at instantiation
        self.update(m0, T0, P, Sa, Ta, 0.)

    def update(self, m, T, P, Sa, Ta, t):
        """
        Store the instantaneous values of the particle properties

        During the simulation, it is often helpful to keep the state space
        variables for each particle stored within the particle, especially
        since each particle type (soluble or insoluble) can have different
        sizes of arrays for m.

        Parameters
        ----------
        m : ndarray
            Current masses (kg) of the particle components
        T : float
            Current temperature (K) of the particle
        P : float
            Local pressure (Pa)
        Sa : float
            Local salinity surrounding the particle (psu)
        Ta : float
            Local temperature surrounding the particle (K)
        t : float
            age of the particle--time since it was released into the water
            column (s)
        """
        # Make sure the masses are in a numpy array
        if not isinstance(m, np.ndarray):
            if not isinstance(m, list):
                m = np.array([m])
            else:
                m = np.array(m)

        # Update the variables with their currrent values
        self.m = m
        if np.sum(self.m) > 0.:
            self.us, self.rho_p, self.A, self.Cs, self.beta, \
                self.beta_T, self.T = self.properties(m, T, P, Sa, Ta, t)
            self.k_bio = self.biodegradation_rate(t)
        else:
            # Fully dissolved: neutral density, no transfer, ambient T
            self.us = 0.
            self.rho_p = seawater.density(Ta, Sa, P)
            self.A = 0.
            self.Cs = np.zeros(len(self.composition))
            self.beta = np.zeros(len(self.composition))
            self.beta_T = 0.
            self.T = Ta
            self.k_bio = np.zeros(len(self.composition))
# ----------------------------------------------------------------------------
# Functions that help to create SingleParticle and PlumeParticle objects
# ----------------------------------------------------------------------------
def initial_conditions(profile, z0, dbm_particle, yk, q, q_type, de,
                       T0=None):
    """
    Define standard initial conditions for a PlumeParticle from flow rate

    Returns the standard variables describing a particle as needed to
    initialize a PlumeParticle object from specification of the dispersed
    phase flow rate.

    Parameters
    ----------
    profile : `ambient.Profile` object
        The ambient CTD object used by the simulation.
    z0 : float
        Depth of the release point (m)
    dbm_particle : `dbm.FluidParticle` or `dbm.InsolubleParticle` object
        Object describing the particle properties and behavior
    yk : ndarray
        Vector of mol fractions of each component of the dispersed phase
        particle.  If the particle is a `dbm.InsolubleParticle`, then yk
        should be equal to one.
    q : float
        Flux of the dispersed phase, either as the volume flux (m^3/s) at
        standard conditions, defined as 0 deg C and 1 bar, or as mass flux
        (kg/s).
    q_type : int
        Determines the type of flux units.  0: we want the mass of a single
        particle (hence q = None since it is currently unknown), 1: q is
        volume flux, 2: q is mass flux
    de : float
        Initial diameter (m) of the particle
    T0 : float, default = None
        Initial temperature of the of `dbm` particle object (K).  If None,
        then T0 is set equal to the ambient temperature.

    Returns
    -------
    m0 : ndarray
        Initial masses of the components of one particle in the `dbm`
        particle object (kg)
    T0 : float
        Initial temperature of the of `dbm` particle object (K)
    nb0 : float
        Initial number flux of particles at the release (--)
    P : float
        Local pressure (Pa)
    Sa : float
        Local salinity surrounding the particle (psu)
    Ta : float
        Local temperature surrounding the particle (K)

    """
    # Coerce the mol fraction input to a numpy array
    if not isinstance(yk, np.ndarray):
        yk = np.array(yk) if isinstance(yk, list) else np.array([yk])

    # Look up the ambient conditions at the release depth
    Ta, Sa, P = profile.get_values(z0, ['temperature', 'salinity',
                                   'pressure'])

    # Default the particle temperature to the local ambient value
    if T0 is None:
        T0 = copy(Ta)

    # Get the phase density both at standard conditions (0 deg C, 1 bar)
    # and at the in situ release conditions
    if dbm_particle.issoluble:
        mf = dbm_particle.mass_frac(yk)
        rho_N = dbm_particle.density(mf, 273.15, 1.e5)
        rho_p = dbm_particle.density(mf, T0, P)
    else:
        mf = 1.
        rho_N = dbm_particle.density(273.15, 1.e5, 0., 273.15)
        rho_p = dbm_particle.density(T0, P, Sa, Ta)

    # Convert the flux specification into single-particle masses and a
    # particle number flux
    if q_type == 0:
        # Only the mass of a single particle of the given size is wanted
        if dbm_particle.issoluble:
            m0 = dbm_particle.masses_by_diameter(de, T0, P, yk)
        else:
            m0 = dbm_particle.mass_by_diameter(de, T0, P, Sa, Ta)
        nb0 = 1.
    else:
        # Total mass flux, from the volume flux at standard conditions if
        # q_type == 1, otherwise given directly
        m_dot = q * rho_N if q_type == 1 else q
        # In situ volume flux and the implied particle number flux
        Q = m_dot / rho_p
        nb0 = Q / (np.pi * de**3 / 6.)
        # Masses of the components of one particle
        m0 = m_dot / nb0 * mf

    # Return the standard variables
    return (m0, T0, nb0, P, Sa, Ta)
# ----------------------------------------------------------------------------
# Functions to save and load a particle to an open netCDF4 dataset
# ----------------------------------------------------------------------------
def save_particle_to_nc_file(nc, chem_names, particles, K_T0):
    """
    Write the particle attributes to a netCDF output file

    Writes all of the object attributes for a `SingleParticle` or
    `PlumeParticle` object to a netCDF output file.  The particle class is
    detected by duck-typing (presence of the `integrate` or `nb0`
    attributes) and recorded in the `particle_type` variable so that
    `load_particle_from_nc_file` can rebuild the right objects.

    Parameters
    ----------
    nc : `netCDF4.Dataset` object
        A `netCDF4.Dataset` object that is open and where the particle
        attributes should be written
    chem_names : str list
        A list of chemical names in the composition of the `dbm` objects
        in these particles
    particles : list of `Particle` objects
        List of `SingleParticle`, `PlumeParticle`, or
        `bent_plume_model.Particle` objects describing each dispersed phase
        in the simulation
    K_T0 : ndarray
        Array of the initial values of the heat transfer reduction factor.

    """
    # Make sure the particles variable is iterable
    if not isinstance(particles, list):
        particles = [particles]

    # Make sure K_T0 is an array
    if not isinstance(K_T0, np.ndarray):
        if not isinstance(K_T0, list):
            K_T0 = np.array([K_T0])
        else:
            K_T0 = np.array(K_T0)

    # Count the number of particles
    nparticles = nc.createDimension('nparticles', len(particles))
    ngroups = nc.createDimension('ngroups', 15)
    if len(chem_names) > 0:
        nchems = nc.createDimension('nchems', len(chem_names))
    else:
        nchems = nc.createDimension('nchems', 1)
    num = nc.createDimension('num', 1)

    # Save the particle composition
    nc.composition = ' '.join(chem_names)

    # Create the dataset descriptions for all the particle variables
    particle_type = nc.createVariable('particle_type', 'i4', ('num',))
    particle_type.long_name = 'dispersed_phases Particle type'
    particle_type.standard_name = 'particle_type'
    particle_type.units = '0: Single, 1:Plume, 2:Bent plume particle'

    issoluble = nc.createVariable('issoluble', 'i4', ('nparticles',))
    issoluble.long_name = 'solubility (0: false, 1: true)'
    issoluble.standard_name = 'issoluble'
    issoluble.units = 'boolean'

    isair = nc.createVariable('isair', 'i4', ('nparticles',))
    isair.long_name = 'fluid is air (0: false, 1: true)'
    isair.standard_name = 'isair'
    isair.units = 'boolean'

    isfluid = nc.createVariable('isfluid', 'i4', ('nparticles',))
    isfluid.long_name = 'Fluid status (0: false, 1: true)'
    isfluid.standard_name = 'isfluid'
    isfluid.units = 'boolean'

    iscompressible = nc.createVariable('iscompressible', 'i4',
                                       ('nparticles',))
    iscompressible.long_name = 'Compressibility (0: false, 1: true)'
    iscompressible.standard_name = 'iscompressible'
    iscompressible.units = 'boolean'

    calc_delta = nc.createVariable('calc_delta', 'i4', ('nparticles',))
    calc_delta.long_name = 'Calculate delta (-1: false, 1: true)'
    calc_delta.standard_name = 'calc_delta'
    calc_delta.units = 'boolean'

    extern_data = nc.createVariable('extern_data', 'i4', ('nparticles',))
    extern_data.long_name = 'External chem database (0: false, 1: true)'
    extern_data.standard_name = 'extern_data'
    extern_data.units = 'boolean'

    fp_type = nc.createVariable('fp_type', 'i4', ('nparticles',))
    fp_type.long_name = 'fluid phase (0: gas, 1: liquid, 2: solid)'
    fp_type.standard_name = 'fp_type'
    fp_type.units = 'nondimensional'

    rho_p = nc.createVariable('rho_p', 'f8', ('nparticles',))
    rho_p.long_name = 'particle density'
    rho_p.standard_name = 'rho_p'
    rho_p.units = 'kg/m^3'

    gamma = nc.createVariable('gamma', 'f8', ('nparticles',))
    gamma.long_name = 'API Gravity'
    gamma.standard_name = 'gamma'
    gamma.units = 'deg API'

    # Note:  the units of beta and co were previously swapped; thermal
    # expansion is per unit temperature and compressibility is per unit
    # pressure
    beta = nc.createVariable('beta', 'f8', ('nparticles',))
    beta.long_name = 'thermal expansion coefficient'
    beta.standard_name = 'beta'
    beta.units = 'K^(-1)'

    co = nc.createVariable('co', 'f8', ('nparticles',))
    co.long_name = 'isothermal compressibility coefficient'
    co.standard_name = 'co'
    co.units = 'Pa^(-1)'

    sigma_correction = nc.createVariable('sigma_correction', 'f8',
                                         ('nparticles',))
    sigma_correction.long_name = 'interfacial tension reduction factor (--)'
    sigma_correction.standard_name = 'sigma_correction'
    sigma_correction.units = 'nondimensional'

    delta_groups = nc.createVariable('delta_groups', 'f8', ('nparticles',
                                     'nchems', 'ngroups'))
    delta_groups.long_name = 'group contribution method delta groups'
    delta_groups.standard_name = 'delta_groups'
    delta_groups.units = 'nondimensional'

    m0 = nc.createVariable('m0', 'f8', ('nparticles', 'nchems'))
    m0.long_name = 'initial mass flux'
    m0.standard_name = 'm0'
    m0.units = 'kg/s'

    T0 = nc.createVariable('T0', 'f8', ('nparticles'))
    T0.long_name = 'initial temperature'
    T0.standard_name = 'T0'
    T0.units = 'K'

    K = nc.createVariable('K', 'f8', ('nparticles',))
    K.long_name = 'mass transfer reduction factor'
    K.standard_name = 'K'
    K.units = 'nondimensional'

    K_T = nc.createVariable('K_T', 'f8', ('nparticles',))
    K_T.long_name = 'heat transfer reduction factor'
    K_T.standard_name = 'K_T'
    K_T.units = 'nondimensional'

    fdis = nc.createVariable('fdis', 'f8', ('nparticles',))
    fdis.long_name = 'dissolution criteria'
    fdis.standard_name = 'fdis'
    fdis.units = 'nondimensional'

    t_hyd = nc.createVariable('t_hyd', 'f8', ('nparticles',))
    t_hyd.long_name = 'hydrate formation time'
    t_hyd.standard_name = 't_hyd'
    t_hyd.units = 's'

    # Determine the particle class by duck-typing and create the extra
    # variables each class requires
    try:
        particles[0].integrate
        # Must be bent_plume_model.Particle object
        particle_type[0] = 2
        nb0 = nc.createVariable('nb0', 'f8', ('nparticles'))
        nb0.long_name = 'initial bubble number flux'
        nb0.standard_name = 'nb0'
        nb0.units = 's^(-1)'

        nbe = nc.createVariable('nbe', 'f8', ('nparticles'))
        nbe.long_name = 'number of bubbles following plume element'
        nbe.standard_name = 'nbe'
        nbe.units = 'count'

        lambda_1 = nc.createVariable('lambda_1', 'f8', ('nparticles'))
        lambda_1.long_name = 'bubble spreading ratio'
        lambda_1.standard_name = 'lambda_1'
        lambda_1.units = 'nondimensional'

        integrate = nc.createVariable('integrate', 'i4', ('nparticles',))
        integrate.long_name = 'Particle status (0: false, 1: true)'
        integrate.standard_name = 'integrate'
        integrate.units = 'boolean'

        sim_stored = nc.createVariable('sim_stored', 'i4', ('nparticles',))
        sim_stored.long_name = 'Tracking state (0: false, 1: true)'
        sim_stored.standard_name = 'sim_stored'
        sim_stored.units = 'boolean'

        farfield = nc.createVariable('farfield', 'i4', ('nparticles',))
        farfield.long_name = 'Farfield simualtion (0: false, 1: true)'
        farfield.standard_name = 'farfield'
        farfield.units = 'boolean'

        tp = nc.createVariable('tp', 'f8', ('nparticles'))
        tp.long_name = 'time'
        tp.standard_name = 't'
        tp.units = 's'

        xp = nc.createVariable('xp', 'f8', ('nparticles'))
        xp.long_name = 'x-coordinate'
        xp.standard_name = 'x'
        xp.units = 'm'

        yp = nc.createVariable('yp', 'f8', ('nparticles'))
        yp.long_name = 'y-coordinate'
        yp.standard_name = 'y'
        yp.units = 'm'

        zp = nc.createVariable('zp', 'f8', ('nparticles'))
        zp.long_name = 'z-coordinate'
        zp.standard_name = 'z'
        zp.units = 'm'
        zp.axis = 'Z'
        zp.positive = 'down'

        te = nc.createVariable('te', 'f8', ('nparticles'))
        te.long_name = 'particle exit time'
        te.standard_name = 'te'
        te.units = 's'

        xe = nc.createVariable('xe', 'f8', ('nparticles'))
        xe.long_name = 'particle exit x-coordinate'
        xe.standard_name = 'xe'
        xe.units = 'm'

        ye = nc.createVariable('ye', 'f8', ('nparticles'))
        ye.long_name = 'particle exit y-coordinate'
        ye.standard_name = 'ye'
        ye.units = 'm'

        ze = nc.createVariable('ze', 'f8', ('nparticles'))
        ze.long_name = 'particle exit z-coordinate'
        ze.standard_name = 'ze'
        ze.units = 'm'
        ze.axis = 'Z'
        ze.positive = 'down'

    except AttributeError:
        try:
            particles[0].nb0
            # Must be PlumeParticle object
            particle_type[0] = 1
            nb0 = nc.createVariable('nb0', 'f8', ('nparticles'))
            nb0.long_name = 'initial bubble number flux'
            nb0.standard_name = 'nb0'
            nb0.units = 's^(-1)'

            lambda_1 = nc.createVariable('lambda_1', 'f8', ('nparticles'))
            lambda_1.long_name = 'bubble spreading ratio'
            lambda_1.standard_name = 'lambda_1'
            lambda_1.units = 'nondimensional'

        except AttributeError:
            # Must be a SingleParticle object
            particle_type[0] = 0

    # Check if we need to reserve space to store an external chemical data
    # base of user_data
    next_chems = 0
    for i in range(len(particles)):
        if particles[i].particle.issoluble:
            if len(particles[i].particle.user_data) > next_chems:
                next_chems = len(particles[i].particle.user_data)
                # Python 3 will not index a dict_keys() object because that
                # is not a good thing to do.  However, that is what we do
                # below...hence, we need to make the dict_keys into a list
                user_composition = \
                    list(particles[i].particle.user_data.keys())
    if next_chems > 0:
        next_chems = nc.createDimension('next_chems', next_chems)
        nc.user_composition = ' '.join(user_composition)
        M = nc.createVariable('M', 'f8', ('nparticles', 'next_chems'))
        M.long_name = 'molecular weight'
        M.standard_name = 'M'
        M.units = 'kg/mol'

        Pc = nc.createVariable('Pc', 'f8', ('nparticles', 'next_chems'))
        Pc.long_name = 'pressure at the critical point'
        Pc.standard_name = 'Pc'
        Pc.units = 'Pa'

        Tc = nc.createVariable('Tc', 'f8', ('nparticles', 'next_chems'))
        Tc.long_name = 'temperature at the critical point'
        Tc.standard_name = 'Tc'
        Tc.units = 'K'

        Vc = nc.createVariable('Vc', 'f8', ('nparticles', 'next_chems'))
        Vc.long_name = 'molar volume at the critical point'
        Vc.standard_name = 'Vc'
        Vc.units = 'm^3/mol'

        Tb = nc.createVariable('Tb', 'f8', ('nparticles', 'next_chems'))
        Tb.long_name = 'boiling point'
        Tb.standard_name = 'Tb'
        Tb.units = 'K'

        Vb = nc.createVariable('Vb', 'f8', ('nparticles', 'next_chems'))
        Vb.long_name = 'molar volume at the boiling point'
        Vb.standard_name = 'Vb'
        Vb.units = 'm^3/mol'

        omega = nc.createVariable('omega', 'f8', ('nparticles',
                                  'next_chems'))
        omega.long_name = 'acentric factor'
        omega.standard_name = 'omega'
        omega.units = 'nondimensional'

        kh_0 = nc.createVariable('kh_0', 'f8', ('nparticles', 'next_chems'))
        kh_0.long_name = 'Henrys law constant at 298.15 K'
        kh_0.standard_name = 'kh_0'
        kh_0.units = 'kg/(m^3 Pa)'

        neg_dH_solR = nc.createVariable('neg_dH_solR', 'f8', ('nparticles',
                                        'next_chems'))
        neg_dH_solR.long_name = 'negative of the enthalpy of solution / R'
        neg_dH_solR.standard_name = 'neg_dH_solR'
        neg_dH_solR.units = 'K'

        nu_bar = nc.createVariable('nu_bar', 'f8', ('nparticles',
                                   'next_chems'))
        nu_bar.long_name = 'specific volume at infinite dilution'
        nu_bar.standard_name = 'nu_bar'
        nu_bar.units = 'm^3/mol'

        B = nc.createVariable('B', 'f8', ('nparticles', 'next_chems'))
        B.long_name = 'diffusivity model coefficient B'
        B.standard_name = 'B'
        B.units = 'm^2/s'

        dE = nc.createVariable('dE', 'f8', ('nparticles', 'next_chems'))
        dE.long_name = 'diffusivity model coefficient dE'
        dE.standard_name = 'dE'
        dE.units = 'J/mol'

        K_salt = nc.createVariable('K_salt', 'f8', ('nparticles',
                                   'next_chems'))
        K_salt.long_name = 'Setschenow salting out correction for solubility'
        K_salt.standard_name = 'K_salt'
        K_salt.units = 'm^3/mol'

    # Store the values for each particle in the list
    for i in range(len(particles)):

        # Store the variables needed to create dbm particle objects
        if particles[i].particle.issoluble:
            issoluble[i] = 1
            isfluid[i] = 1
            isair[i] = particles[i].particle.isair
            iscompressible[i] = 1
            fp_type[i] = particles[i].particle.fp_type
            calc_delta[i] = particles[i].particle.calc_delta
            if len(particles[i].particle.user_data) == 0:
                extern_data[i] = 0
            else:
                extern_data[i] = 1
                user_data = particles[i].particle.user_data
                for j in range(len(user_composition)):
                    M[i,j] = user_data[user_composition[j]]['M']
                    Pc[i,j] = user_data[user_composition[j]]['Pc']
                    Tc[i,j] = user_data[user_composition[j]]['Tc']
                    Vc[i,j] = user_data[user_composition[j]]['Vc']
                    Tb[i,j] = user_data[user_composition[j]]['Tb']
                    Vb[i,j] = user_data[user_composition[j]]['Vb']
                    omega[i,j] = user_data[user_composition[j]]['omega']
                    kh_0[i,j] = user_data[user_composition[j]]['kh_0']
                    neg_dH_solR[i,j] = \
                        user_data[user_composition[j]]['-dH_solR']
                    nu_bar[i,j] = user_data[user_composition[j]]['nu_bar']
                    B[i,j] = user_data[user_composition[j]]['B']
                    dE[i,j] = user_data[user_composition[j]]['dE']
                    K_salt[i,j] = user_data[user_composition[j]]['K_salt']
            sigma_correction[i] = particles[i].particle.sigma_correction
            if particles[i].particle.calc_delta:
                delta_groups[i,:,:] = particles[i].particle.delta_groups
            else:
                delta_groups[i,:,:] = np.zeros((len(chem_names),15))
            m0[i,:] = particles[i].m0
            # Insoluble-particle attributes do not apply; flag with -1
            rho_p[i] = -1.
            gamma[i] = -1.
            beta[i] = -1.
            co[i] = -1.
        else:
            issoluble[i] = 0
            isair[i] = 0
            if particles[i].particle.isfluid:
                isfluid[i] = 1
            else:
                isfluid[i] = 0
            if particles[i].particle.iscompressible:
                iscompressible[i] = 1
            else:
                iscompressible[i] = 0
            fp_type[i] = 3
            calc_delta[i] = -1
            sigma_correction[i] = 1.
            m0[i,0] = particles[i].m0
            rho_p[i] = particles[i].particle.rho_p
            gamma[i] = particles[i].particle.gamma
            beta[i] = particles[i].particle.beta
            co[i] = particles[i].particle.co

        # Store the variables needed to create dispersed_phases
        # SingleParticle or PlumeParticle objects
        T0[i] = particles[i].T0
        K[i] = particles[i].K
        K_T[i] = K_T0[i]
        fdis[i] = particles[i].fdis
        t_hyd[i] = particles[i].t_hyd
        if particle_type[0] == 1 or particle_type[0] == 2:
            nb0[i] = particles[i].nb0
            lambda_1[i] = particles[i].lambda_1
        if particle_type[0] == 2:
            # nb0 and lambda_1 were already stored in the branch above
            nbe[i] = particles[i].nbe
            integrate[i] = particles[i].integrate
            sim_stored[i] = particles[i].sim_stored
            farfield[i] = particles[i].farfield
            tp[i] = particles[i].t
            xp[i] = particles[i].x
            yp[i] = particles[i].y
            zp[i] = particles[i].z
            try:
                te[i] = particles[i].te
                xe[i] = particles[i].xe
                ye[i] = particles[i].ye
                ze[i] = particles[i].ze
            except AttributeError:
                # The particle has not yet exited the plume, so there is no
                # exit point to record
                pass
def load_particle_from_nc_file(nc):
    """
    Read the complete `particles` list from a netCDF output file

    Creates the `particles` list of `SingleParticle`, `PlumeParticle`, or
    `bent_plume_model.Particle` objects from the attributes stored in a
    netCDF output file.  The class of each rebuilt particle is selected by
    the stored `particle_type` variable (0: `SingleParticle`,
    1: `PlumeParticle`, 2: `bent_plume_model.Particle`).

    Parameters
    ----------
    nc : `netCDF4.Dataset` object
        A `netCDF4.Dataset` object that is open and that contains the
        particle attributes to read

    Returns
    -------
    particles : list of `Particle` objects
        List of reconstructed particle objects, one per entry in the
        file's `nparticles` dimension
    chem_names : str list
        List of chemical names in the composition of the particles

    """
    # All particles have the same composition
    chem_names = str(nc.composition).split()

    # Load each particle object separately
    particles = []
    for i in range(len(nc.dimensions['nparticles'])):

        # Create the correct dbm object
        if nc.variables['issoluble'][i]:
            # Rebuild the user-supplied chemical property database, if one
            # was stored
            if nc.variables['extern_data'][i]:
                user_data = {}
                user_composition = str(nc.user_composition).split()
                for j in range(len(user_composition)):
                    user_data[user_composition[j]] = {}
                    user_data[user_composition[j]]['M'] = \
                        nc.variables['M'][i,j]
                    user_data[user_composition[j]]['Pc'] = \
                        nc.variables['Pc'][i,j]
                    user_data[user_composition[j]]['Tc'] = \
                        nc.variables['Tc'][i,j]
                    user_data[user_composition[j]]['Vc'] = \
                        nc.variables['Vc'][i,j]
                    user_data[user_composition[j]]['Tb'] = \
                        nc.variables['Tb'][i,j]
                    user_data[user_composition[j]]['Vb'] = \
                        nc.variables['Vb'][i,j]
                    user_data[user_composition[j]]['omega'] = \
                        nc.variables['omega'][i,j]
                    user_data[user_composition[j]]['kh_0'] = \
                        nc.variables['kh_0'][i,j]
                    user_data[user_composition[j]]['-dH_solR'] = \
                        nc.variables['neg_dH_solR'][i,j]
                    user_data[user_composition[j]]['nu_bar'] = \
                        nc.variables['nu_bar'][i,j]
                    user_data[user_composition[j]]['B'] = \
                        nc.variables['B'][i,j]
                    user_data[user_composition[j]]['dE'] = \
                        nc.variables['dE'][i,j]
                    user_data[user_composition[j]]['K_salt'] = \
                        nc.variables['K_salt'][i,j]
            else:
                user_data = {}
            # Group-contribution data are only stored when calc_delta was
            # set at save time
            if nc.variables['calc_delta'][i]:
                delta_groups = nc.variables['delta_groups'][i,:,:]
            else:
                delta_groups = None
            particle = dbm.FluidParticle(chem_names,
                fp_type=nc.variables['fp_type'][i],
                user_data=user_data,
                delta_groups=delta_groups,
                isair=nc.variables['isair'][i],
                sigma_correction=nc.variables['sigma_correction'][i])
            m0 = np.array(nc.variables['m0'][i,:])
        else:
            # Insoluble particles store scalar fluid properties instead of
            # a chemical composition
            if nc.variables['isfluid'][i]:
                isfluid = True
            else:
                isfluid = False
            if nc.variables['iscompressible'][i]:
                iscompressible = True
            else:
                iscompressible = False
            particle = dbm.InsolubleParticle(isfluid, iscompressible,
                rho_p=nc.variables['rho_p'][i],
                gamma=nc.variables['gamma'][i],
                beta=nc.variables['beta'][i],
                co=nc.variables['co'][i])
            m0 = np.array([nc.variables['m0'][i,0]])

        # Create the right dispersed_phases object
        if nc.variables['particle_type'][0] == 2:
            # Import here to avoid a circular import at module load time
            from tamoc import bent_plume_model as bpm
            particle = bpm.Particle(nc.variables['xp'][i],
                nc.variables['yp'][i], nc.variables['zp'][i], particle, m0,
                nc.variables['T0'][i], nc.variables['nb0'][i],
                nc.variables['lambda_1'][i], nc.variables['P'][0],
                nc.variables['Sa'][0], nc.variables['Ta'][0],
                nc.variables['K'][i], nc.variables['K_T'][i],
                nc.variables['fdis'][i], nc.variables['t_hyd'][i])
            # Restore the Lagrangian tracking state of the particle
            particle.nbe = nc.variables['nbe'][i]
            particle.t = nc.variables['tp'][i]
            particle.integrate = nc.variables['integrate'][i]
            particle.sim_stored = nc.variables['sim_stored'][i]
            particle.farfield = nc.variables['farfield'][i]
            # Exit-point data only exist if the particle left the plume
            if nc.variables['te'][i] > 0.:
                particle.te = nc.variables['te'][i]
                particle.xe = nc.variables['xe'][i]
                particle.ye = nc.variables['ye'][i]
                particle.ze = nc.variables['ze'][i]
        elif nc.variables['particle_type'][0] == 1:
            particle = PlumeParticle(particle, m0,
                nc.variables['T0'][i], nc.variables['nb0'][i],
                nc.variables['lambda_1'][i], nc.variables['P'][0],
                nc.variables['Sa'][0], nc.variables['Ta'][0],
                nc.variables['K'][i], nc.variables['K_T'][i],
                nc.variables['fdis'][i], nc.variables['t_hyd'][i])
        else:
            particle = SingleParticle(particle, m0,
                nc.variables['T0'][i], nc.variables['K'][i],
                nc.variables['K_T'][i], nc.variables['fdis'][i],
                nc.variables['t_hyd'][i])

        # Add this particle to the particles list
        particles.append(particle)

    # Return the list of particles and their composition
    return (particles, chem_names)
# ----------------------------------------------------------------------------
# Functions for shear entrainment
# ----------------------------------------------------------------------------
def shear_entrainment(U, Us, rho, rho_a, b, sin_p, p):
    """
    Compute the entrainment coefficient for shear entrainment

    Computes the entrainment coefficient for the shear entrainment for a top
    hat model.  This code can be used by both the bent plume model and the
    stratified plume model.  It is based on the concepts for shear
    entrainment in Lee and Cheung (1990) and adapted by the model in Jirka
    (2004).  The model works for pure jets, pure plumes, and buoyant jets.

    Parameters
    ----------
    U : float
        Top hat velocity of entrained plume water (m/s)
    Us : float
        Component of the ambient current projected along the plume
        centerline (m/s)
    rho : float
        Density of the entrained plume fluid (kg/m^3)
    rho_a : float
        Density of the ambient water at the current height (kg/m^3)
    b : float
        Top hat half-width of the plume (m)
    sin_p : float
        Sine of the angle phi from the horizontal with down being positive
        (up is - pi/2)
    p : `bent_plume_model.ModelParams` or `stratified_plume_model.ModelParams`
        Object containing the present model parameters

    Returns
    -------
    alpha_s : float
        The shear entrainment coefficient (--)

    """
    # Gaussian model jet entrainment coefficient
    alpha_j = p.alpha_j

    # Gaussian model plume entrainment coefficient
    if rho_a == rho:
        # This is a pure jet
        alpha_p = 0.

    else:
        # This is a plume; compute the densimetric Gaussian Froude number
        F1 = 2. * np.abs(U - Us) / np.sqrt(p.g * np.abs(rho_a - rho) * (1. +
             1.2**2) / 1.2**2 / rho_a * b / np.sqrt(2.))

        # Follow Figure 13 in Jirka (2004)
        if np.abs(F1**2 / sin_p) > p.alpha_Fr / 0.028:
            alpha_p = - np.sign(rho_a - rho) * p.alpha_Fr * sin_p / F1**2
        else:
            alpha_p = - (0.083 - p.alpha_j) / (p.alpha_Fr / 0.028) * F1**2 / \
                sin_p * np.sign(rho_a - rho)

    # Compute the total shear entrainment coefficient for the top-hat model;
    # guard against division by zero when the plume and ambient velocities
    # are both zero
    if (np.abs(U - Us) + U) == 0:
        alpha_s = np.sqrt(2.) * alpha_j
    else:
        alpha_s = np.sqrt(2.) * (alpha_j + alpha_p) * 2. * U / \
            (np.abs(U - Us) + U)

    # Return the total shear entrainment coefficient
    return alpha_s
# ----------------------------------------------------------------------------
# Functions for hydrate skin model
# ----------------------------------------------------------------------------
def hydrate_formation_time(dbm_obj, z, m, T, profile):
    """
    Compute the hydrate formation time

    Computes the time for a hydrate shell to form on a bubble or droplet
    released in the ocean.  If the release is above the hydrate stability
    zone, the formation time is `np.inf` (a shell never forms); otherwise,
    it is estimated from an empirical correlation in the particle diameter
    and the extent of hydrate subcooling (Wang et al. 2020, GRL).

    The idea behind this model is that bubbles or droplets in the ocean may
    form a hydrate shell that results in dirty-bubble mass and heat transfer
    and rise velocity.  This approach has been compared to field data in
    Rehder et al. (2002), Romer et al. (2012), McGinnis et al. (2006),
    Warkinski et al. (2014), and the GISR field experiments.

    Parameters
    ----------
    dbm_obj : `dbm.FluidParticle` object
        Discrete bubble model `dbm.FluidParticle` object.  Since this method
        must calculate the hydrate stability temperature, it cannot be used
        on `dbm.InsolubleParticle` objects.  A hydrate formation time can
        still be set for those particles, but not estimated from this
        function.
    z : float
        Release depth (m)
    m : ndarray
        Initial masses of the components of the `dbm_obj` (kg)
    T : float
        Initial temperature of the of `dbm_obj` particle (K)
    profile : `ambient.Profile` object
        An object containing the ambient CTD data and associated methods.

    Returns
    -------
    t_hyd : float
        Hydrate formation time (s)

    """
    # Look up the ambient conditions at the release depth
    Ta, Sa, P = profile.get_values(z, ['temperature', 'salinity',
                                   'pressure'])

    # Get the particle diameter and the hydrate stability temperature
    de = dbm_obj.diameter(m, T, P)
    T_hyd = dbm_obj.hydrate_stability(m, P)

    # Above the hydrate stability zone, assume a hydrate shell never forms
    if T_hyd < Ta:
        return np.inf

    # Otherwise, apply the empirical correlation of Wang et al. (2020) GRL
    alpha = 3.915
    beta = -0.333
    return alpha * np.pi * (de * 1000)**2 * (T_hyd - Ta)**beta
# ----------------------------------------------------------------------------
# Functions to generate initial conditions for models using these objects
# ----------------------------------------------------------------------------
def zfe_volume_flux(profile, particles, p, X0, R):
    """
    Initial volume for a multiphase plume

    Uses the Wueest et al. (1992) plume Froude number method to estimate
    the amount of entrainment at the source of a dispersed phase plume with
    zero continuous phase flux (e.g., a pure bubble, droplet, or particle
    plume)

    Parameters
    ----------
    profile : `ambient.Profile` object
        The ambient CTD object used by the single bubble model simulation.
    particles : list of `Particle` objects
        List of `SingleParticle`, `PlumeParticle`, or
        `bent_plume_model.Particle` objects describing each dispersed phase
        in the simulation
    p : `stratified_plume_model.ModelParams` or `bent_plume_model.ModelParams`
        Object containing the fixed model parameters for one of the integral
        plume models
    X0 : float
        (x, y, depth) coordinates of the release point (m)
    R : float
        Radius of the equivalent circular area of the release (m)

    Returns
    -------
    Q : float
        Initial volume flux of entrained seawater (m^3/s)
    A : float
        Initial cross-sectional area of the plume (m^2)
    X : float or ndarray
        The release coordinates, returned exactly as supplied
    Ta, Sa, P : float
        Ambient temperature (K), salinity (psu), and pressure (Pa) at the
        release
    rho : float
        Ambient seawater density (kg/m^3) at the release

    """
    # The Wuest et al. (1992) initial condition applies right at the
    # diffuser (no virtual point source); return the user-supplied release
    # location unchanged
    X = X0

    # Work with a three-dimensional coordinate vector internally
    if not isinstance(X0, np.ndarray):
        X0 = np.array(X0) if isinstance(X0, list) else np.array([0., 0., X0])

    # Look up the ambient conditions at the discharge
    Ta, Sa, P = profile.get_values(X0[2], ['temperature', 'salinity',
                                   'pressure'])
    rho = seawater.density(Ta, Sa, P)

    # Refresh each particle at the release (age zero) and collect the
    # multiphase properties
    n = len(particles)
    lambda_1 = np.zeros(n)
    us = np.zeros(n)
    rho_p = np.zeros(n)
    Q = np.zeros(n)
    for i, part in enumerate(particles):
        part.update(part.m, part.T, P, Sa, Ta, 0.)
        lambda_1[i] = part.lambda_1
        us[i] = part.us
        rho_p[i] = part.rho_p
        Q[i] = np.sum(part.m) * part.nb0 / rho_p[i]

    # Buoyancy-flux-weighted average of the spreading ratio
    lambda_ave = bf_average(particles, rho, p.g, p.rho_r, lambda_1)

    # Solve for the initial velocity of the entrained ambient fluid
    u_0 = np.sum(Q) / (np.pi * (lambda_ave * R)**2)
    u = wuest_ic(u_0, particles, lambda_1, lambda_ave, us, rho_p, rho, Q, R,
                 p.g, p.Fr_0)

    # The initial plume width is the discharge port width
    A = np.pi * R**2

    # Calculate the volume flux
    Q = A * u

    return (Q, A, X, Ta, Sa, P, rho)
def wuest_ic(u_0, particles, lambda_1, lambda_ave, us, rho_p, rho, Q, R,
             g, Fr_0):
    """
    Compute the initial velocity of entrained ambient fluid

    Computes the initial velocity of the entrained ambient fluid following
    the method in Wueest et al. (1992).  This method is implicit; thus, an
    initial guess for the velocity and a root-finding approach is required.

    Parameters
    ----------
    u_0 : float
        Initial guess for the entrained fluid velocity (m/s)
    particles : list of `Particle` objects
        List of `SingleParticle`, `PlumeParticle`, or
        `bent_plume_model.Particle` objects describing each dispersed phase
        in the simulation
    lambda_1 : ndarray
        Spreading rate of the each dispersed phase particle in a plume (--)
    lambda_ave : float
        Buoyancy flux averaged value of lambda_1 (--)
    us : ndarray
        Slip velocity of each of the dispersed phase particles (m/s)
    rho_p : ndarray
        Density of each of the dispersed phase particles (kg/m^3)
    rho : float
        Density of the local ambient continuous phase fluid (kg/m^3)
    Q : ndarray
        Total volume flux of particles for each dispersed phase (m^3/s)
    R : float
        Radius of the release port (m)
    g : float
        Acceleration of gravity (m/s^2)
    Fr_0 : float
        Desired initial plume Froude number (--)

    Returns
    -------
    u : float
        The converged value of the entrained fluid velocity in m/s at the
        release location in order to achieve the specified value of Fr_0.

    """
    # The Wuest et al. (1992) initial condition is implicit; define the
    # residual for use in a root-finding algorithm
    def residual(u):
        """
        Residual of the Wueest et al. (1992) initial condition for the
        current guess of the initial velocity u (m/s).  All other
        parameters of `wuest_ic` are in scope through the closure.
        """
        # Void fraction of each dispersed phase for this velocity guess
        xi = Q / (np.pi * lambda_1**2 * R**2 *
                  (us + 2. * u / (1. + lambda_1**2)))

        # Density of the mixed-fluid plume
        rho_m = np.sum(xi * rho_p) + (1. - np.sum(xi)) * rho

        # Deviation from the desired Froude number
        return Fr_0 - u / np.sqrt(2. * lambda_ave * R * g *
                                  (rho - rho_m) / rho_m)

    return fsolve(residual, u_0)[0]
def bf_average(particles, rho, g, rho_r, parm):
    """
    Compute a buoyancy-flux-weighted average of `parm`

    The kinematic buoyancy flux of each dispersed-phase particle is used as
    the weight when averaging the corresponding entries of `parm`.

    Parameters
    ----------
    particles : list of `Particle` objects
        List of `SingleParticle`, `PlumeParticle`, or
        `bent_plume_model.Particle` objects describing each dispersed phase
        in the simulation
    rho : float
        Local density of ambient fluid outside plume (kg/m^3).
    g : float
        Acceleration of gravity (m/s^2).
    rho_r : float
        Model reference density (kg/m^3).
    parm : ndarray
        Numpy array of parameters to average, one value for each
        dispersed phase entry (same as elements in parm).

    Returns
    -------
    parm_ave : float
        The weighted average of `parm`.

    """
    # Kinematic buoyancy flux of each particle, used as the averaging weight
    weights = np.zeros(len(particles))
    for idx, particle in enumerate(particles):
        # Total volume flux of this dispersed phase
        q_flux = np.sum(particle.m) * particle.nb0 / particle.rho_p
        # Kinematic buoyancy flux of this dispersed phase
        weights[idx] = g * (rho - particle.rho_p) / rho_r * q_flux

    # With zero net buoyancy flux, the weighted average is defined as zero
    total = np.sum(weights)
    if total == 0.:
        return 0.
    return np.sum(weights * parm) / total
def get_chem_names(particles):
    """
    Create a list of chemical names for the dispersed phase particles

    Reads the composition attribute of each particle in a `particles` list
    and compiles a unique list of particle names.

    Parameters
    ----------
    particles : list of `Particle` objects
        List of `SingleParticle`, `PlumeParticle`, or
        `bent_plume_model.Particle` objects describing each dispersed phase
        in the simulation

    Returns
    -------
    chem_names : str list
        List of the chemical composition of particles undergoing dissolution
        in the `particles` list

    """
    names = []
    for particle in particles:
        # Insoluble particles contribute no dissolving chemicals
        if not particle.particle.issoluble:
            continue
        for chem in particle.composition:
            # Keep first-seen order while avoiding duplicate entries
            if chem not in names:
                names.append(chem)
    return names
def particles_state_space(particles, nb):
    """
    Create the state space describing the dispersed phase properties

    Constructs a complete state space of masses and heat content for all of
    the particles in the `particles` list.

    Parameters
    ----------
    particles : list of `Particle` objects
        List of `SingleParticle`, `PlumeParticle`, or
        `bent_plume_model.Particle` objects describing each dispersed phase
        in the simulation
    nb : ndarray
        Array of particle numbers for forming the state space.  nb can be in
        number/T, which will give state space variables in mass flux (M/T) or
        in number, which will give state space variables in mass.

    Returns
    -------
    y : ndarray
        Array of state space variables for the `particles` objects.

    """
    state = []
    for i, particle in enumerate(particles):
        count = nb[i]
        # Component masses scaled by the particle number (or number flux)
        state.extend(particle.m * count)
        # Heat content: total mass times heat capacity times temperature
        state.append(np.sum(particle.m) * count * particle.cp * particle.T)
        # Particle age starts at zero
        state.append(0.)
        # Particle position starts at the plume centerline
        state.extend([0., 0., 0.])
    return state
|
socolofs/tamoc
|
tamoc/dispersed_phases.py
|
Python
|
mit
| 59,836
|
[
"Gaussian",
"NetCDF"
] |
16bc2940606da738f7f64b8b940690952fe3e971690db87ebe203e40ad624ed0
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for keras functional model."""
from keras import backend
from keras.engine import input_layer as input_layer_module
from keras.engine import keras_tensor
from keras.engine import node as node_module
import tensorflow.compat.v2 as tf
# Error template used when an object that is not a KerasTensor shows up among
# the inputs/outputs of a functional model; `.format()` receives the offending
# object.
_KERAS_TENSOR_TYPE_CHECK_ERROR_MSG = (
    'Found unexpected instance while processing input tensors for keras '
    'functional model. Expecting KerasTensor which is from tf.keras.Input() '
    'or output from keras layer call(). Got: {}')
def is_input_keras_tensor(tensor):
  """Check if tensor is directly generated from `tf.keras.Input`.

  This check is useful when constructing the functional model, since we will
  need to clone Nodes and KerasTensors if the model is building from non input
  tensor.

  Args:
    tensor: A `KerasTensor` as inputs to the functional model.

  Returns:
    bool. Whether the tensor is directly generated from `tf.keras.Input`.

  Raises:
    ValueError: if the tensor is not a KerasTensor instance.
  """
  if node_module.is_keras_tensor(tensor):
    return tensor.node.is_input
  raise ValueError(_KERAS_TENSOR_TYPE_CHECK_ERROR_MSG.format(tensor))
def find_nodes_by_inputs_and_outputs(inputs, outputs):
  """Fetch all Nodes in the graph defined by "inputs" and "outputs".

  This method is used to find and then clone Nodes when creating a new
  sub-model from an existing functional model.

  Args:
    inputs: A nested structure of KerasTensor to use as model inputs.
    outputs: A nested structure of KerasTensor to use as model outputs.

  Returns:
    A list of Nodes that are connected to the inputs and outputs.

  Raises:
    ValueError: when inputs and outputs are disconnected or in case of
      unexpected objects in the inputs/outputs.
  """
  # We walk the graph bottom up, starting from output nodes, and keep tracing
  # the upstream node, until we find all the inputs nodes. We don't use top
  # down search here since we don't know whether a certain node is in the graph
  # between inputs and outputs, e.g. a functional graph could have multiple
  # outputs, and the user could choose a subset of them to build the model.
  # The bottom up approach will ensure all the nodes we visit are actually
  # in use. If we reach the top and didn't find the nodes in the `inputs`,
  # that's an error, since the user didn't specify the correct inputs.
  start_keras_tensors = tf.nest.flatten(outputs)
  end_keras_tensors = tf.nest.flatten(inputs)

  for t in start_keras_tensors + end_keras_tensors:
    if not node_module.is_keras_tensor(t):
      raise ValueError(_KERAS_TENSOR_TYPE_CHECK_ERROR_MSG.format(t))
  end_ids = {id(kt) for kt in end_keras_tensors}
  # Track all the end tensors we found so far, if we didn't reach all the
  # user-specified keras inputs after we finish the search, then that's an
  # error since the inputs are disconnected from the outputs.
  end_ids_found = set()

  # FIFO queue of nodes to visit. `cursor` advances through the list instead
  # of popping from the front, which keeps each dequeue O(1) rather than the
  # O(n) cost of `list.pop(0)`; visitation order is unchanged.
  nodes_to_visit = [t.node for t in start_keras_tensors]
  cursor = 0
  nodes_in_graph = []
  node_id_visited = set()

  while cursor < len(nodes_to_visit):
    node = nodes_to_visit[cursor]
    cursor += 1
    if id(node) in node_id_visited:
      continue
    node_id_visited.add(id(node))
    nodes_in_graph.append(node)
    # Any input keras_tensor that produce the current node.
    for kt in node.keras_inputs:
      if id(kt) in end_ids:
        # We found the inputs of the model, stop tracing upstream nodes
        end_ids_found.add(id(kt))
        continue

      inbound_node = kt.node
      # In case this is the tf.keras.Input node, we have reached the end of the
      # tracing of upstream nodes. Any further tracing will just be an
      # infinite loop. we should raise an error here since we didn't find the
      # input in the user-specified inputs.
      if inbound_node.is_input:
        raise ValueError('Found input tensor cannot be reached given provided '
                         'output tensors. Please make sure the tensor {} is '
                         'included in the model inputs when building '
                         'functional model.'.format(kt))
      nodes_to_visit.append(inbound_node)

  # Do a final check and make sure we have reached all the user-specified inputs
  if end_ids != end_ids_found:
    unvisited_inputs = [kt for kt in end_keras_tensors
                        if id(kt) not in end_ids_found]
    raise ValueError('Found unvisited input tensors that are disconnected from '
                     'the outputs: {}'.format(unvisited_inputs))
  return nodes_in_graph
def clone_graph_nodes(inputs, outputs):
  """Clone the `Node` between the inputs and output tensors.

  This function is used to create a new functional model from any intermediate
  keras tensors. The clone of the nodes mimic the behavior of reconstructing the
  functional graph network by re-executing all the __call__ methods. The cloned
  nodes will be appended to the layers.

  Note that a new tf.keras.Inputs will be created for any items in the `inputs`

  Args:
    inputs: A nested structure of keras_tensors.
    outputs: A nested structure of keras_tensors.

  Returns:
    A pair of inputs and outputs, with cloned keras_tensors. They can be used to
    create a new functional model.
  """
  graph_nodes = find_nodes_by_inputs_and_outputs(inputs, outputs)
  new_inputs = []
  new_outputs = []
  # Maps id() of an original keras_tensor to its clone, so every tensor is
  # cloned at most once and its copy is reused wherever it appears (this also
  # avoids clobbering the _keras_history attached to the originals).
  tensor_clones = {}
  for kt_input in tf.nest.flatten(inputs):
    if kt_input.node.is_input:
      # Tensors that already come from tf.keras.Input are kept as is.
      new_inputs.append(kt_input)
      tensor_clones[id(kt_input)] = kt_input
    else:
      # Any intermediate keras_tensor gets a fresh tf.keras.Input wrapper.
      copied = _clone_keras_tensor(kt_input)
      wrapped = input_layer_module.Input(tensor=copied)
      new_inputs.append(wrapped)
      tensor_clones[id(kt_input)] = wrapped
  new_inputs = tf.nest.pack_sequence_as(inputs, new_inputs)

  for kt_output in tf.nest.flatten(outputs):
    copied = _clone_keras_tensor(kt_output)
    # Reuse the old _keras_history here: the Node constructor consults it via
    # is_keras_tensor() and will overwrite it for the corresponding layer
    # output anyway.
    copied._keras_history = kt_output._keras_history  # pylint: disable=protected-access
    new_outputs.append(copied)
    tensor_clones[id(kt_output)] = copied
  new_outputs = tf.nest.pack_sequence_as(outputs, new_outputs)

  for node in graph_nodes:
    # Clone (or reuse an already-cloned copy of) every tensor this node
    # references.
    outputs_clone = clone_keras_tensors(node.output_tensors, tensor_clones)
    args_clone = clone_keras_tensors(node.call_args, tensor_clones)
    kwargs_clone = clone_keras_tensors(node.call_kwargs, tensor_clones)
    # The Node constructor wires itself into the layer graph: it updates the
    # layer's _inbound_nodes, sets _keras_history on the outputs, and adds
    # itself to the _outbound_nodes of the layers that produced its inputs.
    node_module.Node(node.layer,
                     call_args=args_clone,
                     call_kwargs=kwargs_clone,
                     outputs=outputs_clone)
  return new_inputs, new_outputs
def clone_keras_tensors(args, keras_tensor_mapping):
  """Clone the keras tensors from the inputs.

  For any KerasTensor instance in the `args`, a new copy of KerasTensor will
  be created if it has not been cloned yet (by checking the
  `keras_tensor_mapping`). For any other types, the instance will be unchanged.
  This function is useful for cloning the Nodes since KerasTensor can't be
  reused across the models.

  Args:
    args: A nested structure of objects, which could contain KerasTensor.
    keras_tensor_mapping: A dict contains the ID of original KerasTensor, and
      the cloned KerasTensor instance. The dict will be updated with newly
      copied KerasTensor instances within this method.

  Returns:
    Same structure as inputs, with KerasTensor cloned.
  """
  flat_clones = []
  for item in tf.nest.flatten(args):
    if not node_module.is_keras_tensor(item):
      # Non-tensor values pass through untouched.
      flat_clones.append(item)
      continue
    key = id(item)
    if key not in keras_tensor_mapping:
      # First encounter: clone the tensor and remember the copy for reuse.
      clone = _clone_keras_tensor(item)
      clone._keras_history = item._keras_history  # pylint: disable=protected-access
      keras_tensor_mapping[key] = clone
    flat_clones.append(keras_tensor_mapping[key])
  return tf.nest.pack_sequence_as(args, flat_clones)
def _clone_keras_tensor(kt):
  """Create an identical keras_tensor based on the input.

  We use keras_tensor_to_placeholder and keras_tensor_from_tensor to make sure
  inferred shape are not lost during the copy.

  Args:
    kt: the input KerasTensor.

  Returns:
    An identical copy of the input KerasTensor.
  """
  # The scratch graph only hosts the throwaway placeholder used for the
  # placeholder round trip; nothing from it escapes except the new tensor.
  with backend._scratch_graph() as graph, graph.as_default():  # pylint: disable=protected-access
    return keras_tensor.keras_tensor_from_tensor(
        keras_tensor.keras_tensor_to_placeholder(kt))
|
keras-team/keras
|
keras/engine/functional_utils.py
|
Python
|
apache-2.0
| 10,492
|
[
"VisIt"
] |
019fbff137b14e9d271277542fb2831df16929d20fdd1d0905a41f7532484584
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for linear algebra."""
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# Linear algebra ops.
# Convenience aliases that re-export implementations living in the underlying
# ops modules under shorter local names.
band_part = array_ops.matrix_band_part
cholesky = linalg_ops.cholesky
cholesky_solve = linalg_ops.cholesky_solve
det = linalg_ops.matrix_determinant
slogdet = gen_linalg_ops.log_matrix_determinant
# Register the exported name and dispatch support for the generated op.
tf_export('linalg.slogdet')(dispatch.add_dispatch_support(slogdet))
diag = array_ops.matrix_diag
diag_part = array_ops.matrix_diag_part
eigh = linalg_ops.self_adjoint_eig
eigvalsh = linalg_ops.self_adjoint_eigvals
einsum = special_math_ops.einsum
eye = linalg_ops.eye
inv = linalg_ops.matrix_inverse
logm = gen_linalg_ops.matrix_logarithm
lu = gen_linalg_ops.lu
# Register the exported name and dispatch support for the generated op.
tf_export('linalg.logm')(dispatch.add_dispatch_support(logm))
lstsq = linalg_ops.matrix_solve_ls
norm = linalg_ops.norm
qr = linalg_ops.qr
set_diag = array_ops.matrix_set_diag
solve = linalg_ops.matrix_solve
sqrtm = linalg_ops.matrix_square_root
svd = linalg_ops.svd
tensordot = math_ops.tensordot
trace = math_ops.trace
transpose = array_ops.matrix_transpose
triangular_solve = linalg_ops.matrix_triangular_solve
@tf_export('linalg.logdet')
@dispatch.add_dispatch_support
def logdet(matrix, name=None):
  """Computes log of the determinant of a hermitian positive definite matrix.

  ```python
  # Compute the determinant of a matrix while reducing the chance of over- or
  underflow:
  A = ... # shape 10 x 10
  det = tf.exp(tf.linalg.logdet(A))  # scalar
  ```

  Args:
    matrix:  A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name:  A name to give this `Op`.  Defaults to `logdet`.

  Returns:
    The natural log of the determinant of `matrix`.

  @compatibility(numpy)
  Equivalent to numpy.linalg.slogdet, although no sign is returned since only
  hermitian positive definite matrices are supported.
  @end_compatibility
  """
  # log det(A) = 2 * sum(log(real(diag(C)))), with A = C C^H the Cholesky
  # factorization of A.
  with ops.name_scope(name, 'logdet', [matrix]):
    chol_factor = gen_linalg_ops.cholesky(matrix)
    diag_vals = array_ops.matrix_diag_part(chol_factor)
    log_diag = math_ops.log(math_ops.real(diag_vals))
    return 2.0 * math_ops.reduce_sum(log_diag, axis=[-1])
@tf_export('linalg.adjoint')
@dispatch.add_dispatch_support
def adjoint(matrix, name=None):
  """Transposes the last two dimensions of and conjugates tensor `matrix`.

  For example:

  ```python
  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
                   [4 + 4j, 5 + 5j, 6 + 6j]])
  tf.linalg.adjoint(x)  # [[1 - 1j, 4 - 4j],
                        #  [2 - 2j, 5 - 5j],
                        #  [3 - 3j, 6 - 6j]]
  ```

  Args:
    matrix:  A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name:  A name to give this `Op` (optional).

  Returns:
    The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of
    matrix.
  """
  with ops.name_scope(name, 'adjoint', [matrix]):
    # Conjugate-transpose of the two innermost dimensions.
    tensor = ops.convert_to_tensor(matrix, name='matrix')
    return array_ops.matrix_transpose(tensor, conjugate=True)
# This section is ported nearly verbatim from Eigen's implementation:
# https://eigen.tuxfamily.org/dox/unsupported/MatrixExponential_8h_source.html
def _matrix_exp_pade3(matrix):
  """3rd-order Pade approximant for matrix exponential."""
  coeffs = [constant_op.constant(c, matrix.dtype)
            for c in (120.0, 60.0, 12.0)]
  ident = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  # U is odd in `matrix`, V is even; expm ~ (V - U)^{-1} (V + U).
  matrix_u = math_ops.matmul(matrix, m2 + coeffs[1] * ident)
  matrix_v = coeffs[2] * m2 + coeffs[0] * ident
  return matrix_u, matrix_v
def _matrix_exp_pade5(matrix):
  """5th-order Pade approximant for matrix exponential."""
  coeffs = [constant_op.constant(c, matrix.dtype)
            for c in (30240.0, 15120.0, 3360.0, 420.0, 30.0)]
  ident = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  m4 = math_ops.matmul(m2, m2)
  # U is odd in `matrix`, V is even; expm ~ (V - U)^{-1} (V + U).
  matrix_u = math_ops.matmul(matrix, m4 + coeffs[3] * m2 + coeffs[1] * ident)
  matrix_v = coeffs[4] * m4 + coeffs[2] * m2 + coeffs[0] * ident
  return matrix_u, matrix_v
def _matrix_exp_pade7(matrix):
  """7th-order Pade approximant for matrix exponential."""
  coeffs = [constant_op.constant(c, matrix.dtype)
            for c in (17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0,
                      1512.0, 56.0)]
  ident = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  m4 = math_ops.matmul(m2, m2)
  m6 = math_ops.matmul(m4, m2)
  # U is odd in `matrix`, V is even; expm ~ (V - U)^{-1} (V + U).
  matrix_u = math_ops.matmul(
      matrix, m6 + coeffs[5] * m4 + coeffs[3] * m2 + coeffs[1] * ident)
  matrix_v = (coeffs[6] * m6 + coeffs[4] * m4 + coeffs[2] * m2 +
              coeffs[0] * ident)
  return matrix_u, matrix_v
def _matrix_exp_pade9(matrix):
  """9th-order Pade approximant for matrix exponential."""
  coeffs = [constant_op.constant(c, matrix.dtype)
            for c in (17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
                      30270240.0, 2162160.0, 110880.0, 3960.0, 90.0)]
  ident = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  m4 = math_ops.matmul(m2, m2)
  m6 = math_ops.matmul(m4, m2)
  m8 = math_ops.matmul(m6, m2)
  # U is odd in `matrix`, V is even; expm ~ (V - U)^{-1} (V + U).
  matrix_u = math_ops.matmul(
      matrix,
      m8 + coeffs[7] * m6 + coeffs[5] * m4 + coeffs[3] * m2 +
      coeffs[1] * ident)
  matrix_v = (
      coeffs[8] * m8 + coeffs[6] * m6 + coeffs[4] * m4 + coeffs[2] * m2 +
      coeffs[0] * ident)
  return matrix_u, matrix_v
def _matrix_exp_pade13(matrix):
  """13th-order Pade approximant for matrix exponential."""
  coeffs = [constant_op.constant(c, matrix.dtype)
            for c in (64764752532480000.0, 32382376266240000.0,
                      7771770303897600.0, 1187353796428800.0,
                      129060195264000.0, 10559470521600.0, 670442572800.0,
                      33522128640.0, 1323241920.0, 40840800.0, 960960.0,
                      16380.0, 182.0)]
  ident = linalg_ops.eye(
      array_ops.shape(matrix)[-2],
      batch_shape=array_ops.shape(matrix)[:-2],
      dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  m4 = math_ops.matmul(m2, m2)
  m6 = math_ops.matmul(m4, m2)
  # The 13th-order form reuses m6 to avoid computing higher explicit powers.
  # U is odd in `matrix`, V is even; expm ~ (V - U)^{-1} (V + U).
  matrix_u = math_ops.matmul(
      matrix,
      math_ops.matmul(m6, m6 + coeffs[11] * m4 + coeffs[9] * m2) +
      coeffs[7] * m6 + coeffs[5] * m4 + coeffs[3] * m2 + coeffs[1] * ident)
  matrix_v = (
      math_ops.matmul(m6, coeffs[12] * m6 + coeffs[10] * m4 + coeffs[8] * m2) +
      coeffs[6] * m6 + coeffs[4] * m4 + coeffs[2] * m2 + coeffs[0] * ident)
  return matrix_u, matrix_v
@tf_export('linalg.expm')
@dispatch.add_dispatch_support
def matrix_exponential(input, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the matrix exponential of one or more square matrices.

  $$exp(A) = \sum_{n=0}^\infty A^n/n!$$

  The exponential is computed using a combination of the scaling and squaring
  method and the Pade approximation. Details can be found in:
  Nicholas J. Higham, "The scaling and squaring method for the matrix
  exponential revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.

  The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
  form square matrices. The output is a tensor of the same shape as the input
  containing the exponential for all input submatrices `[..., :, :]`.

  Args:
    input: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or
      `complex128` with shape `[..., M, M]`.
    name:  A name to give this `Op` (optional).

  Returns:
    the matrix exponential of the input.

  Raises:
    ValueError: An unsupported type is provided as input.

  @compatibility(scipy)
  Equivalent to scipy.linalg.expm
  @end_compatibility
  """
  with ops.name_scope(name, 'matrix_exponential', [input]):
    matrix = ops.convert_to_tensor(input, name='input')
    # Degenerate 0x0 matrices: the exponential is the (empty) matrix itself.
    if matrix.shape[-2:] == [0, 0]:
      return matrix
    batch_shape = matrix.shape[:-2]
    if not batch_shape.is_fully_defined():
      batch_shape = array_ops.shape(matrix)[:-2]

    # reshaping the batch makes the where statements work better
    matrix = array_ops.reshape(
        matrix, array_ops.concat(([-1], array_ops.shape(matrix)[-2:]), axis=0))
    # L1 matrix norm (max absolute column sum) of each matrix in the batch;
    # two trailing singleton dims are kept so it broadcasts against matrices.
    l1_norm = math_ops.reduce_max(
        math_ops.reduce_sum(
            math_ops.abs(matrix),
            axis=array_ops.size(array_ops.shape(matrix)) - 2),
        axis=-1)[..., array_ops.newaxis, array_ops.newaxis]

    const = lambda x: constant_op.constant(x, l1_norm.dtype)

    # Element-wise selection of the Pade order per matrix, based on a list of
    # ascending norm thresholds `vals` and the corresponding `cases`.
    def _nest_where(vals, cases):
      assert len(vals) == len(cases) - 1
      if len(vals) == 1:
        return array_ops.where_v2(
            math_ops.less(l1_norm, const(vals[0])), cases[0], cases[1])
      else:
        return array_ops.where_v2(
            math_ops.less(l1_norm, const(vals[0])), cases[0],
            _nest_where(vals[1:], cases[1:]))

    # Choose the Pade approximant (and the scaling power for the highest
    # order) per dtype; thresholds come from the Higham (2005) algorithm.
    if matrix.dtype in [dtypes.float16, dtypes.float32, dtypes.complex64]:
      maxnorm = const(3.925724783138660)
      squarings = math_ops.maximum(
          math_ops.floor(
              math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
      u3, v3 = _matrix_exp_pade3(matrix)
      u5, v5 = _matrix_exp_pade5(matrix)
      u7, v7 = _matrix_exp_pade7(
          matrix /
          math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype))
      conds = (4.258730016922831e-001, 1.880152677804762e+000)
      u = _nest_where(conds, (u3, u5, u7))
      v = _nest_where(conds, (v3, v5, v7))
    elif matrix.dtype in [dtypes.float64, dtypes.complex128]:
      maxnorm = const(5.371920351148152)
      squarings = math_ops.maximum(
          math_ops.floor(
              math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
      u3, v3 = _matrix_exp_pade3(matrix)
      u5, v5 = _matrix_exp_pade5(matrix)
      u7, v7 = _matrix_exp_pade7(matrix)
      u9, v9 = _matrix_exp_pade9(matrix)
      u13, v13 = _matrix_exp_pade13(
          matrix /
          math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype))
      conds = (1.495585217958292e-002, 2.539398330063230e-001,
               9.504178996162932e-001, 2.097847961257068e+000)
      u = _nest_where(conds, (u3, u5, u7, u9, u13))
      v = _nest_where(conds, (v3, v5, v7, v9, v13))
    else:
      raise ValueError('tf.linalg.expm does not support matrices of type %s' %
                       matrix.dtype)

    # Pade result: expm(A) ~ (V - U)^{-1} (V + U); non-finite inputs yield NaN.
    is_finite = math_ops.is_finite(math_ops.reduce_max(l1_norm))
    nan = constant_op.constant(np.nan, matrix.dtype)
    result = control_flow_ops.cond(
        is_finite, lambda: linalg_ops.matrix_solve(-u + v, u + v),
        lambda: array_ops.fill(array_ops.shape(matrix), nan))
    max_squarings = math_ops.reduce_max(squarings)
    i = const(0.0)

    # Loop condition: keep squaring while i < max_squarings (skipped entirely
    # when the input was not finite).
    def c(i, _):
      return control_flow_ops.cond(is_finite,
                                   lambda: math_ops.less(i, max_squarings),
                                   lambda: constant_op.constant(False))

    # Loop body: square only the matrices whose own `squarings` count has not
    # been reached yet; others pass through unchanged.
    def b(i, r):
      return i + 1, array_ops.where_v2(
          math_ops.less(i, squarings), math_ops.matmul(r, r), r)

    # Undo the scaling: repeatedly square the Pade result.
    _, result = control_flow_ops.while_loop(c, b, [i, result])
    # Restore the original (possibly dynamic) batch shape.
    if not matrix.shape.is_fully_defined():
      return array_ops.reshape(
          result,
          array_ops.concat((batch_shape, array_ops.shape(result)[-2:]), axis=0))
    return array_ops.reshape(result, batch_shape.concatenate(result.shape[-2:]))
@tf_export('linalg.banded_triangular_solve', v1=[])
def banded_triangular_solve(
    bands,
    rhs,
    lower=True,
    adjoint=False,  # pylint: disable=redefined-outer-name
    name=None):
  r"""Solve triangular systems of equations with a banded solver.

  `bands` is a tensor of shape `[..., K, M]`, where `K` represents the number
  of bands stored. This corresponds to a batch of `M` by `M` matrices, whose
  `K` subdiagonals (when `lower` is `True`) are stored.

  This operator broadcasts the batch dimensions of `bands` and the batch
  dimensions of `rhs`.

  Examples:

  Storing 2 bands of a 3x3 matrix.
  Note that first element in the second row is ignored due to
  the 'LEFT_RIGHT' padding.

  >>> x = [[2., 3., 4.], [1., 2., 3.]]
  >>> x2 = [[2., 3., 4.], [10000., 2., 3.]]
  >>> y = tf.zeros([3, 3])
  >>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(-1, 0))
  >>> z
  <tf.Tensor: shape=(3, 3), dtype=float32, numpy=
  array([[2., 0., 0.],
         [2., 3., 0.],
         [0., 3., 4.]], dtype=float32)>
  >>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([3, 1]))
  >>> soln
  <tf.Tensor: shape=(3, 1), dtype=float32, numpy=
  array([[0.5 ],
         [0.  ],
         [0.25]], dtype=float32)>
  >>> are_equal = soln == tf.linalg.banded_triangular_solve(x2, tf.ones([3, 1]))
  >>> tf.reduce_all(are_equal).numpy()
  True
  >>> are_equal = soln == tf.linalg.triangular_solve(z, tf.ones([3, 1]))
  >>> tf.reduce_all(are_equal).numpy()
  True

  Storing 2 superdiagonals of a 4x4 matrix. Because of the 'LEFT_RIGHT' padding
  the last element of the first row is ignored.

  >>> x = [[2., 3., 4., 5.], [-1., -2., -3., -4.]]
  >>> y = tf.zeros([4, 4])
  >>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(0, 1))
  >>> z
  <tf.Tensor: shape=(4, 4), dtype=float32, numpy=
  array([[-1.,  2.,  0.,  0.],
         [ 0., -2.,  3.,  0.],
         [ 0.,  0., -3.,  4.],
         [ 0.,  0., -0., -4.]], dtype=float32)>
  >>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([4, 1]), lower=False)
  >>> soln
  <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
  array([[-4.       ],
         [-1.5      ],
         [-0.6666667],
         [-0.25     ]], dtype=float32)>
  >>> are_equal = (soln == tf.linalg.triangular_solve(
  ...   z, tf.ones([4, 1]), lower=False))
  >>> tf.reduce_all(are_equal).numpy()
  True

  Args:
    bands: A `Tensor` describing the bands of the left hand side, with shape
      `[..., K, M]`. The `K` rows correspond to the diagonal to the `K - 1`-th
      diagonal (the diagonal is the top row) when `lower` is `True` and
      otherwise the `K - 1`-th superdiagonal to the diagonal (the diagonal is
      the bottom row) when `lower` is `False`. The bands are stored with
      'LEFT_RIGHT' alignment, where the superdiagonals are padded on the right
      and subdiagonals are padded on the left. This is the alignment cuSPARSE
      uses.  See  `tf.linalg.set_diag` for more details.
    rhs: A `Tensor` of shape [..., M] or [..., M, N] and with the same dtype as
      `diagonals`. Note that if the shape of `rhs` and/or `diags` isn't known
      statically, `rhs` will be treated as a matrix rather than a vector.
    lower: An optional `bool`. Defaults to `True`. Boolean indicating whether
      `bands` represents a lower or upper triangular matrix.
    adjoint: An optional `bool`. Defaults to `False`. Boolean indicating whether
      to solve with the matrix's block-wise adjoint.
    name:  A name to give this `Op` (optional).

  Returns:
    A `Tensor` of shape [..., M] or [..., M, N] containing the solutions.
  """
  # Thin wrapper: all the real work happens in the generated kernel op.
  with ops.name_scope(name, 'banded_triangular_solve', [bands, rhs]):
    return gen_linalg_ops.banded_triangular_solve(
        bands, rhs, lower=lower, adjoint=adjoint)
@tf_export('linalg.tridiagonal_solve')
@dispatch.add_dispatch_support
def tridiagonal_solve(diagonals,
                      rhs,
                      diagonals_format='compact',
                      transpose_rhs=False,
                      conjugate_rhs=False,
                      name=None,
                      partial_pivoting=True,
                      perturb_singular=False):
  r"""Solves tridiagonal systems of equations.

  The input can be supplied in various formats: `matrix`, `sequence` and
  `compact`, specified by the `diagonals_format` arg.

  In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
  two inner-most dimensions representing the square tridiagonal matrices.
  Elements outside of the three diagonals will be ignored.

  In `sequence` format, `diagonals` are supplied as a tuple or list of three
  tensors of shapes `[..., N]`, `[..., M]`, `[..., N]` representing
  superdiagonals, diagonals, and subdiagonals, respectively. `N` can be either
  `M-1` or `M`; in the latter case, the last element of superdiagonal and the
  first element of subdiagonal will be ignored.

  In `compact` format the three diagonals are brought together into one tensor
  of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
  diagonals, and subdiagonals, in order. Similarly to `sequence` format,
  elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.

  The `compact` format is recommended as the one with best performance. In case
  you need to cast a tensor into a compact format manually, use `tf.gather_nd`.
  An example for a tensor of shape [m, m]:

  ```python
  rhs = tf.constant([...])
  matrix = tf.constant([[...]])
  m = matrix.shape[0]
  dummy_idx = [0, 0]  # An arbitrary element to use as a dummy
  indices = [[[i, i + 1] for i in range(m - 1)] + [dummy_idx],  # Superdiagonal
             [[i, i] for i in range(m)],                        # Diagonal
             [dummy_idx] + [[i + 1, i] for i in range(m - 1)]]  # Subdiagonal
  diagonals=tf.gather_nd(matrix, indices)
  x = tf.linalg.tridiagonal_solve(diagonals, rhs)
  ```

  Regardless of the `diagonals_format`, `rhs` is a tensor of shape `[..., M]` or
  `[..., M, K]`. The latter allows to simultaneously solve K systems with the
  same left-hand sides and K different right-hand sides. If `transpose_rhs`
  is set to `True` the expected shape is `[..., M]` or `[..., K, M]`.

  The batch dimensions, denoted as `...`, must be the same in `diagonals` and
  `rhs`.

  The output is a tensor of the same shape as `rhs`: either `[..., M]` or
  `[..., M, K]`.

  The op isn't guaranteed to raise an error if the input matrix is not
  invertible. `tf.debugging.check_numerics` can be applied to the output to
  detect invertibility problems.

  **Note**: with large batch sizes, the computation on the GPU may be slow, if
  either `partial_pivoting=True` or there are multiple right-hand sides
  (`K > 1`). If this issue arises, consider if it's possible to disable pivoting
  and have `K = 1`, or, alternatively, consider using CPU.

  On CPU, solution is computed via Gaussian elimination with or without partial
  pivoting, depending on `partial_pivoting` parameter. On GPU, Nvidia's cuSPARSE
  library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv

  Args:
    diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
      shape depends of `diagonals_format`, see description above. Must be
      `float32`, `float64`, `complex64`, or `complex128`.
    rhs: A `Tensor` of shape [..., M] or [..., M, K] and with the same dtype as
      `diagonals`. Note that if the shape of `rhs` and/or `diags` isn't known
      statically, `rhs` will be treated as a matrix rather than a vector.
    diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is
      `compact`.
    transpose_rhs: If `True`, `rhs` is transposed before solving (has no effect
      if the shape of rhs is [..., M]).
    conjugate_rhs: If `True`, `rhs` is conjugated before solving.
    name:  A name to give this `Op` (optional).
    partial_pivoting: whether to perform partial pivoting. `True` by default.
      Partial pivoting makes the procedure more stable, but slower. Partial
      pivoting is unnecessary in some cases, including diagonally dominant and
      symmetric positive definite matrices (see e.g. theorem 9.12 in [1]).
    perturb_singular: whether to perturb singular matrices to return a finite
      result. `False` by default. If true, solutions to systems involving
      a singular matrix will be computed by perturbing near-zero pivots in
      the partially pivoted LU decomposition. Specifically, tiny pivots are
      perturbed by an amount of order `eps * max_{ij} |U(i,j)|` to avoid
      overflow. Here `U` is the upper triangular part of the LU decomposition,
      and `eps` is the machine precision. This is useful for solving
      numerically singular systems when computing eigenvectors by inverse
      iteration.
      If `partial_pivoting` is `False`, `perturb_singular` must be `False` as
      well.

  Returns:
    A `Tensor` of shape [..., M] or [..., M, K] containing the solutions.
    If the input matrix is singular, the result is undefined.

  Raises:
    ValueError: Is raised if any of the following conditions hold:
      1. An unsupported type is provided as input,
      2. the input tensors have incorrect shapes,
      3. `perturb_singular` is `True` but `partial_pivoting` is not.
    UnimplementedError: Whenever `partial_pivoting` is true and the backend is
      XLA, or whenever `perturb_singular` is true and the backend is
      XLA or GPU.

  [1] Nicholas J. Higham (2002). Accuracy and Stability of Numerical Algorithms:
    Second Edition. SIAM. p. 175. ISBN 978-0-89871-802-7.

  """
  if perturb_singular and not partial_pivoting:
    raise ValueError('partial_pivoting must be True if perturb_singular is.')

  if diagonals_format == 'compact':
    return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                             conjugate_rhs, partial_pivoting,
                                             perturb_singular, name)

  if diagonals_format == 'sequence':
    if not isinstance(diagonals, (tuple, list)) or len(diagonals) != 3:
      raise ValueError('Expected diagonals to be a sequence of length 3.')
    superdiag, maindiag, subdiag = diagonals
    if (not subdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1]) or
        not superdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1])):
      raise ValueError(
          'Tensors representing the three diagonals must have the same shape,'
          'except for the last dimension, got {}, {}, {}'.format(
              subdiag.shape, maindiag.shape, superdiag.shape))

    m = tensor_shape.dimension_value(maindiag.shape[-1])

    # Pads a length M-1 sub/superdiagonal to length M (the padded element is
    # ignored by the solver); length-M inputs pass through unchanged.
    def pad_if_necessary(t, name, last_dim_padding):
      n = tensor_shape.dimension_value(t.shape[-1])
      if not n or n == m:
        return t
      if n == m - 1:
        paddings = ([[0, 0] for _ in range(len(t.shape) - 1)] +
                    [last_dim_padding])
        return array_ops.pad(t, paddings)
      # Fixed error-message typo: was 'Expected {} to be have length ...'.
      raise ValueError('Expected {} to have length {} or {}, got {}.'.format(
          name, m, m - 1, n))

    subdiag = pad_if_necessary(subdiag, 'subdiagonal', [1, 0])
    superdiag = pad_if_necessary(superdiag, 'superdiagonal', [0, 1])

    # Assemble the compact [..., 3, M] representation and delegate.
    diagonals = array_ops.stack((superdiag, maindiag, subdiag), axis=-2)
    return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                             conjugate_rhs, partial_pivoting,
                                             perturb_singular, name)

  if diagonals_format == 'matrix':
    m1 = tensor_shape.dimension_value(diagonals.shape[-1])
    m2 = tensor_shape.dimension_value(diagonals.shape[-2])
    if m1 and m2 and m1 != m2:
      raise ValueError(
          'Expected last two dimensions of diagonals to be same, got {} and {}'
          .format(m1, m2))
    m = m1 or m2
    # Extract the three diagonals into the compact layout and delegate.
    diagonals = array_ops.matrix_diag_part(
        diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT')
    return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                             conjugate_rhs, partial_pivoting,
                                             perturb_singular, name)

  raise ValueError('Unrecognized diagonals_format: {}'.format(diagonals_format))
def _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                      conjugate_rhs, partial_pivoting,
                                      perturb_singular, name):
  """Helper function used after the input has been cast to compact form."""
  lhs_rank = diagonals.shape.rank
  rhs_rank = rhs.shape.rank

  # Static validation, performed only where ranks/shapes are known.
  if lhs_rank:
    if lhs_rank < 2:
      raise ValueError(
          'Expected diagonals to have rank at least 2, got {}'.format(
              lhs_rank))
    if rhs_rank and rhs_rank not in (lhs_rank, lhs_rank - 1):
      raise ValueError('Expected the rank of rhs to be {} or {}, got {}'.format(
          lhs_rank - 1, lhs_rank, rhs_rank))
    if (rhs_rank and
        not diagonals.shape[:-2].is_compatible_with(rhs.shape[:lhs_rank - 2])):
      raise ValueError('Batch shapes {} and {} are incompatible'.format(
          diagonals.shape[:-2], rhs.shape[:lhs_rank - 2]))
  if diagonals.shape[-2] and diagonals.shape[-2] != 3:
    raise ValueError('Expected 3 diagonals got {}'.format(diagonals.shape[-2]))

  def _check_matching_sizes():
    # Each left-hand-side equation needs exactly one right-hand-side row.
    num_eqs = diagonals.shape[-1]
    num_rhs = rhs.shape[-2]
    if num_eqs and num_rhs and num_eqs != num_rhs:
      raise ValueError('Expected number of left-hand sided and right-hand '
                       'sides to be equal, got {} and {}'.format(
                           num_eqs, num_rhs))

  # rhs given as a vector (one fewer dim than diagonals): `transpose_rhs` is
  # meaningless here, so only conjugation is honored.
  if rhs_rank and lhs_rank and rhs_rank == lhs_rank - 1:
    if conjugate_rhs:
      rhs = math_ops.conj(rhs)
    rhs = array_ops.expand_dims(rhs, -1)
    _check_matching_sizes()
    solution = linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting,
                                            perturb_singular, name)
    return array_ops.squeeze(solution, -1)

  if transpose_rhs:
    rhs = array_ops.matrix_transpose(rhs, conjugate=conjugate_rhs)
  elif conjugate_rhs:
    rhs = math_ops.conj(rhs)

  _check_matching_sizes()
  return linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting,
                                      perturb_singular, name)
@tf_export('linalg.tridiagonal_matmul')
@dispatch.add_dispatch_support
def tridiagonal_matmul(diagonals, rhs, diagonals_format='compact', name=None):
  r"""Multiplies tridiagonal matrix by matrix.

  `diagonals` is a representation of a 3-diagonal NxN matrix, whose exact
  layout is selected by `diagonals_format`:

  * `matrix`: `diagonals` has shape `[..., M, M]`, the two inner-most
    dimensions holding square tridiagonal matrices. Entries outside the three
    diagonals are ignored.
  * `sequence`: `diagonals` is a list or tuple of three tensors
    `[superdiag, maindiag, subdiag]`, each of shape `[..., M]`. The last
    element of `superdiag` and the first element of `subdiag` are ignored.
  * `compact`: `diagonals` has shape `[..., 3, M]`, stacking superdiagonals,
    diagonals, and subdiagonals, in order. As with `sequence`, elements
    `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.

  The `sequence` format is recommended as the one with the best performance.

  `rhs` is the matrix to the right of multiplication, of shape `[..., M, N]`.

  Example:

  ```python
  superdiag = tf.constant([-1, -1, 0], dtype=tf.float64)
  maindiag = tf.constant([2, 2, 2], dtype=tf.float64)
  subdiag = tf.constant([0, -1, -1], dtype=tf.float64)
  diagonals = [superdiag, maindiag, subdiag]
  rhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64)
  x = tf.linalg.tridiagonal_matmul(diagonals, rhs, diagonals_format='sequence')
  ```

  Args:
    diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
      shape depends of `diagonals_format`, see description above. Must be
      `float32`, `float64`, `complex64`, or `complex128`.
    rhs: A `Tensor` of shape [..., M, N] and with the same dtype as `diagonals`.
    diagonals_format: one of `sequence`, or `compact`. Default is `compact`.
    name: A name to give this `Op` (optional).

  Returns:
    A `Tensor` of shape [..., M, N] containing the result of multiplication.

  Raises:
    ValueError: An unsupported type is provided as input, or when the input
      tensors have incorrect shapes.
  """
  if diagonals_format == 'sequence':
    superdiag, maindiag, subdiag = diagonals
  elif diagonals_format in ('compact', 'matrix'):
    if diagonals_format == 'matrix':
      # Reduce the full matrix to compact form first; only the three central
      # bands are kept.
      last_dim = tensor_shape.dimension_value(diagonals.shape[-1])
      second_last_dim = tensor_shape.dimension_value(diagonals.shape[-2])
      if last_dim and second_last_dim and last_dim != second_last_dim:
        raise ValueError(
            'Expected last two dimensions of diagonals to be same, got {} and {}'
            .format(last_dim, second_last_dim))
      diagonals = array_ops.matrix_diag_part(
          diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT')
    superdiag = diagonals[..., 0, :]
    maindiag = diagonals[..., 1, :]
    subdiag = diagonals[..., 2, :]
  else:
    raise ValueError('Unrecognized diagonals_format: %s' % diagonals_format)

  # C++ backend requires matrices.
  # Converting 1-dimensional vectors to matrices with 1 row.
  superdiag = array_ops.expand_dims(superdiag, -2)
  maindiag = array_ops.expand_dims(maindiag, -2)
  subdiag = array_ops.expand_dims(subdiag, -2)

  return linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs, name)
def _maybe_validate_matrix(a, validate_args):
"""Checks that input is a `float` matrix."""
assertions = []
if not a.dtype.is_floating:
raise TypeError('Input `a` must have `float`-like `dtype` '
'(saw {}).'.format(a.dtype.name))
if a.shape is not None and a.shape.rank is not None:
if a.shape.rank < 2:
raise ValueError('Input `a` must have at least 2 dimensions '
'(saw: {}).'.format(a.shape.rank))
elif validate_args:
assertions.append(
check_ops.assert_rank_at_least(
a, rank=2, message='Input `a` must have at least 2 dimensions.'))
return assertions
@tf_export('linalg.matrix_rank')
@dispatch.add_dispatch_support
def matrix_rank(a, tol=None, validate_args=False, name=None):
  """Compute the matrix rank of one or more matrices.

  Args:
    a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
      pseudo-inverted.
    tol: Threshold below which the singular value is counted as 'zero'.
      Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).
    validate_args: When `True`, additional assertions might be embedded in the
      graph.
      Default value: `False` (i.e., no graph assertions are added).
    name: Python `str` prefixed to ops created by this function.
      Default value: 'matrix_rank'.

  Returns:
    matrix_rank: (Batch of) `int32` scalars representing the number of non-zero
      singular values.
  """
  with ops.name_scope(name or 'matrix_rank'):
    a = ops.convert_to_tensor(a, dtype_hint=dtypes.float32, name='a')
    assertions = _maybe_validate_matrix(a, validate_args)
    if assertions:
      with ops.control_dependencies(assertions):
        a = array_ops.identity(a)
    # Rank equals the number of singular values above the tolerance.
    singular_values = svd(a, compute_uv=False)
    if tol is None:
      # Default tolerance mirrors numpy.linalg.matrix_rank:
      # eps * max(rows, cols) * largest singular value.
      if a.shape[-2:].is_fully_defined():
        max_dim = np.max(a.shape[-2:].as_list())
      else:
        max_dim = math_ops.reduce_max(array_ops.shape(a)[-2:])
      eps = np.finfo(a.dtype.as_numpy_dtype).eps
      tol = (
          eps * math_ops.cast(max_dim, a.dtype) *
          math_ops.reduce_max(singular_values, axis=-1, keepdims=True))
    return math_ops.reduce_sum(
        math_ops.cast(singular_values > tol, dtypes.int32), axis=-1)
@tf_export('linalg.pinv')
@dispatch.add_dispatch_support
def pinv(a, rcond=None, validate_args=False, name=None):
  """Compute the Moore-Penrose pseudo-inverse of one or more matrices.

  Calculate the [generalized inverse of a matrix](
  https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its
  singular-value decomposition (SVD) and including all large singular values.

  The pseudo-inverse of a matrix `A`, is defined as: 'the matrix that 'solves'
  [the least-squares problem] `A @ x = b`,' i.e., if `x_hat` is a solution, then
  `A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if
  `U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then
  `A_pinv = V @ inv(Sigma) U^T`. [(Strang, 1980)][1]

  This function is analogous to [`numpy.linalg.pinv`](
  https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html).
  It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the
  default `rcond` is `1e-15`. Here the default is
  `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`.

  Args:
    a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
      pseudo-inverted.
    rcond: `Tensor` of small singular value cutoffs. Singular values smaller
      (in modulus) than `rcond` * largest_singular_value (again, in modulus) are
      set to zero. Must broadcast against `tf.shape(a)[:-2]`.
      Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`.
    validate_args: When `True`, additional assertions might be embedded in the
      graph.
      Default value: `False` (i.e., no graph assertions are added).
    name: Python `str` prefixed to ops created by this function.
      Default value: 'pinv'.

  Returns:
    a_pinv: (Batch of) pseudo-inverse of input `a`. Has same shape as `a` except
      rightmost two dimensions are transposed.

  Raises:
    TypeError: if input `a` does not have `float`-like `dtype`.
    ValueError: if input `a` has fewer than 2 dimensions.

  #### Examples

  ```python
  import tensorflow as tf
  import tensorflow_probability as tfp

  a = tf.constant([[1., 0.4, 0.5],
                   [0.4, 0.2, 0.25],
                   [0.5, 0.25, 0.35]])
  tf.matmul(tf.linalg.pinv(a), a)
  # ==> array([[1., 0., 0.],
               [0., 1., 0.],
               [0., 0., 1.]], dtype=float32)

  a = tf.constant([[1., 0.4, 0.5, 1.],
                   [0.4, 0.2, 0.25, 2.],
                   [0.5, 0.25, 0.35, 3.]])
  tf.matmul(tf.linalg.pinv(a), a)
  # ==> array([[ 0.76, 0.37, 0.21, -0.02],
               [ 0.37, 0.43, -0.33, 0.02],
               [ 0.21, -0.33, 0.81, 0.01],
               [-0.02, 0.02, 0.01, 1. ]], dtype=float32)
  ```

  #### References

  [1]: G. Strang. 'Linear Algebra and Its Applications, 2nd Ed.' Academic Press,
       Inc., 1980, pp. 139-142.
  """
  with ops.name_scope(name or 'pinv'):
    a = ops.convert_to_tensor(a, name='a')

    assertions = _maybe_validate_matrix(a, validate_args)
    if assertions:
      with ops.control_dependencies(assertions):
        a = array_ops.identity(a)

    dtype = a.dtype.as_numpy_dtype

    if rcond is None:
      # Default cutoff scales with both the matrix size and dtype precision.

      def get_dim_size(dim):
        # Prefer the statically-known dimension; fall back to a dynamic shape.
        dim_val = tensor_shape.dimension_value(a.shape[dim])
        if dim_val is not None:
          return dim_val
        return array_ops.shape(a)[dim]

      num_rows = get_dim_size(-2)
      num_cols = get_dim_size(-1)
      if isinstance(num_rows, int) and isinstance(num_cols, int):
        # Both dimensions statically known: compute the cutoff in Python.
        max_rows_cols = float(max(num_rows, num_cols))
      else:
        max_rows_cols = math_ops.cast(
            math_ops.maximum(num_rows, num_cols), dtype)
      rcond = 10. * max_rows_cols * np.finfo(dtype).eps

    rcond = ops.convert_to_tensor(rcond, dtype=dtype, name='rcond')

    # Calculate pseudo inverse via SVD.
    # Note: if a is Hermitian then u == v. (We might observe additional
    # performance by explicitly setting `v = u` in such cases.)
    [
        singular_values,  # Sigma
        left_singular_vectors,  # U
        right_singular_vectors,  # V
    ] = svd(
        a, full_matrices=False, compute_uv=True)

    # Saturate small singular values to inf. This has the effect of making
    # `1. / s = 0.` while not resulting in `NaN` gradients.
    cutoff = rcond * math_ops.reduce_max(singular_values, axis=-1)
    singular_values = array_ops.where_v2(
        singular_values > array_ops.expand_dims_v2(cutoff, -1), singular_values,
        np.array(np.inf, dtype))

    # By the definition of the SVD, `a == u @ s @ v^H`, and the pseudo-inverse
    # is defined as `pinv(a) == v @ inv(s) @ u^H`.
    a_pinv = math_ops.matmul(
        right_singular_vectors / array_ops.expand_dims_v2(singular_values, -2),
        left_singular_vectors,
        adjoint_b=True)

    if a.shape is not None and a.shape.rank is not None:
      # Statically the result shape is `a`'s shape with the last two dims
      # swapped (pinv of an [..., M, N] matrix is [..., N, M]).
      a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]]))

    return a_pinv
@tf_export('linalg.lu_solve')
@dispatch.add_dispatch_support
def lu_solve(lower_upper, perm, rhs, validate_args=False, name=None):
  """Solves systems of linear eqns `A X = RHS`, given LU factorizations.

  Note: this function does not verify the implied matrix is actually invertible
  nor is this condition checked even when `validate_args=True`.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
      matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
      X` then `perm = argmax(P)`.
    rhs: Matrix-shaped float `Tensor` representing targets for which to solve;
      `A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[...,
      tf.newaxis])[..., 0]`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness. Note: this function does not verify the implied matrix is
      actually invertible, even when `validate_args=True`.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., 'lu_solve').

  Returns:
    x: The `X` in `A @ X = RHS`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[1., 2],
        [3, 4]],
       [[7, 8],
        [3, 4]]]
  inv_x = tf.linalg.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))
  tf.assert_near(tf.matrix_inverse(x), inv_x)
  # ==> True
  ```
  """
  with ops.name_scope(name or 'lu_solve'):
    lower_upper = ops.convert_to_tensor(
        lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
    perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
    rhs = ops.convert_to_tensor(rhs, dtype_hint=lower_upper.dtype, name='rhs')

    assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args)
    if assertions:
      with ops.control_dependencies(assertions):
        lower_upper = array_ops.identity(lower_upper)
        perm = array_ops.identity(perm)
        rhs = array_ops.identity(rhs)

    if (rhs.shape.rank == 2 and perm.shape.rank == 1):
      # Both rhs and perm have scalar batch_shape.
      permuted_rhs = array_ops.gather(rhs, perm, axis=-2)
    else:
      # Either rhs or perm have non-scalar batch_shape or we can't determine
      # this information statically.
      # Broadcast rhs and perm to a common batch shape, flatten the batch
      # dims, and apply the permutation via a batched gather_nd.
      rhs_shape = array_ops.shape(rhs)
      broadcast_batch_shape = array_ops.broadcast_dynamic_shape(
          rhs_shape[:-2],
          array_ops.shape(perm)[:-1])
      d, m = rhs_shape[-2], rhs_shape[-1]
      rhs_broadcast_shape = array_ops.concat([broadcast_batch_shape, [d, m]],
                                             axis=0)

      # Tile out rhs.
      broadcast_rhs = array_ops.broadcast_to(rhs, rhs_broadcast_shape)
      broadcast_rhs = array_ops.reshape(broadcast_rhs, [-1, d, m])

      # Tile out perm and add batch indices.
      broadcast_perm = array_ops.broadcast_to(perm, rhs_broadcast_shape[:-1])
      broadcast_perm = array_ops.reshape(broadcast_perm, [-1, d])
      broadcast_batch_size = math_ops.reduce_prod(broadcast_batch_shape)
      broadcast_batch_indices = array_ops.broadcast_to(
          math_ops.range(broadcast_batch_size)[:, array_ops.newaxis],
          [broadcast_batch_size, d])
      broadcast_perm = array_ops.stack(
          [broadcast_batch_indices, broadcast_perm], axis=-1)

      permuted_rhs = array_ops.gather_nd(broadcast_rhs, broadcast_perm)
      permuted_rhs = array_ops.reshape(permuted_rhs, rhs_broadcast_shape)

    # `lower_upper` packs L and U; rebuild L with a unit diagonal, then solve
    # L y = P rhs followed by U x = y.
    lower = set_diag(
        band_part(lower_upper, num_lower=-1, num_upper=0),
        array_ops.ones(
            array_ops.shape(lower_upper)[:-1], dtype=lower_upper.dtype))
    return triangular_solve(
        lower_upper,  # Only upper is accessed.
        triangular_solve(lower, permuted_rhs),
        lower=False)
@tf_export('linalg.lu_matrix_inverse')
@dispatch.add_dispatch_support
def lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None):
  """Computes the inverse given the LU decomposition(s) of one or more matrices.

  This op is conceptually identical to,

  ```python
  inv_X = tf.lu_matrix_inverse(*tf.linalg.lu(X))
  tf.assert_near(tf.matrix_inverse(X), inv_X)
  # ==> True
  ```

  Note: this function does not verify the implied matrix is actually invertible
  nor is this condition checked even when `validate_args=True`.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
      matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
      X` then `perm = argmax(P)`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness. Note: this function does not verify the implied matrix is
      actually invertible, even when `validate_args=True`.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., 'lu_matrix_inverse').

  Returns:
    inv_x: The matrix_inv, i.e.,
      `tf.matrix_inverse(tf.linalg.lu_reconstruct(lu, perm))`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[3., 4], [1, 2]],
       [[7., 8], [3, 4]]]
  inv_x = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(x))
  tf.assert_near(tf.matrix_inverse(x), inv_x)
  # ==> True
  ```
  """
  with ops.name_scope(name or 'lu_matrix_inverse'):
    lower_upper = ops.convert_to_tensor(
        lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
    perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')

    checks = lu_reconstruct_assertions(lower_upper, perm, validate_args)
    if checks:
      with ops.control_dependencies(checks):
        lower_upper = array_ops.identity(lower_upper)
        perm = array_ops.identity(perm)

    # Inverting via LU is just solving A X = I.
    shape = array_ops.shape(lower_upper)
    identity_rhs = eye(
        shape[-1], batch_shape=shape[:-2], dtype=lower_upper.dtype)
    return lu_solve(lower_upper, perm, rhs=identity_rhs, validate_args=False)
@tf_export('linalg.lu_reconstruct')
@dispatch.add_dispatch_support
def lu_reconstruct(lower_upper, perm, validate_args=False, name=None):
  """Reconstructs one or more matrices from their LU decomposition(s).

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
      matmul(L, U)) = X` then `lower_upper = L + U - eye`.
    perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
      X` then `perm = argmax(P)`.
    validate_args: Python `bool` indicating whether arguments should be checked
      for correctness.
      Default value: `False` (i.e., don't validate arguments).
    name: Python `str` name given to ops managed by this object.
      Default value: `None` (i.e., 'lu_reconstruct').

  Returns:
    x: The original input to `tf.linalg.lu`, i.e., `x` as in,
      `lu_reconstruct(*tf.linalg.lu(x))`.

  #### Examples

  ```python
  import numpy as np
  import tensorflow as tf
  import tensorflow_probability as tfp

  x = [[[3., 4], [1, 2]],
       [[7., 8], [3, 4]]]
  x_reconstructed = tf.linalg.lu_reconstruct(*tf.linalg.lu(x))
  tf.assert_near(x, x_reconstructed)
  # ==> True
  ```
  """
  with ops.name_scope(name or 'lu_reconstruct'):
    lower_upper = ops.convert_to_tensor(
        lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
    perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')

    assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)
    if assertions:
      with ops.control_dependencies(assertions):
        lower_upper = array_ops.identity(lower_upper)
        perm = array_ops.identity(perm)

    shape = array_ops.shape(lower_upper)

    # Split the packed factorization back into L (unit diagonal) and U, then
    # recombine: X = P^{-1} (L @ U).
    lower = set_diag(
        band_part(lower_upper, num_lower=-1, num_upper=0),
        array_ops.ones(shape[:-1], dtype=lower_upper.dtype))
    upper = band_part(lower_upper, num_lower=0, num_upper=-1)
    x = math_ops.matmul(lower, upper)

    if (lower_upper.shape is None or lower_upper.shape.rank is None or
        lower_upper.shape.rank != 2):
      # We either don't know the batch rank or there are >0 batch dims.
      # Flatten batch dims, invert each permutation via map_fn, and apply it
      # row-wise with a batched gather_nd.
      batch_size = math_ops.reduce_prod(shape[:-2])
      d = shape[-1]
      x = array_ops.reshape(x, [batch_size, d, d])
      perm = array_ops.reshape(perm, [batch_size, d])
      perm = map_fn.map_fn(array_ops.invert_permutation, perm)
      batch_indices = array_ops.broadcast_to(
          math_ops.range(batch_size)[:, array_ops.newaxis], [batch_size, d])
      x = array_ops.gather_nd(x, array_ops.stack([batch_indices, perm],
                                                 axis=-1))
      x = array_ops.reshape(x, shape)
    else:
      # Single (unbatched) matrix: a plain row gather suffices.
      x = array_ops.gather(x, array_ops.invert_permutation(perm))

    x.set_shape(lower_upper.shape)
    return x
def lu_reconstruct_assertions(lower_upper, perm, validate_args):
  """Returns list of assertions related to `lu_reconstruct` assumptions.

  Checks that `lower_upper` is at least rank 2, that `perm` has exactly one
  fewer dimension than `lower_upper`, and that the trailing two dimensions of
  `lower_upper` are square. Each condition is validated statically when the
  relevant shape information is known; otherwise, if `validate_args` is True,
  a runtime assertion op is appended to the returned list.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`.
    perm: `p` as returned by `tf.linalg.lu`.
    validate_args: Python `bool`; whether to add runtime assertion ops for
      conditions that cannot be verified statically.

  Returns:
    A (possibly empty) list of assertion ops.

  Raises:
    ValueError: If a statically-checkable condition is violated.
  """
  assertions = []

  message = 'Input `lower_upper` must have at least 2 dimensions.'
  if lower_upper.shape.rank is not None and lower_upper.shape.rank < 2:
    raise ValueError(message)
  elif validate_args:
    assertions.append(
        check_ops.assert_rank_at_least_v2(lower_upper, rank=2, message=message))

  message = '`rank(lower_upper)` must equal `rank(perm) + 1`'
  if lower_upper.shape.rank is not None and perm.shape.rank is not None:
    if lower_upper.shape.rank != perm.shape.rank + 1:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        check_ops.assert_rank(
            lower_upper, rank=array_ops.rank(perm) + 1, message=message))

  message = '`lower_upper` must be square.'
  # Bug fix: squareness concerns the trailing two (matrix) dimensions, so the
  # static check must look at `shape[-2:]`, not the batch dims `shape[:-2]`.
  # The old form silently skipped the static check whenever a batch dim was
  # unknown, and skipped the runtime assertion whenever the batch dims were
  # known but the matrix dims were not.
  if lower_upper.shape[-2:].is_fully_defined():
    if lower_upper.shape[-2] != lower_upper.shape[-1]:
      raise ValueError(message)
  elif validate_args:
    m, n = array_ops.split(
        array_ops.shape(lower_upper)[-2:], num_or_size_splits=2)
    assertions.append(check_ops.assert_equal(m, n, message=message))

  return assertions
def _lu_solve_assertions(lower_upper, perm, rhs, validate_args):
  """Returns list of assertions related to `lu_solve` assumptions.

  In addition to the `lu_reconstruct` checks, verifies that `rhs` is at least
  rank 2 and that the system sizes agree: `A X = RHS` requires
  `lower_upper.shape[-1] == rhs.shape[-2]`. Static checks raise immediately;
  when shapes are unknown and `validate_args` is True, runtime assertion ops
  are appended instead.

  Args:
    lower_upper: `lu` as returned by `tf.linalg.lu`.
    perm: `p` as returned by `tf.linalg.lu`.
    rhs: Right-hand-side matrix of the linear system.
    validate_args: Python `bool`; whether to add runtime assertion ops for
      conditions that cannot be verified statically.

  Returns:
    A (possibly empty) list of assertion ops.

  Raises:
    ValueError: If a statically-checkable condition is violated.
  """
  assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)

  message = 'Input `rhs` must have at least 2 dimensions.'
  if rhs.shape.ndims is not None:
    if rhs.shape.ndims < 2:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        check_ops.assert_rank_at_least(rhs, rank=2, message=message))

  # Bug fix: the message previously said `rhs.shape[-1]`, but the condition
  # actually (and correctly) compares against `rhs.shape[-2]` — the number of
  # rows of RHS must match the system size.
  message = '`lower_upper.shape[-1]` must equal `rhs.shape[-2]`.'
  if (lower_upper.shape[-1] is not None and rhs.shape[-2] is not None):
    if lower_upper.shape[-1] != rhs.shape[-2]:
      raise ValueError(message)
  elif validate_args:
    assertions.append(
        check_ops.assert_equal(
            array_ops.shape(lower_upper)[-1],
            array_ops.shape(rhs)[-2],
            message=message))

  return assertions
@tf_export('linalg.eigh_tridiagonal')
@dispatch.add_dispatch_support
def eigh_tridiagonal(alpha,
beta,
eigvals_only=True,
select='a',
select_range=None,
tol=None,
name=None):
"""Computes the eigenvalues of a Hermitian tridiagonal matrix.
Args:
alpha: A real or complex tensor of shape (n), the diagonal elements of the
matrix. NOTE: If alpha is complex, the imaginary part is ignored (assumed
zero) to satisfy the requirement that the matrix be Hermitian.
beta: A real or complex tensor of shape (n-1), containing the elements of
the first super-diagonal of the matrix. If beta is complex, the first
sub-diagonal of the matrix is assumed to be the conjugate of beta to
satisfy the requirement that the matrix be Hermitian
eigvals_only: If False, both eigenvalues and corresponding eigenvectors are
computed. If True, only eigenvalues are computed. Default is True.
    select: Optional string with values in {'a', 'v', 'i'} (default is 'a') that
      determines which eigenvalues to calculate:
      'a': all eigenvalues.
      'v': eigenvalues in the interval (min, max] given by `select_range`.
      'i': eigenvalues with indices min <= i <= max.
select_range: Size 2 tuple or list or tensor specifying the range of
eigenvalues to compute together with select. If select is 'a',
select_range is ignored.
tol: Optional scalar. The absolute tolerance to which each eigenvalue is
required. An eigenvalue (or cluster) is considered to have converged if it
lies in an interval of this width. If tol is None (default), the value
eps*|T|_2 is used where eps is the machine precision, and |T|_2 is the
2-norm of the matrix T.
name: Optional name of the op.
Returns:
eig_vals: The eigenvalues of the matrix in non-decreasing order.
eig_vectors: If `eigvals_only` is False the eigenvectors are returned in
the second output argument.
Raises:
ValueError: If input values are invalid.
NotImplemented: Computing eigenvectors for `eigvals_only` = False is
not implemented yet.
This op implements a subset of the functionality of
scipy.linalg.eigh_tridiagonal.
Note: The result is undefined if the input contains +/-inf or NaN, or if
any value in beta has a magnitude greater than
`numpy.sqrt(numpy.finfo(beta.dtype.as_numpy_dtype).max)`.
TODO(b/187527398):
Add support for outer batch dimensions.
#### Examples
```python
import numpy
eigvals = tf.linalg.eigh_tridiagonal([0.0, 0.0, 0.0], [1.0, 1.0])
eigvals_expected = [-numpy.sqrt(2.0), 0.0, numpy.sqrt(2.0)]
tf.assert_near(eigvals_expected, eigvals)
# ==> True
```
"""
with ops.name_scope(name or 'eigh_tridiagonal'):
def _compute_eigenvalues(alpha, beta):
"""Computes all eigenvalues of a Hermitian tridiagonal matrix."""
def _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, x):
"""Implements the Sturm sequence recurrence."""
with ops.name_scope('sturm'):
n = alpha.shape[0]
zeros = array_ops.zeros(array_ops.shape(x), dtype=dtypes.int32)
ones = array_ops.ones(array_ops.shape(x), dtype=dtypes.int32)
# The first step in the Sturm sequence recurrence
# requires special care if x is equal to alpha[0].
def sturm_step0():
q = alpha[0] - x
count = array_ops.where(q < 0, ones, zeros)
q = array_ops.where(
math_ops.equal(alpha[0], x), alpha0_perturbation, q)
return q, count
# Subsequent steps all take this form:
def sturm_step(i, q, count):
q = alpha[i] - beta_sq[i - 1] / q - x
count = array_ops.where(q <= pivmin, count + 1, count)
q = array_ops.where(q <= pivmin, math_ops.minimum(q, -pivmin), q)
return q, count
# The first step initializes q and count.
q, count = sturm_step0()
# Peel off ((n-1) % blocksize) steps from the main loop, so we can run
# the bulk of the iterations unrolled by a factor of blocksize.
blocksize = 16
i = 1
peel = (n - 1) % blocksize
unroll_cnt = peel
def unrolled_steps(start, q, count):
for j in range(unroll_cnt):
q, count = sturm_step(start + j, q, count)
return start + unroll_cnt, q, count
i, q, count = unrolled_steps(i, q, count)
# Run the remaining steps of the Sturm sequence using a partially
# unrolled while loop.
unroll_cnt = blocksize
cond = lambda i, q, count: math_ops.less(i, n)
_, _, count = control_flow_ops.while_loop(
cond, unrolled_steps, [i, q, count], back_prop=False)
return count
with ops.name_scope('compute_eigenvalues'):
if alpha.dtype.is_complex:
alpha = math_ops.real(alpha)
beta_sq = math_ops.real(math_ops.conj(beta) * beta)
beta_abs = math_ops.sqrt(beta_sq)
else:
beta_sq = math_ops.square(beta)
beta_abs = math_ops.abs(beta)
# Estimate the largest and smallest eigenvalues of T using the
# Gershgorin circle theorem.
finfo = np.finfo(alpha.dtype.as_numpy_dtype)
off_diag_abs_row_sum = array_ops.concat(
[beta_abs[:1], beta_abs[:-1] + beta_abs[1:], beta_abs[-1:]], axis=0)
lambda_est_max = math_ops.minimum(
finfo.max, math_ops.reduce_max(alpha + off_diag_abs_row_sum))
lambda_est_min = math_ops.maximum(
finfo.min, math_ops.reduce_min(alpha - off_diag_abs_row_sum))
# Upper bound on 2-norm of T.
t_norm = math_ops.maximum(
math_ops.abs(lambda_est_min), math_ops.abs(lambda_est_max))
# Compute the smallest allowed pivot in the Sturm sequence to avoid
# overflow.
one = np.ones([], dtype=alpha.dtype.as_numpy_dtype)
safemin = np.maximum(one / finfo.max, (one + finfo.eps) * finfo.tiny)
pivmin = safemin * math_ops.maximum(one, math_ops.reduce_max(beta_sq))
alpha0_perturbation = math_ops.square(finfo.eps * beta_abs[0])
abs_tol = finfo.eps * t_norm
if tol:
abs_tol = math_ops.maximum(tol, abs_tol)
# In the worst case, when the absolute tolerance is eps*lambda_est_max
# and lambda_est_max = -lambda_est_min, we have to take as many
# bisection steps as there are bits in the mantissa plus 1.
max_it = finfo.nmant + 1
# Determine the indices of the desired eigenvalues, based on select
# and select_range.
asserts = None
if select == 'a':
target_counts = math_ops.range(n)
elif select == 'i':
asserts = check_ops.assert_less_equal(
select_range[0],
select_range[1],
message='Got empty index range in select_range.')
target_counts = math_ops.range(select_range[0], select_range[1] + 1)
elif select == 'v':
asserts = check_ops.assert_less(
select_range[0],
select_range[1],
message='Got empty interval in select_range.')
else:
raise ValueError("'select must have a value in {'a', 'i', 'v'}.")
if asserts:
with ops.control_dependencies([asserts]):
alpha = array_ops.identity(alpha)
# Run binary search for all desired eigenvalues in parallel, starting
# from an interval slightly wider than the estimated
# [lambda_est_min, lambda_est_max].
fudge = 2.1 # We widen starting interval the Gershgorin interval a bit.
norm_slack = math_ops.cast(n, alpha.dtype) * fudge * finfo.eps * t_norm
if select in {'a', 'i'}:
lower = lambda_est_min - norm_slack - 2 * fudge * pivmin
upper = lambda_est_max + norm_slack + fudge * pivmin
else:
# Count the number of eigenvalues in the given range.
lower = select_range[0] - norm_slack - 2 * fudge * pivmin
upper = select_range[1] + norm_slack + fudge * pivmin
first = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, lower)
last = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, upper)
target_counts = math_ops.range(first, last)
# Pre-broadcast the scalars used in the Sturm sequence for improved
# performance.
upper = math_ops.minimum(upper, finfo.max)
lower = math_ops.maximum(lower, finfo.min)
target_shape = array_ops.shape(target_counts)
lower = array_ops.broadcast_to(lower, shape=target_shape)
upper = array_ops.broadcast_to(upper, shape=target_shape)
pivmin = array_ops.broadcast_to(pivmin, target_shape)
alpha0_perturbation = array_ops.broadcast_to(alpha0_perturbation,
target_shape)
# We compute the midpoint as 0.5*lower + 0.5*upper to avoid overflow in
# (lower + upper) or (upper - lower) when the matrix has eigenvalues
# with magnitude greater than finfo.max / 2.
def midpoint(lower, upper):
return (0.5 * lower) + (0.5 * upper)
def continue_binary_search(i, lower, upper):
return math_ops.logical_and(
math_ops.less(i, max_it),
math_ops.less(abs_tol, math_ops.reduce_max(upper - lower)))
def binary_search_step(i, lower, upper):
mid = midpoint(lower, upper)
counts = _sturm(alpha, beta_sq, pivmin, alpha0_perturbation, mid)
lower = array_ops.where(counts <= target_counts, mid, lower)
upper = array_ops.where(counts > target_counts, mid, upper)
return i + 1, lower, upper
# Start parallel binary searches.
_, lower, upper = control_flow_ops.while_loop(continue_binary_search,
binary_search_step,
[0, lower, upper])
return midpoint(lower, upper)
def _compute_eigenvectors(alpha, beta, eigvals):
"""Implements inverse iteration to compute eigenvectors."""
with ops.name_scope('compute_eigenvectors'):
k = array_ops.size(eigvals)
n = array_ops.size(alpha)
alpha = math_ops.cast(alpha, dtype=beta.dtype)
# Eigenvectors corresponding to cluster of close eigenvalues are
# not unique and need to be explicitly orthogonalized. Here we
# identify such clusters. Note: This function assumes that
# eigenvalues are sorted in non-decreasing order.
gap = eigvals[1:] - eigvals[:-1]
eps = np.finfo(eigvals.dtype.as_numpy_dtype).eps
t_norm = math_ops.maximum(
math_ops.abs(eigvals[0]), math_ops.abs(eigvals[-1]))
gaptol = np.sqrt(eps) * t_norm
# Find the beginning and end of runs of eigenvectors corresponding
# to eigenvalues closer than "gaptol", which will need to be
# orthogonalized against each other.
close = math_ops.less(gap, gaptol)
left_neighbor_close = array_ops.concat([[False], close], axis=0)
right_neighbor_close = array_ops.concat([close, [False]], axis=0)
ortho_interval_start = math_ops.logical_and(
math_ops.logical_not(left_neighbor_close), right_neighbor_close)
ortho_interval_start = array_ops.squeeze(
array_ops.where_v2(ortho_interval_start), axis=-1)
ortho_interval_end = math_ops.logical_and(
left_neighbor_close, math_ops.logical_not(right_neighbor_close))
ortho_interval_end = array_ops.squeeze(
array_ops.where_v2(ortho_interval_end), axis=-1) + 1
num_clusters = array_ops.size(ortho_interval_end)
# We perform inverse iteration for all eigenvectors in parallel,
# starting from a random set of vectors, until all have converged.
v0 = math_ops.cast(
stateless_random_ops.stateless_random_normal(
shape=(k, n), seed=[7, 42]),
dtype=beta.dtype)
nrm_v = norm(v0, axis=1)
v0 = v0 / nrm_v[:, array_ops.newaxis]
zero_nrm = constant_op.constant(0, shape=nrm_v.shape, dtype=nrm_v.dtype)
# Replicate alpha-eigvals(ik) and beta across the k eigenvectors so we
# can solve the k systems
# [T - eigvals(i)*eye(n)] x_i = r_i
# simultaneously using the batching mechanism.
eigvals_cast = math_ops.cast(eigvals, dtype=beta.dtype)
alpha_shifted = (
alpha[array_ops.newaxis, :] - eigvals_cast[:, array_ops.newaxis])
beta = array_ops.tile(beta[array_ops.newaxis, :], [k, 1])
diags = [beta, alpha_shifted, math_ops.conj(beta)]
def orthogonalize_close_eigenvectors(eigenvectors):
# Eigenvectors corresponding to a cluster of close eigenvalues are not
# uniquely defined, but the subspace they span is. To avoid numerical
# instability, we explicitly mutually orthogonalize such eigenvectors
# after each step of inverse iteration. It is customary to use
# modified Gram-Schmidt for this, but this is not very efficient
# on some platforms, so here we defer to the QR decomposition in
# TensorFlow.
def orthogonalize_cluster(cluster_idx, eigenvectors):
start = ortho_interval_start[cluster_idx]
end = ortho_interval_end[cluster_idx]
update_indices = array_ops.expand_dims(
math_ops.range(start, end), -1)
vectors_in_cluster = eigenvectors[start:end, :]
# We use the builtin QR factorization to orthonormalize the
# vectors in the cluster.
q, _ = qr(transpose(vectors_in_cluster))
vectors_to_update = transpose(q)
eigenvectors = array_ops.tensor_scatter_nd_update(
eigenvectors, update_indices, vectors_to_update)
return cluster_idx + 1, eigenvectors
_, eigenvectors = control_flow_ops.while_loop(
lambda i, ev: math_ops.less(i, num_clusters),
orthogonalize_cluster, [0, eigenvectors])
return eigenvectors
def continue_iteration(i, _, nrm_v, nrm_v_old):
max_it = 5 # Taken from LAPACK xSTEIN.
min_norm_growth = 0.1
norm_growth_factor = constant_op.constant(
1 + min_norm_growth, dtype=nrm_v.dtype)
# We stop the inverse iteration when we reach the maximum number of
# iterations or the norm growths is less than 10%.
return math_ops.logical_and(
math_ops.less(i, max_it),
math_ops.reduce_any(
math_ops.greater_equal(
math_ops.real(nrm_v),
math_ops.real(norm_growth_factor * nrm_v_old))))
def inverse_iteration_step(i, v, nrm_v, nrm_v_old):
v = tridiagonal_solve(
diags,
v,
diagonals_format='sequence',
partial_pivoting=True,
perturb_singular=True)
nrm_v_old = nrm_v
nrm_v = norm(v, axis=1)
v = v / nrm_v[:, array_ops.newaxis]
v = orthogonalize_close_eigenvectors(v)
return i + 1, v, nrm_v, nrm_v_old
_, v, nrm_v, _ = control_flow_ops.while_loop(continue_iteration,
inverse_iteration_step,
[0, v0, nrm_v, zero_nrm])
return transpose(v)
alpha = ops.convert_to_tensor(alpha, name='alpha')
n = alpha.shape[0]
if n <= 1:
return math_ops.real(alpha)
beta = ops.convert_to_tensor(beta, name='beta')
if alpha.dtype != beta.dtype:
raise ValueError("'alpha' and 'beta' must have the same type.")
eigvals = _compute_eigenvalues(alpha, beta)
if eigvals_only:
return eigvals
eigvectors = _compute_eigenvectors(alpha, beta, eigvals)
return eigvals, eigvectors
|
Intel-Corporation/tensorflow
|
tensorflow/python/ops/linalg/linalg_impl.py
|
Python
|
apache-2.0
| 65,569
|
[
"Gaussian"
] |
68df3f20fc0b686b1ec48388bcd5c644a5e4d8ee0b5cbb2c14012ddd2b438619
|
import unittest
from nose.tools import (assert_is_not_none, assert_raises, assert_equal)
import scipy.sparse
from sknn.mlp import BaseMLP
class TestInputDataTypes(unittest.TestCase):
    """Verify that BaseMLP accepts SciPy sparse matrices for fit and predict."""

    def setUp(self):
        # Minimal one-layer network; a single iteration keeps the test fast.
        self.nn = BaseMLP(layers=[("Gaussian",)], n_iter=1)

    def _sparse_pair(self):
        # Empty 8x4 CSR matrices are enough to exercise the input-conversion path.
        return scipy.sparse.csr_matrix((8, 4)), scipy.sparse.csr_matrix((8, 4))

    def test_FitSciPySparse(self):
        X, y = self._sparse_pair()
        self.nn._fit(X, y)

    def test_PredictSciPySparse(self):
        X, y = self._sparse_pair()
        self.nn._fit(X, y)
        self.nn._predict(X)
|
capitancambio/scikit-neuralnetwork
|
sknn/tests/test_types.py
|
Python
|
bsd-3-clause
| 587
|
[
"Gaussian"
] |
4ff08d903603baa5491844ad1456cd0f252cb79d55459f6f949f27cc9f44c09f
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
alexnet model adapted for serialization testing
has subset_pct set so that there are a low number of iterations per epoch
and no partial minibatches, dropout is turned off for reproducibility on gpu
and the learning rate is scaled to handle the reduced dropout percentage.
"""
from neon.util.argparser import NeonArgparser
from neon.initializers import Constant, Gaussian
from neon.layers import Conv, Dropout, Pooling, GeneralizedCost, Affine
from neon.optimizers import GradientDescentMomentum, MultiOptimizer, Schedule
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, TopKMisclassification
from neon.models import Model
from neon.data import ImageLoader
from neon.callbacks.callbacks import Callbacks
# parse the command line arguments (generates the backend)
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# setup data provider
# subset_pct is tuned so every epoch has a small, whole number of minibatches
# (see module docstring); shuffle/do_transforms are off for reproducibility.
img_set_options = dict(repo_dir=args.data_dir,
                       inner_size=224,
                       dtype=args.datatype,
                       subset_pct=0.09990891117239205)
train = ImageLoader(set_name='train', scale_range=(256, 256), shuffle=False,
                    do_transforms=False, **img_set_options)
test = ImageLoader(set_name='validation', scale_range=(256, 384), shuffle=False,
                   do_transforms=False, **img_set_options)
# AlexNet topology; Dropout layers use keep=1.0 (i.e. disabled) so runs are
# deterministic -- the learning rates below are scaled by 1/10 to compensate.
layers = [Conv((11, 11, 64), init=Gaussian(scale=0.01), bias=Constant(0),
               activation=Rectlin(), padding=3, strides=4),
          Pooling(3, strides=2),
          Conv((5, 5, 192), init=Gaussian(scale=0.01), bias=Constant(1),
               activation=Rectlin(), padding=2),
          Pooling(3, strides=2),
          Conv((3, 3, 384), init=Gaussian(scale=0.03), bias=Constant(0),
               activation=Rectlin(), padding=1),
          Conv((3, 3, 256), init=Gaussian(scale=0.03), bias=Constant(1),
               activation=Rectlin(), padding=1),
          Conv((3, 3, 256), init=Gaussian(scale=0.03), bias=Constant(1),
               activation=Rectlin(), padding=1),
          Pooling(3, strides=2),
          Affine(nout=4096, init=Gaussian(scale=0.01), bias=Constant(1), activation=Rectlin()),
          Dropout(keep=1.0),
          Affine(nout=4096, init=Gaussian(scale=0.01), bias=Constant(1), activation=Rectlin()),
          Dropout(keep=1.0),
          Affine(nout=1000, init=Gaussian(scale=0.01), bias=Constant(-7), activation=Softmax())]
model = Model(layers=layers)
# drop weights LR by 1/250**(1/3) at epochs (23, 45, 66), drop bias LR by 1/10 at epoch 45
weight_sched = Schedule([22, 44, 65], (1/250.)**(1/3.))
opt_gdm = GradientDescentMomentum(0.01/10, 0.9, wdecay=0.0005, schedule=weight_sched,
                                  stochastic_round=args.rounding)
opt_biases = GradientDescentMomentum(0.02/10, 0.9, schedule=Schedule([44], 0.1),
                                     stochastic_round=args.rounding)
# biases get their own optimizer; everything else uses the default one
opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})
# configure callbacks
valmetric = TopKMisclassification(k=5)
callbacks = Callbacks(model, eval_set=test, metric=valmetric, **args.callback_args)
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
|
DougFirErickson/neon
|
tests/serialization/alexnet.py
|
Python
|
apache-2.0
| 3,992
|
[
"Gaussian"
] |
df5c09a8dfbfa05e45287f43602837d8ed2a073180774c77f92de90aa560e590
|
#!/usr/bin/env python
#MIT License
#Copyright (c) 2017 Massimiliano Patacchiola
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#In this example I will use the class gridworld to generate a 3x4 world
#in which the cleaning robot will move. Using the SARSA algorithm I
#will estimate the state-action matrix.
import numpy as np
from gridworld import GridWorld
def update_state_action(state_action_matrix, visit_counter_matrix, observation, new_observation,
                        action, new_action, reward, alpha, gamma):
    '''Return the updated utility matrix

    Applies one SARSA temporal-difference update to the
    (action, state) entry selected by the observation at time t.
    @param state_action_matrix the matrix before the update
    @param visit_counter_matrix counts of state-action visits
    @param observation the state observed at t
    @param new_observation the state observed at t+1
    @param action the action at t
    @param new_action the action at t+1
    @param reward the reward observed after the action
    @param alpha the step size (learning rate)
    @param gamma the discount factor
    @return the updated state action matrix
    '''
    # Flatten the (row, col) grid coordinates into a single column index
    # (the grid is 4 columns wide).
    flat_state = observation[1] + observation[0] * 4
    flat_state_t1 = new_observation[1] + new_observation[0] * 4
    # Q values at t and t+1
    q = state_action_matrix[action, flat_state]
    q_t1 = state_action_matrix[int(new_action), flat_state_t1]
    # Visit-count-based step size; swap it in for "alpha" below if you want
    # the learning rate to decay for frequently visited state-action pairs.
    alpha_counted = 1.0 / (1.0 + visit_counter_matrix[action, flat_state])
    # SARSA update rule: Q <- Q + alpha * (r + gamma * Q' - Q)
    td_error = reward + gamma * q_t1 - q
    state_action_matrix[action, flat_state] = q + alpha * td_error
    return state_action_matrix
def update_visit_counter(visit_counter_matrix, observation, action):
    '''Update the visit counter

    Increments the number of times the given state-action pair has been
    visited; the counts can be used to decay the learning rate.
    @param visit_counter_matrix a matrix initialised with zeros
    @param observation the state observed
    @param action the action taken
    '''
    # Flatten the (row, col) grid coordinates (4 columns per row).
    flat_state = observation[1] + observation[0] * 4
    visit_counter_matrix[action, flat_state] = visit_counter_matrix[action, flat_state] + 1.0
    return visit_counter_matrix
def update_policy(policy_matrix, state_action_matrix, observation):
    '''Return the updated policy matrix

    Makes the policy greedy in the observed state with respect to the
    current state-action values.
    @param policy_matrix the matrix before the update
    @param state_action_matrix the state-action matrix
    @param observation the state observed at t
    @return the updated policy matrix
    '''
    flat_state = observation[1] + observation[0] * 4
    # Pick the action with the highest estimated utility in this state.
    greedy_action = np.argmax(state_action_matrix[:, flat_state])
    policy_matrix[observation[0], observation[1]] = greedy_action
    return policy_matrix
def return_epsilon_greedy_action(policy_matrix, observation, epsilon=0.1):
    '''Return an action choosing it with epsilon-greedy

    With probability (1 - epsilon) the greedy action from the policy is
    returned, otherwise a uniformly-weighted alternative is sampled.
    @param policy_matrix the current policy
    @param observation the state observed at t
    @param epsilon the value used for computing the probabilities
    @return a size-1 array containing the sampled action index
    '''
    # The number of actions is inferred from the largest entry in the policy.
    n_actions = int(np.nanmax(policy_matrix) + 1)
    greedy_action = int(policy_matrix[observation[0], observation[1]])
    # Every action gets the baseline exploration probability; the greedy
    # action additionally receives the remaining (1 - epsilon) mass.
    base_prob = epsilon / n_actions
    weights = np.full((n_actions), base_prob)
    weights[greedy_action] = 1 - epsilon + base_prob
    return np.random.choice(n_actions, 1, p=weights)
def print_policy(policy_matrix):
    '''Print the policy using specific symbol.
    * terminal state
    ^ > v < up, right, down, left
    # obstacle
    '''
    glyphs = ((-1, " * "), (0, " ^ "), (1, " > "), (2, " v "), (3, " < "))
    rows, cols = policy_matrix.shape
    rendered = ""
    for r in range(rows):
        for c in range(cols):
            cell = policy_matrix[r, c]
            for value, glyph in glyphs:
                if cell == value:
                    rendered += glyph
                    break
            else:
                # NaN never compares equal to anything, so the obstacle
                # marker is handled here.
                if np.isnan(cell):
                    rendered += " # "
        rendered += '\n'
    print(rendered)
def return_decayed_value(starting_value, global_step, decay_step):
    """Returns the decayed value.

    decayed_value = starting_value * decay_rate ^ (global_step / decay_steps)
    with a fixed decay rate of 0.1.
    @param starting_value the value before decaying
    @param global_step the global step to use for decay (positive integer)
    @param decay_step the step at which the value is decayed
    """
    decay_rate = 0.1
    exponent = global_step / decay_step
    return starting_value * np.power(decay_rate, exponent)
def main():
    """Run SARSA with epsilon-greedy exploration on the 3x4 cleaning-robot
    grid world and periodically print the state-action matrix and the
    greedy policy."""
    env = GridWorld(3, 4)
    # Define the state matrix: 1 marks the terminal cells, -1 the obstacle at (1,1)
    state_matrix = np.zeros((3,4))
    state_matrix[0, 3] = 1
    state_matrix[1, 3] = 1
    state_matrix[1, 1] = -1
    print("State Matrix:")
    print(state_matrix)
    # Define the reward matrix: -0.04 living cost, +1/-1 at the terminal cells
    reward_matrix = np.full((3,4), -0.04)
    reward_matrix[0, 3] = 1
    reward_matrix[1, 3] = -1
    print("Reward Matrix:")
    print(reward_matrix)
    # Define the transition matrix (rows: intended action, cols: resulting move)
    transition_matrix = np.array([[0.8, 0.1, 0.0, 0.1],
                                  [0.1, 0.8, 0.1, 0.0],
                                  [0.0, 0.1, 0.8, 0.1],
                                  [0.1, 0.0, 0.1, 0.8]])
    # Random starting policy
    policy_matrix = np.random.randint(low=0, high=4, size=(3, 4)).astype(np.float32)
    policy_matrix[1,1] = np.NaN #NaN for the obstacle at (1,1)
    policy_matrix[0,3] = policy_matrix[1,3] = -1 #No action for the terminal states
    print("Policy Matrix:")
    print(policy_matrix)
    env.setStateMatrix(state_matrix)
    env.setRewardMatrix(reward_matrix)
    env.setTransitionMatrix(transition_matrix)
    #utility_matrix = np.zeros((3,4))
    state_action_matrix = np.zeros((4,12))
    visit_counter_matrix = np.zeros((4,12))
    gamma = 0.999
    alpha = 0.001 #constant step size
    tot_epoch = 5000000
    print_epoch = 1000
    for epoch in range(tot_epoch):
        # Decay epsilon over the epochs so exploration fades out over time
        epsilon = return_decayed_value(0.1, epoch, decay_step=100000)
        #Reset and return the first observation
        observation = env.reset(exploring_starts=True)
        is_starting = True
        for step in range(1000):
            #Take the action using epsilon-greedy.
            # BUGFIX: the original passed a hard-coded epsilon=0.1 here,
            # silently ignoring the decayed epsilon computed (and printed)
            # above; the decayed value is now actually used.
            action = return_epsilon_greedy_action(policy_matrix, observation, epsilon=epsilon)
            # Exploring starts: the first move of each episode is random
            if(is_starting):
                action = np.random.randint(0, 4)
                is_starting = False
            #Move one step in the environment and get obs and reward
            new_observation, reward, done = env.step(action)
            new_action = policy_matrix[new_observation[0], new_observation[1]]
            #Updating the state-action matrix
            state_action_matrix = update_state_action(state_action_matrix, visit_counter_matrix, observation, new_observation,
                                                      action, new_action, reward, alpha, gamma)
            #Updating the policy
            policy_matrix = update_policy(policy_matrix, state_action_matrix, observation)
            #Increment the visit counter
            visit_counter_matrix = update_visit_counter(visit_counter_matrix, observation, action)
            observation = new_observation
            #print(utility_matrix)
            if done: break
        if(epoch % print_epoch == 0):
            print("")
            print("Epsilon: " + str(epsilon))
            print("State-Action matrix after " + str(epoch+1) + " iterations:")
            print(state_action_matrix)
            print("Policy matrix after " + str(epoch+1) + " iterations:")
            print_policy(policy_matrix)
    #Time to check the utility matrix obtained
    print("State-Action matrix after " + str(tot_epoch) + " iterations:")
    print(state_action_matrix)
    print("Policy matrix after " + str(tot_epoch) + " iterations:")
    print_policy(policy_matrix)
if __name__ == "__main__":
main()
|
mpatacchiola/dissecting-reinforcement-learning
|
src/3/temporal_differencing_control_sarsa.py
|
Python
|
mit
| 9,331
|
[
"VisIt"
] |
406e662c242c81d8e1a2a381183b535f0da49e2dd8258bd4efd3a719a94092bf
|
"""
Copyright (c) 2014, Guillermo A. Perez, Universite Libre de Bruxelles
This file is part of the AbsSynthe tool.
AbsSynthe is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
AbsSynthe is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with AbsSynthe. If not, see <http://www.gnu.org/licenses/>.
Guillermo A. Perez
Universite Libre de Bruxelles
gperezme@ulb.ac.be
"""
from abc import ABCMeta, abstractmethod
from itertools import imap
from utils import fixpoint
import log
# game templates for the algorithms implemented here, they all
# use only the functions provided here
class Game:
    """Abstract base shared by all game representations in this module.

    Both synthesis algorithms below only require the error states and
    the initial state from a game (Python 2 style ABC via __metaclass__).
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def error(self):
        """Return the error states of the game."""
        pass
    @abstractmethod
    def init(self):
        """Return the initial state of the game."""
        pass
class VisitTracker:
    """Bookkeeping for the OTFUR exploration: remembers which states were
    visited and whether each visited state is currently in the (losing)
    attractor. Mutators return self so calls can be chained."""

    def __init__(self):
        self.attr = dict()

    def visit(self, v):
        """Mark v as visited, initially outside the attractor."""
        self.attr[v] = False
        return self

    def mark_in_attr(self, v, b):
        """Record whether v belongs to the attractor."""
        self.attr[v] = b
        return self

    def is_visited(self, v):
        """True iff v has been visited."""
        return v in self.attr

    def is_in_attr(self, v):
        """Truthy iff v was visited and marked as in the attractor."""
        return self.attr.get(v, False)
class ForwardGame(Game):
    """Game interface for the forward (on-the-fly, OTFUR) algorithm.

    Adds one-step successor operators and a predicate distinguishing
    environment states from controller states; forward_safety_synth
    expands environment states with upost() and controller states with
    cpost().
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def upost(self, src):
        """Return an iterator over successors explored from an environment state."""
        pass
    @abstractmethod
    def cpost(self, src):
        """Return an iterator over successors explored from a controller state."""
        pass
    @abstractmethod
    def is_env_state(self, state):
        """True iff the state belongs to the environment player."""
        pass
    def visit_tracker(self):
        """Create a fresh VisitTracker for one exploration run."""
        return VisitTracker()
class BackwardGame(Game):
    """Game interface for the backward fixpoint algorithm.

    Exposes the predecessor operators used by backward_safety_synth.
    """
    # BUGFIX: the original spelled this "__megaclass__", so the Python 2
    # ABC machinery was never engaged for this class.
    __metaclass__ = ABCMeta
    @abstractmethod
    def upre(self, dst):
        """Return the predecessor set used by the UPRE fixpoint (see
        backward_safety_synth)."""
        pass
    @abstractmethod
    def cpre(self, dst, get_strat):
        """Return the controllable-predecessor set; get_strat presumably
        requests strategy extraction -- not exercised in this module."""
        pass
# OTFUR algo
def forward_safety_synth(game):
    """On-the-fly forward exploration (OTFUR) for safety games.

    Explores the game graph from the initial state with a worklist,
    maintaining for each discovered state whether it is currently known
    to be losing ("in the attractor" of the error states), and a reverse
    dependency map so that newly-losing states re-enqueue the states that
    depend on them. Returns True if the initial state is winning for the
    controller, None otherwise.
    """
    assert isinstance(game, ForwardGame)
    init_state = game.init()
    error_states = game.error()
    tracker = game.visit_tracker()
    # depend[v] holds (state, iterator) work items to re-process when v
    # is discovered to be losing.
    depend = dict()
    depend[init_state] = set()
    waiting = [(init_state, game.upost(init_state))]
    # Stop as soon as the initial state is known losing.
    while waiting and not tracker.is_in_attr(init_state):
        (s, sp_iter) = waiting.pop()
        try:
            sp = next(sp_iter)
        except StopIteration:
            continue  # nothing to do here
        # push the rest of the iterator back into the stack
        waiting.append((s, sp_iter))
        # process s, sp_iter
        if not tracker.is_visited(sp):
            tracker.visit(sp)
            # A freshly discovered environment state intersecting the
            # error states is immediately losing.
            tracker.mark_in_attr(
                sp, game.is_env_state(sp) and bool(sp & error_states))
            if sp in depend:
                depend[sp].add((s, iter([sp])))
            else:
                depend[sp] = set([(s, iter([sp]))])
            if tracker.is_in_attr(sp):
                # Re-evaluate s against the now-losing successor sp.
                waiting.append((s, iter([sp])))
            else:
                # Expand sp with the successor operator of its owner.
                if game.is_env_state(sp):
                    waiting.append((sp, game.upost(sp)))
                else:
                    waiting.append((sp, game.cpost(sp)))
        else:
            # sp already visited: s is losing if the environment can reach
            # ANY losing successor, or if ALL controller successors lose.
            local_lose = any(imap(tracker.is_in_attr, game.upost(s)))\
                if game.is_env_state(s)\
                else all(imap(tracker.is_in_attr, game.cpost(s)))
            if local_lose:
                tracker.mark_in_attr(s, True)
                waiting.extend(depend[s])
            if not tracker.is_in_attr(sp):
                # NOTE(review): this adds the bare pair (s, sp), whereas
                # every other entry in depend stores (s, iter([sp])) --
                # looks inconsistent; confirm whether an iterator was
                # intended here.
                depend[sp].add((s, sp))
    log.DBG_MSG("OTFUR, losing[init_state] = " +
                str(tracker.is_in_attr(init_state)))
    return None if tracker.is_in_attr(init_state) else True
# Classical backward fixpoint algo
def backward_safety_synth(game):
    """Classical backward fixpoint algorithm for safety games.

    Computes the fixpoint of UPRE starting from the error states (the set
    of states from which the environment can force an error) and returns
    its complement -- the winning region -- if it contains the initial
    state, otherwise None.
    """
    assert isinstance(game, BackwardGame)
    initial = game.init()
    errors = game.error()
    log.DBG_MSG("Computing fixpoint of UPRE.")
    # The early-exit predicate aborts as soon as the initial state is
    # reached by the losing set.
    losing = fixpoint(
        errors,
        fun=lambda x: x | game.upre(x),
        early_exit=lambda x: x & initial
    )
    win_region = ~losing
    if win_region & initial:
        return win_region
    return None
|
osankur/abssynthe
|
algos.py
|
Python
|
gpl-3.0
| 4,244
|
[
"VisIt"
] |
84ba2de384308c85aeaad15e824118fdbdf1a8d0f0dc3589b771c2e4e5ea7b50
|
#!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Runner for neuron morphology checks."""
from collections import OrderedDict
from importlib import import_module
import logging
from neurom.check import check_wrapper
from neurom.exceptions import ConfigError
from neurom.fst import _core as fst_core
from neurom.io import load_data, utils
L = logging.getLogger(__name__)
class CheckRunner(object):
    """Class managing checks, config and output.

    Configured with a dict mapping check-module names to lists of check
    function names; loads those modules dynamically and applies every
    configured check to each morphology file found under a path.
    """
    def __init__(self, config):
        """Initialize a CheckRunner object.

        :param config: dict with a mandatory 'checks' key and optional
            'options' and 'color' keys (see _sanitize_config)
        :raises ConfigError: if 'checks' is missing from the config
        """
        self._config = CheckRunner._sanitize_config(config)
        # Dynamically import each configured check module, e.g.
        # 'structural_checks' -> neurom.check.structural_checks
        self._check_modules = dict((k, import_module('neurom.check.%s' % k))
                                   for k in config['checks'])
    def run(self, path):
        """Test a bunch of files and return a summary JSON report."""
        SEPARATOR = '=' * 40
        summary = {}
        res = True
        for _f in utils.get_files_by_path(path):
            L.info(SEPARATOR)
            status, summ = self._check_file(_f)
            # a single failing file makes the overall status FAIL
            res &= status
            if summ is not None:
                summary.update(summ)
        L.info(SEPARATOR)
        status = 'PASS' if res else 'FAIL'
        return {'files': summary, 'STATUS': status}
    def _do_check(self, obj, check_module, check_str):
        """Run a check function on obj.

        Per-check options from the config are forwarded to the check:
        a list is splatted as positional arguments, anything else is
        passed as a single extra argument.
        """
        opts = self._config['options']
        if check_str in opts:
            fargs = opts[check_str]
            if isinstance(fargs, list):
                out = check_wrapper(getattr(check_module, check_str))(obj, *fargs)
            else:
                out = check_wrapper(getattr(check_module, check_str))(obj, fargs)
        else:
            out = check_wrapper(getattr(check_module, check_str))(obj)
        try:
            # out.info may not be sized/printable for every check result
            if out.info:
                L.debug('%s: %d failing ids detected: %s',
                        out.title, len(out.info), out.info)
        except TypeError:  # pragma: no cover
            pass
        return out
    def _check_loop(self, obj, check_mod_str):
        """Run all the checks in a check_module; returns (all_passed, summary)."""
        check_module = self._check_modules[check_mod_str]
        checks = self._config['checks'][check_mod_str]
        result = True
        summary = OrderedDict()
        for check in checks:
            ok = self._do_check(obj, check_module, check)
            summary[ok.title] = ok.status
            result &= ok.status
        return result, summary
    def _check_file(self, f):
        """Run tests on a morphology file; returns (status, {file: summary})."""
        L.info('File: %s', f)
        full_result = True
        full_summary = OrderedDict()
        try:
            data = load_data(f)
        except Exception as e:  # pylint: disable=W0703
            # an unloadable file fails outright, with an 'ALL' False summary
            L.error('Failed to load data... skipping tests for this file')
            L.error(e.args)
            return False, {f: OrderedDict([('ALL', False)])}
        try:
            # structural checks run on the raw data, neuron checks on the
            # constructed FstNeuron
            result, summary = self._check_loop(data, 'structural_checks')
            full_result &= result
            full_summary.update(summary)
            nrn = fst_core.FstNeuron(data)
            result, summary = self._check_loop(nrn, 'neuron_checks')
            full_result &= result
            full_summary.update(summary)
        except Exception as e:  # pylint: disable=W0703
            L.error('Check failed: %s', str(type(e)) + str(e.args))
            full_result = False
        full_summary['ALL'] = full_result
        for m, s in full_summary.items():
            self._log_msg(m, s)
        return full_result, {str(f): full_summary}
    def _log_msg(self, msg, ok):
        """Helper to log message to the right level."""
        if self._config['color']:
            # ANSI color escapes, only when the config asks for them
            CGREEN, CRED, CEND = '\033[92m', '\033[91m', '\033[0m'
        else:
            CGREEN = CRED = CEND = ''
        LOG_LEVELS = {False: logging.ERROR, True: logging.INFO}
        # pylint: disable=logging-not-lazy
        L.log(LOG_LEVELS[ok],
              '%35s %s' + CEND, msg, CGREEN + 'PASS' if ok else CRED + 'FAIL')
    @staticmethod
    def _sanitize_config(config):
        """Check that the config has the correct keys, add missing keys if necessary.

        :raises ConfigError: if the 'checks' key is absent
        """
        if 'checks' in config:
            checks = config['checks']
            if 'structural_checks' not in checks:
                checks['structural_checks'] = []
            if 'neuron_checks' not in checks:
                checks['neuron_checks'] = []
        else:
            raise ConfigError('Need to have "checks" in the config')
        if 'options' not in config:
            L.debug('Using default options')
            config['options'] = {}
        if 'color' not in config:
            config['color'] = False
        return config
|
wizmer/NeuroM
|
neurom/check/runner.py
|
Python
|
bsd-3-clause
| 6,398
|
[
"NEURON"
] |
2b77b0c2329951c688a1cc8737348757fb9f78721ac262faff7fc91892cd4de3
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RBiostrings(RPackage):
    """Memory efficient string containers, string matching algorithms, and
    other utilities, for fast manipulation of large biological sequences
    or sets of sequences."""
    homepage = "https://bioconductor.org/packages/Biostrings/"
    url = "https://bioconductor.org/packages/3.5/bioc/src/contrib/Biostrings_2.44.2.tar.gz"
    version('2.44.2', 'b30d8c116493ea160a0ca0a662666436')
    # Bioconductor dependencies, needed both at build and at run time.
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-xvector', type=('build', 'run'))
    # This release only builds against the R 3.4 series.
    depends_on('r@3.4.0:3.4.9', when='@2.44.2')
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-biostrings/package.py
|
Python
|
lgpl-2.1
| 1,948
|
[
"Bioconductor"
] |
99a1d072055c2beff2e64d39ed969e4947885d45a54f10aa9ec4d49a92b1bce1
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
import os
import sys
import time
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from .bamsam import convert_bam_file
from .bed import convert_bed_file
from .chain import CHAIN_STRING, ChainFile, ChainIter, ChainEntry, collapse_entries
from .exceptions import G2GChainFileError, G2GVCFError, G2GValueError, G2GLocationError, G2GFastaError, G2GError
from .gtf import convert_gtf_file
from .gtf_db import get_genes_simple, get_transcripts_simple, gtf2db, get_genes, get_gene
from .g2g_utils import format_time, get_logger, parse_location, Location, merge_dicts, reverse_complement_sequence, wrap_sequence
import g2g_fileutils as g2g_fu
from .seq_offsets import offset_parse_chromosomes
from .vcf import VCF, parse_gt_new
from pysam import FastaFile
from pysam import TabixFile
import pysam
LOG = get_logger()
def file_convert(chain_file, input_file, output_file=None, file_format=None, reverse=False):
    """
    Convert a BAM/SAM, GTF or BED file through a chain file.

    :param chain_file: the chain file driving the conversion
    :param input_file: the file to convert
    :param output_file: the converted file (optional)
    :param file_format: 'BAM', 'SAM', 'GTF' or 'BED'; guessed from the
        input file name when omitted
    :param reverse: reverse the direction of the conversion
    :return:
    """
    start = time.time()
    if file_format:
        file_format = file_format.upper()
        if file_format not in ('BED', 'BAM', 'SAM', 'GTF'):
            raise G2GValueError("Only BAM/SAM to BAM/SAM, GTF to GTF, or BED to BED are supported")
    else:
        # No explicit format: infer it from the file extension.
        upper_name = input_file.upper()
        if upper_name.endswith(('BAM', 'SAM')):
            file_format = 'BAM'
        elif upper_name.endswith('BED'):
            file_format = 'BED'
        elif upper_name.endswith('GTF'):
            file_format = 'GTF'
        else:
            raise G2GValueError("File format cannot be determined, please specify.")
    # Dispatch to the matching converter.
    if file_format in ('BAM', 'SAM'):
        convert_bam_file(chain_file=chain_file, file_in=input_file, file_out=output_file, reverse=reverse)
    elif file_format == 'GTF':
        convert_gtf_file(chain_file=chain_file, input_file=input_file, output_file=output_file, reverse=reverse)
    elif file_format == 'BED':
        convert_bed_file(chain_file=chain_file, input_file=input_file, output_file=output_file, reverse=reverse)
    else:
        raise G2GValueError("Only BAM/SAM to BAM/SAM, GTF to GTF, or BED to BED are supported")
    LOG.info("Execution complete: {0}".format(format_time(start, time.time())))
def gtf2chain(chain_file, input_file, output_file, chain_genes=False):
    """
    Create a chain file for the features of a GTF file.

    Loads the GTF into a temporary database, chains its genes or
    transcripts through the given chain file, then removes the database.

    :param chain_file: the source chain file
    :param input_file: the GTF file to convert
    :param output_file: the chain file to create
    :param chain_genes: chain genes when True, transcripts otherwise
    :return:
    """
    start = time.time()
    # BUGFIX: the original logged "Execution complete" here, right after
    # taking the start time and before doing any work -- a copy-paste
    # slip; the real completion message is emitted at the end below.
    chain_file = g2g_fu.check_file(chain_file)
    input_file = g2g_fu.check_file(input_file)
    output_file = g2g_fu.check_file(output_file, 'w')
    output_file_dir = os.path.dirname(output_file)
    LOG.info("GTF FILE: {0}".format(input_file))
    LOG.info("FROM CHAIN FILE: {0}".format(chain_file))
    LOG.info("TO CHAIN FILE: {0}".format(output_file))
    # Build a temporary GTF database, chain it, then clean up.
    temp_db = g2g_fu.gen_file_name("_g2gtempfile", output_file_dir, ".db3")
    gtf2db(input_file, temp_db)
    db2chain(chain_file, temp_db, output_file, chain_genes)
    g2g_fu.delete_file(temp_db)
    LOG.info("Execution complete: {0}".format(format_time(start, time.time())))
def convert_gene_transcripts(gene, chain_file, output_file):
    """
    Convert a gene's transcripts from the G2G DB file and chain file.

    For each transcript, a chain entry is stitched together from the
    chain-file mappings of its exons and appended to output_file (or
    written to stdout when output_file is falsy).

    :param gene: the gene information (dict-like with 'chrom' and 'transcripts')
    :param chain_file: the chain file (path or ChainFile instance)
    :param output_file: the new chain file
    :return:
    """
    LOG.debug("Gene = {0}".format(str(gene)))
    if not isinstance(chain_file, ChainFile):
        chain_file = ChainFile(chain_file)
    transcripts = gene['transcripts']
    for t in transcripts:
        transcript = transcripts[t]
        LOG.debug("Transcript = {0}".format(str(transcript)))
        chain_entries = []
        from_start = None
        to_start = None
        for exon_id in transcript['exons']:
            exon = transcript['exons'][exon_id]
            LOG.debug("Exon = {0}".format(str(exon)))
            mappings = chain_file.find_mappings(gene['chrom'], exon['start'], exon['end'])
            if mappings and len(mappings) > 0:
                # remember the coordinates of the first mapped exon
                # (currently unused beyond this point)
                if not from_start:
                    from_start = mappings[0].from_start
                    to_start = mappings[0].to_start
                if len(mappings) == 1:
                    # single mapping: one ungapped chain line of that size
                    m = mappings[0]
                    c = ChainEntry()
                    #c.from_start = m.from_start
                    #c.from_end = m.from_end
                    #c.to_start = m.to_start
                    #c.to_end = m.to_end
                    c.lines.append([m.from_end - m.from_start])
                    chain_entries.append(c)
                else:
                    # multiple mappings: emit [size, dt, dq] lines, where dt/dq
                    # are the gaps between consecutive mappings on each side
                    c = ChainEntry()
                    #c.from_start = mappings[0].from_start
                    #c.from_end = mappings[-1].from_end
                    #c.to_start = mappings[0].to_start
                    #c.to_end = mappings[-1].to_end
                    prev_mapping = None
                    sum_size = 0
                    sum_dq = 0
                    sum_dt = 0
                    dq = 0
                    prev_dq = 0
                    dt = 0
                    prev_dt = 0  # NOTE(review): assigned but never read
                    for m in mappings:
                        if not prev_mapping:
                            prev_mapping = m
                        else:
                            prev_dt = dt
                            prev_dq = dq
                            chain_size = prev_mapping.from_end - prev_mapping.from_start
                            dt = m.from_start - prev_mapping.from_end
                            dq = m.to_start - prev_mapping.to_end
                            # when a source-side gap exists, the previous
                            # target-side gap is folded into the block size
                            if dt > 0:
                                chain_size += prev_dq
                            sum_size += chain_size
                            sum_dq += dq
                            sum_dt += dt
                            c.lines.append([chain_size, dt, dq])
                            LOG.debug(c.lines[-1])
                            prev_mapping = m
                    # final (terminating) chain line for the last mapping
                    chain_size = mappings[-1].from_end - mappings[-1].from_start
                    if dt > 0:
                        chain_size += dq
                        # NOTE(review): sum_size accumulates dq here, not
                        # chain_size, unlike the loop above -- confirm this
                        # asymmetry is intended.
                        sum_size += dq
                    c.lines.append([chain_size])
                    chain_entries.append(c)
        # collapse exons
        if chain_entries and len(chain_entries) > 0:
            chain_entries = collapse_entries(chain_entries)
            # re-derive the totals from the collapsed lines for the header
            sum_size = 0
            sum_dq = 0
            sum_dt = 0
            lines = []
            for line in chain_entries[0].lines:
                sum_size += line[0]
                if len(line) > 1:
                    sum_dq += line[1]
                    sum_dt += line[2]
                lines.append('\t'.join(map(str, line)))
            # append to the output file, or dump to stdout when none given
            if output_file:
                outf = open(output_file, "a")
            else:
                outf = sys.stdout
            outf.write(CHAIN_STRING.format(CHAIN_STRING,
                       from_chr=t, from_length=sum_size + sum_dq,
                       from_start=0, from_end=sum_size + sum_dq,
                       to_chr=t, to_length=sum_size + sum_dt,
                       to_start=0, to_end=sum_size + sum_dt, id=t))
            outf.write("\n")
            outf.write("\n".join(lines))
            outf.write("\n")
            # NOTE(review): this closes sys.stdout when no output_file was
            # supplied -- confirm that is acceptable to callers.
            outf.close()
def db2chain(chain_file, input_file, output_file, chain_genes=False):
    """Generate a per-feature chain file for every gene or transcript in a G2G DB.

    For each gene (``chain_genes=True``) or transcript (default) found in the
    database, the genome-wide chain file is queried for the mappings covering
    the feature and a new, feature-local chain entry is appended to the output.

    :param chain_file: path to a chain file, or an already-parsed ``ChainFile``
    :param input_file: G2G database file to read genes/transcripts from
    :param output_file: file the generated chain entries are appended to;
                        falls back to stdout when falsy
    :param chain_genes: when True chain whole genes, otherwise chain
                        transcripts exon by exon (exon chains are collapsed
                        into one entry per transcript)
    :return: None
    """
    start = time.time()
    if not isinstance(chain_file, ChainFile):
        chain_file = g2g_fu.check_file(chain_file)
    input_file = g2g_fu.check_file(input_file)
    output_file_name = g2g_fu.check_file(output_file, 'w')
    LOG.info("CHAIN FILE: {0}".format(chain_file))
    LOG.info("INPUT FILE: {0}".format(input_file))
    LOG.info("OUTPUT FILE: {0}".format(output_file_name))
    if chain_genes:
        LOG.info("CHAIN TYPE: GENES")
    else:
        LOG.info("CHAIN TYPE: TRANSCRIPTS")
    # Parse the chain file only when the caller handed us a path.
    if not isinstance(chain_file, ChainFile):
        LOG.info("Parsing chain file...")
        chain_file = ChainFile(chain_file)
        LOG.info("Chain file parsed")
    LOG.info('Creating new chain file...')
    if chain_genes:
        LOG.debug("Generating chain for genes")
        for chromosome in chain_file.get_seqids():
            LOG.debug("Generating chain for genes in chromosome {0}".format(chromosome))
            for i, gene in enumerate(get_genes_simple(input_file, location=Location(chromosome))):
                LOG.debug("\n{0}".format(gene))
                chain_entries = []
                from_start = None
                to_start = None
                from_end = None
                to_end = None
                mappings = chain_file.find_mappings(gene.seqid, gene.start, gene.end)
                if gene.strand == 1:
                    # Forward strand: walk mappings left to right.
                    if mappings and len(mappings) > 0:
                        if not from_start:
                            from_start = mappings[0].from_start
                            to_start = mappings[0].to_start
                        if len(mappings) == 1:
                            # Single mapping: one ungapped chain line.
                            m = mappings[0]
                            c = ChainEntry()
                            c.lines.append([m.from_end - m.from_start])
                            chain_entries.append(c)
                        else:
                            c = ChainEntry()
                            prev_mapping = None
                            sum_size = 0
                            sum_dq = 0
                            sum_dt = 0
                            dq = 0
                            prev_dq = 0
                            dt = 0
                            prev_dt = 0
                            # Emit one chain line per gap between consecutive mappings:
                            # [size, dt (gap on 'from'), dq (gap on 'to')].
                            for m in mappings:
                                if not prev_mapping:
                                    prev_mapping = m
                                else:
                                    prev_dt = dt
                                    prev_dq = dq
                                    chain_size = prev_mapping.from_end - prev_mapping.from_start
                                    dt = m.from_start - prev_mapping.from_end
                                    dq = m.to_start - prev_mapping.to_end
                                    if dt > 0:
                                        chain_size += prev_dq
                                    sum_size += chain_size
                                    sum_dq += dq
                                    sum_dt += dt
                                    c.lines.append([chain_size, dt, dq])
                                    LOG.debug(c.lines[-1])
                                    prev_mapping = m
                            # Final (gap-less) chain line for the last mapping.
                            chain_size = mappings[-1].from_end - mappings[-1].from_start
                            if dt > 0:
                                chain_size += dq
                                sum_size += dq
                            c.lines.append([chain_size])
                            chain_entries.append(c)
                else:
                    # Reverse strand: same idea, but mappings are walked in
                    # reverse order and the gap arithmetic is flipped.
                    if mappings and len(mappings) > 0:
                        if not from_end:
                            from_end = mappings[-1].from_end
                            to_end = mappings[-1].to_end
                        if len(mappings) == 1:
                            m = mappings[0]
                            c = ChainEntry()
                            c.lines.append([m.from_end - m.from_start])
                            chain_entries.append(c)
                        else:
                            c = ChainEntry()
                            prev_mapping = None
                            sum_size = 0
                            sum_dq = 0
                            sum_dt = 0
                            dq = 0
                            prev_dq = 0
                            dt = 0
                            prev_dt = 0
                            # reverse
                            mappings = mappings[::-1]
                            for m in mappings:
                                LOG.debug("CURRENT MAPPING: {0}".format(m))
                                if not prev_mapping:
                                    prev_mapping = m
                                else:
                                    LOG.debug("PREV MAPPING: {0}".format(prev_mapping))
                                    prev_dt = dt
                                    prev_dq = dq
                                    chain_size = prev_mapping.from_end - prev_mapping.from_start
                                    #dt = m.from_start - prev_mapping.from_end
                                    #dq = m.to_start - prev_mapping.to_end
                                    dt = prev_mapping.from_start - m.from_end
                                    dq = prev_mapping.to_start - m.to_end
                                    LOG.debug("dt={0}, dq={1}".format(dt, dq))
                                    #if dt > 0:
                                    #    LOG.debug("DT > 0, ADDING to current chain_size {0}".format(chain_size))
                                    #    chain_size += prev_dq
                                    sum_size += chain_size
                                    sum_dq += dq
                                    sum_dt += dt
                                    c.lines.append([chain_size, dt, dq])
                                    LOG.debug(c.lines[-1])
                                    prev_mapping = m
                            LOG.debug("finding last...{0}".format(mappings[-1]))
                            chain_size = mappings[-1].from_end - mappings[-1].from_start
                            #if dt > 0:
                            #    LOG.debug("WHOA {0}".format(dt))
                            #    LOG.debug("DT > 0, ADDING to current chain_size {0}".format(chain_size))
                            #    chain_size += dq
                            #    sum_size += dq
                            c.lines.append([chain_size])
                            LOG.debug(c.lines[-1])
                            chain_entries.append(c)
                # Write this gene's chain entry (header + alignment lines).
                if chain_entries and len(chain_entries) > 0:
                    sum_size = 0
                    sum_dq = 0
                    sum_dt = 0
                    lines = []
                    for line in chain_entries[0].lines:
                        sum_size += line[0]
                        if len(line) > 1:
                            sum_dq += line[1]
                            sum_dt += line[2]
                        lines.append('\t'.join(map(str, line)))
                    if output_file:
                        # Append: each feature adds its own chain block.
                        outf = open(output_file, "a")
                    else:
                        outf = sys.stdout
                    outf.write(CHAIN_STRING.format(CHAIN_STRING,
                               from_chr=gene.seqid, from_length=sum_size + sum_dq,
                               from_start=0, from_end=sum_size + sum_dq,
                               to_chr=gene.seqid, to_length=sum_size + sum_dt,
                               to_start=0, to_end=sum_size + sum_dt, id=gene.ensembl_id))
                    outf.write("\n")
                    outf.write("\n".join(lines))
                    outf.write("\n")
                    outf.close()
    else:
        # Transcript mode: build one chain entry per exon, then collapse
        # them into a single entry for the transcript.
        for chromosome in chain_file.get_seqids():
            LOG.debug("Generating chain for transcripts in chromosome {0}".format(chromosome))
            for i, transcript in enumerate(get_transcripts_simple(input_file, location=Location(chromosome))):
                LOG.debug("Transcript = {0}".format(transcript))
                chain_entries = []
                from_start = None
                to_start = None
                from_end = None
                to_end = None
                # Process exons in exon_number order.
                transcript.exons = OrderedDict(sorted(transcript.exons.items(), key=lambda x: x[1].exon_number))
                for ensembl_id, exon in transcript.exons.iteritems():
                    LOG.debug("Exon = {0}".format(exon))
                    mappings = chain_file.find_mappings(exon.seqid, exon.start, exon.end)
                    if exon.strand == 1:
                        # Forward strand: same per-mapping walk as the gene case.
                        if mappings and len(mappings) > 0:
                            if not from_start:
                                from_start = mappings[0].from_start
                                to_start = mappings[0].to_start
                            if len(mappings) == 1:
                                m = mappings[0]
                                c = ChainEntry()
                                c.lines.append([m.from_end - m.from_start])
                                chain_entries.append(c)
                            else:
                                c = ChainEntry()
                                prev_mapping = None
                                sum_size = 0
                                sum_dq = 0
                                sum_dt = 0
                                dq = 0
                                prev_dq = 0
                                dt = 0
                                prev_dt = 0
                                for m in mappings:
                                    if not prev_mapping:
                                        prev_mapping = m
                                    else:
                                        prev_dt = dt
                                        prev_dq = dq
                                        chain_size = prev_mapping.from_end - prev_mapping.from_start
                                        dt = m.from_start - prev_mapping.from_end
                                        dq = m.to_start - prev_mapping.to_end
                                        if dt > 0:
                                            chain_size += prev_dq
                                        sum_size += chain_size
                                        sum_dq += dq
                                        sum_dt += dt
                                        c.lines.append([chain_size, dt, dq])
                                        LOG.debug(c.lines[-1])
                                        prev_mapping = m
                                chain_size = mappings[-1].from_end - mappings[-1].from_start
                                if dt > 0:
                                    chain_size += dq
                                    sum_size += dq
                                c.lines.append([chain_size])
                                chain_entries.append(c)
                    else:
                        # Reverse strand: mappings walked in reverse, flipped gaps.
                        if mappings and len(mappings) > 0:
                            if not from_end:
                                from_end = mappings[-1].from_end
                                to_end = mappings[-1].to_end
                            if len(mappings) == 1:
                                m = mappings[0]
                                c = ChainEntry()
                                c.lines.append([m.from_end - m.from_start])
                                chain_entries.append(c)
                            else:
                                c = ChainEntry()
                                prev_mapping = None
                                sum_size = 0
                                sum_dq = 0
                                sum_dt = 0
                                dq = 0
                                prev_dq = 0
                                dt = 0
                                prev_dt = 0
                                # reverse
                                mappings = mappings[::-1]
                                for m in mappings:
                                    LOG.debug("CURRENT MAPPING: {0}".format(m))
                                    if not prev_mapping:
                                        prev_mapping = m
                                    else:
                                        LOG.debug("PREV MAPPING: {0}".format(prev_mapping))
                                        prev_dt = dt
                                        prev_dq = dq
                                        chain_size = prev_mapping.from_end - prev_mapping.from_start
                                        #dt = m.from_start - prev_mapping.from_end
                                        #dq = m.to_start - prev_mapping.to_end
                                        dt = prev_mapping.from_start - m.from_end
                                        dq = prev_mapping.to_start - m.to_end
                                        LOG.debug("dt={0}, dq={1}".format(dt, dq))
                                        #if dt > 0:
                                        #    LOG.debug("DT > 0, ADDING to current chain_size {0}".format(chain_size))
                                        #    chain_size += prev_dq
                                        sum_size += chain_size
                                        sum_dq += dq
                                        sum_dt += dt
                                        c.lines.append([chain_size, dt, dq])
                                        LOG.debug(c.lines[-1])
                                        prev_mapping = m
                                LOG.debug("finding last...{0}".format(mappings[-1]))
                                chain_size = mappings[-1].from_end - mappings[-1].from_start
                                #if dt > 0:
                                #    LOG.debug("WHOA {0}".format(dt))
                                #    chain_size += dq
                                #    sum_size += dq
                                c.lines.append([chain_size])
                                LOG.debug(c.lines[-1])
                                chain_entries.append(c)
                # collapse exons
                if chain_entries and len(chain_entries) > 0:
                    LOG.debug('>>>>>>>')
                    for c in chain_entries:
                        LOG.debug(str(c))
                    LOG.debug('>>>>>>>')
                    chain_entries = collapse_entries(chain_entries)
                    sum_size = 0
                    sum_dq = 0
                    sum_dt = 0
                    lines = []
                    for line in chain_entries[0].lines:
                        sum_size += line[0]
                        if len(line) > 1:
                            sum_dq += line[1]
                            sum_dt += line[2]
                        lines.append('\t'.join(map(str, line)))
                    if output_file:
                        # Append: each transcript adds its own chain block.
                        outf = open(output_file, "a")
                    else:
                        outf = sys.stdout
                    outf.write(CHAIN_STRING.format(CHAIN_STRING,
                               from_chr=transcript.seqid, from_length=sum_size + sum_dq,
                               from_start=0, from_end=sum_size + sum_dq,
                               to_chr=transcript.seqid, to_length=sum_size + sum_dt,
                               to_start=0, to_end=sum_size + sum_dt, id=transcript.ensembl_id))
                    outf.write("\n")
                    outf.write("\n".join(lines))
                    outf.write("\n")
                    outf.close()
    LOG.info('New chain file created')
    LOG.info("Execution complete: {0}".format(format_time(start, time.time())))
def offset_chromosome_to_chain(chromosome, chain_file):
    """
    Convert a single Seqnature offset file into one chain block.

    Reads the per-chromosome offset file (whitespace-separated
    ``position offset`` rows), turns the running offsets into chain
    ``size/dt/dq`` triples, and appends one chain block (header + lines)
    to ``chain_file`` (stdout when falsy).

    :param chromosome: the chromosome data (dict with 'file_path',
                       'from_chr'/'from_length', 'to_chr'/'to_length')
    :param chain_file: the chain file to write to (appended)
    :raises IOError: when the chromosome's offset file does not exist
    """
    if not os.path.exists(chromosome['file_path']):
        raise IOError('Cannot find {0}'.format(chromosome['file_path']))
    prev_offset_pos = 0
    prev_offset_amt = 0
    first_offset = 0
    # Position on the reference after the last processed offset.
    ref_after_pos = 0
    sum_size = 0
    sum_dt = 0
    sum_dq = 0
    chain_data = []  # accumulated [size, dt, dq] rows
    fd = open(chromosome['file_path'], 'r')
    for line in fd:
        elem = line.strip().split()
        offset_pos = int(elem[0])
        offset_amt = int(elem[1])
        LOG.debug('LINE: {0}'.format(elem))
        chain_size = 0
        dt = 0
        dq = 0
        if prev_offset_pos == 0 and prev_offset_amt == 0:
            # first time through
            LOG.debug("ref_after_pos=" + str(ref_after_pos))
            LOG.debug('offset_pos=' + str(offset_pos) + '\toffset_amt=' + str(offset_amt))
            LOG.debug('prev_pos=' + str(prev_offset_pos) + '\tprev_amt=' + str(prev_offset_amt))
            offset_diff = offset_amt
            LOG.debug('offset_diff= ' + str(offset_diff))
            # Positive offset => gap on the 'from' side (dt),
            # negative offset => gap on the 'to' side (dq).
            if offset_amt >= 0:
                dt = offset_amt
                ref_after_pos = offset_pos
            else:
                dq = offset_amt * -1
                ref_after_pos = offset_pos + dq
            chain_size = offset_pos - 1
            first_offset = chain_size
            sum_size = chain_size
            sum_dt = dt
            sum_dq = dq
            chain_data.append([chain_size, dt, dq])
            prev_offset_pos = offset_pos
            prev_offset_amt = offset_amt
            LOG.debug('>> {0}\t{1}\t{2}'.format(sum_size, sum_dt, sum_dq))
        else:
            # Subsequent rows: only the *change* in offset matters.
            offset_diff = offset_amt - prev_offset_amt
            LOG.debug("PREV ref_after_pos=" + str(ref_after_pos))
            LOG.debug("offset_pos=" + str(offset_pos))
            if offset_diff >= 0:
                dt = offset_diff
                chain_size = offset_pos - ref_after_pos
                ref_after_pos = offset_pos
            else:
                dq = offset_diff * -1
                chain_size = offset_pos - ref_after_pos
                ref_after_pos = offset_pos + dq
            LOG.debug("ref_after_pos=" + str(ref_after_pos))
            sum_size += chain_size
            sum_dt += dt
            sum_dq += dq
            # fix any 0 length
            if chain_size != 0:
                chain_data.append([chain_size, dt, dq])
                LOG.debug(chain_data[-1])
            else:
                # Zero-length fragment: merge its gaps into the previous row.
                LOG.debug("offset_diff=" + str(offset_diff))
                LOG.debug("offset_amt=" + str(offset_amt))
                LOG.debug("prev_offset_amt=" + str(prev_offset_amt))
                LOG.debug("COMBINING!!!!!!!!!!")
                LOG.debug(chain_data[-1])
                LOG.debug("dt={0}".format(dt))
                LOG.debug("dq={0}".format(dq))
                chain_data[-1][1] += dt
                chain_data[-1][2] += dq
            prev_offset_pos = offset_pos
            prev_offset_amt = offset_amt
            LOG.debug('>>SUM {0}\t{1}\t{2}'.format(sum_size, sum_dt, sum_dq))
            LOG.debug('>> {0}\t{1}\t{2}'.format(chain_size, dt, dq))
    # not sure if the following assumption will work in all cases
    # the last line of the chain segment is the last fragment length
    if chromosome['from_length'] > chromosome['to_length']:
        last_chain = chromosome['from_length'] - (sum_size + sum_dt)
        sum_size += last_chain
    else:
        last_chain = chromosome['to_length'] - (sum_size + sum_dq)
        sum_size += last_chain
    chain_fd = sys.stdout
    if chain_file:
        # Append: each chromosome contributes one chain block.
        chain_fd = open(chain_file, "a")
    chain_fd.write(CHAIN_STRING.format(CHAIN_STRING,
                   from_chr=chromosome['from_chr'], from_length=chromosome['from_length'],
                   from_start=0, from_end=sum_size + sum_dt,
                   to_chr=chromosome['to_chr'], to_length=chromosome['to_length'],
                   to_start=0, to_end=sum_size + sum_dq, id=chromosome['to_chr']))
    chain_fd.write("\n")
    for c in chain_data:
        chain_fd.write("{0}\t{1}\t{2}\n".format(c[0], c[1], c[2]))
    chain_fd.write("{0}\n\n".format(last_chain))
    # NOTE(review): this closes sys.stdout when no chain_file was given —
    # presumably callers always pass a file; confirm before reuse.
    chain_fd.close()
def offset2chain(from_file, to_file, output_file):
    """
    Convert Seqnature offset files to new chain file.

    :param from_file: from Chromosome File (see docs)
    :param to_file: to Chromosome File (see docs)
    :param output_file: the output chain file
    :raises G2GChainFileError: when any chromosome fails to convert
    """
    start = time.time()
    from_file = g2g_fu.check_file(from_file)
    to_file = g2g_fu.check_file(to_file)
    output_file_name = g2g_fu.check_file(output_file, 'w')
    # Start from a clean slate: the per-chromosome writer appends.
    g2g_fu.delete_file(output_file_name)
    LOG.info("FROM FILE: {0}".format(from_file))
    LOG.info("TO FILE: {0}".format(to_file))
    LOG.info("CHAIN FILE: {0}".format(output_file_name))
    LOG.info("Generating chain file...")
    try:
        chromosomes = offset_parse_chromosomes(from_file, to_file)
        for c, chromosome in chromosomes.iteritems():
            LOG.debug('Examining chromosome: {0}'.format(chromosome))
            if chromosome['file_path']:
                offset_chromosome_to_chain(chromosome, output_file)
            else:
                LOG.debug("No file for {0}, so skipping".format(chromosome))
        LOG.info("Chain file created")
    except Exception as e:
        # BUGFIX: use the Python 2.6+/3 compatible "except ... as" syntax
        # (the old "except Exception, e" form is a SyntaxError on Python 3)
        # and keep the underlying error instead of silently discarding it.
        LOG.debug("offset2chain failed: {0}".format(e))
        raise G2GChainFileError("Unable to generate chain file: {0}".format(e))
    LOG.info("Execution complete: {0}".format(format_time(start, time.time())))
def fasta_extract_transcripts(fasta_file, database_file, output, raw=False):
    """Extract every transcript sequence (exons stitched together) to FASTA.

    :param fasta_file: path to an indexed Fasta file, or a ``FastaFile``
    :param database_file: G2G database file listing the transcripts
    :param output: output file path; stdout when falsy
    :param raw: when True write bare sequence only (no FASTA headers)
    :raises G2GValueError, G2GFastaError: re-raised after logging
    """
    start = time.time()
    if isinstance(fasta_file, FastaFile):
        fasta = fasta_file
    else:
        fasta_file = g2g_fu.check_file(fasta_file)
        fasta = FastaFile(fasta_file)
    database_file = g2g_fu.check_file(database_file)
    fasta_out = sys.stdout
    if output:
        output = g2g_fu.check_file(output, 'w')
        fasta_out = open(output, "w")
    LOG.info("FASTA FILE: {0}".format(fasta.filename))
    LOG.info("DATABASE FILE: {0}".format(database_file))
    LOG.info("OUTPUT FILE: {0}".format(fasta_out.name))
    try:
        transcripts = get_transcripts_simple(database_file)
        for i, transcript in enumerate(transcripts):
            LOG.debug("Transcript={0}".format(transcript))
            # Skip transcripts on sequences absent from the Fasta file.
            if transcript.seqid not in fasta.references:
                continue
            new_sequence = StringIO()
            locations = []
            for ensembl_id, exon in transcript.exons.iteritems():
                LOG.debug("Exon ID={0};{1}".format(ensembl_id, exon))
                # fetch() takes 0-based start; exon coordinates are 1-based.
                partial_seq = fasta.fetch(exon.seqid, exon.start-1, exon.end)
                partial_seq_str = str(partial_seq)
                if transcript.strand == 1:
                    new_sequence.write(partial_seq_str)
                else:
                    # Reverse strand: emit the reverse complement.
                    partial_seq_str = str(reverse_complement_sequence(partial_seq))
                    new_sequence.write(partial_seq_str)
                LOG.debug("{0}:{1}-{2} (Length: {3})\n{4}".format(exon.seqid, exon.start, exon.end, len(partial_seq), partial_seq_str))
                locations.append("{0}:{1}-{2}".format(exon.seqid, exon.start, exon.end))
            if raw:
                fasta_out.write(new_sequence.getvalue())
            else:
                fasta_id = ">{0} {1}|{2}\n".format(transcript.ensembl_id, '-' if transcript.strand == -1 else '+', "|".join(locations))
                fasta_out.write(fasta_id)
                for line in wrap_sequence(new_sequence.getvalue()):
                    fasta_out.write(line.strip())
                    fasta_out.write('\n')
    except G2GValueError as e:
        LOG.info(e.msg.rstrip())
        raise e
    except G2GFastaError as e:
        LOG.info(e.msg.rstrip())
        raise e
    finally:
        # BUGFIX: the output handle was never closed, leaking the file
        # descriptor and risking lost buffered output. Never close stdout.
        if fasta_out is not sys.stdout:
            fasta_out.close()
    LOG.info("Execution complete: {0}".format(format_time(start, time.time())))
def fasta_extract_exons(fasta_file, database_file, output, raw=False):
    """Extract every exon sequence to FASTA, one record per exon.

    :param fasta_file: path to an indexed Fasta file, or a ``FastaFile``
    :param database_file: G2G database file listing the transcripts/exons
    :param output: output file path; stdout when falsy
    :param raw: when True write bare sequence only (no FASTA headers)
    :raises G2GValueError, G2GFastaError: re-raised after logging
    """
    start = time.time()
    if isinstance(fasta_file, FastaFile):
        fasta = fasta_file
    else:
        fasta_file = g2g_fu.check_file(fasta_file)
        fasta = FastaFile(fasta_file)
    database_file = g2g_fu.check_file(database_file)
    fasta_out = sys.stdout
    if output:
        output = g2g_fu.check_file(output, 'w')
        fasta_out = open(output, "w")
    LOG.info("FASTA FILE: {0}".format(fasta.filename))
    LOG.info("DATABASE FILE: {0}".format(database_file))
    LOG.info("OUTPUT FILE: {0}".format(fasta_out.name))
    try:
        transcripts = get_transcripts_simple(database_file)
        for i, transcript in enumerate(transcripts):
            # Skip transcripts on sequences absent from the Fasta file.
            if transcript.seqid not in fasta.references:
                continue
            for ensembl_id, exon in transcript.exons.iteritems():
                LOG.debug("Exon={0}".format(exon))
                # fetch() takes 0-based start; exon coordinates are 1-based.
                partial_seq = fasta.fetch(exon.seqid, exon.start-1, exon.end)
                partial_seq_str = partial_seq
                if transcript.strand == -1:
                    # Reverse strand: emit the reverse complement.
                    partial_seq_str = str(reverse_complement_sequence(partial_seq))
                LOG.debug("{0}:{1}-{2} (Length: {3})\n{4}".format(exon.seqid, exon.start, exon.end, len(partial_seq), partial_seq_str))
                if raw:
                    fasta_out.write(partial_seq_str)
                else:
                    fasta_id = ">{0} {1}:{2}-{3}\n".format(exon.ensembl_id, exon.seqid, exon.start, exon.end)
                    fasta_out.write(fasta_id)
                    for line in wrap_sequence(partial_seq_str):
                        fasta_out.write(line.strip())
                        fasta_out.write('\n')
    except G2GValueError as e:
        LOG.info(e.msg.rstrip())
        raise e
    except G2GFastaError as e:
        LOG.info(e.msg.rstrip())
        raise e
    finally:
        # BUGFIX: the output handle was never closed, leaking the file
        # descriptor and risking lost buffered output. Never close stdout.
        if fasta_out is not sys.stdout:
            fasta_out.close()
    LOG.info("Execution complete: {0}".format(format_time(start, time.time())))
def dbfetch(input_file, output_file=None, locations=None, ids=None, display_genes=True, display_transcripts=True, display_exons=True, overlap=True):
    """
    Print genes/transcripts/exons from a G2G DB, selected by location or id.

    Exactly one of ``locations`` or ``ids`` must be provided. Matching
    genes are printed to stdout, with transcripts and exons indented
    below them according to the ``display_*`` flags.

    :param input_file: G2G database file to query
    :param output_file: validated for writability, but — as written —
        never actually written to; all output goes to stdout
        (NOTE(review): looks unfinished — confirm intent)
    :param locations: iterable of ``Location`` objects or parseable strings
    :param ids: iterable of gene ids to fetch
    :param display_genes: print gene rows
    :param display_transcripts: print transcript rows
    :param display_exons: print exon rows
    :param overlap: currently unused in this function
    :raises G2GValueError: bad input file/dir, or both/neither of
        locations and ids given
    :raises G2GLocationError: when a location string cannot be parsed
    """
    start = time.time()
    if input_file and os.path.exists(input_file):
        input_file = os.path.abspath(input_file)
    else:
        raise G2GValueError("The following G2G DB file does not exist: {0}".format(input_file))
    output_file_name = None
    if output_file:
        output_file = os.path.abspath(output_file)
        output_file_dir = os.path.dirname(output_file)
        output_file_name = output_file
        if not os.access(output_file_dir, os.W_OK | os.X_OK):
            raise G2GValueError("Cannot generate file in specified directory: {0}".format(output_file_dir))
    LOG.info("DB FILE: {0}".format(input_file))
    if output_file_name:
        LOG.info("OUTPUT FILE: {0}".format(output_file_name))
    # Location search and id search are mutually exclusive.
    if (locations is None and ids is None) or (locations and ids):
        raise G2GValueError("Can only search by location or ids separately")
    genes = {}
    if locations:
        try:
            for l in locations:
                if not isinstance(l, Location):
                    l = parse_location(l)
                genes = merge_dicts(genes, get_genes(input_file, l))
        except G2GLocationError, le:
            LOG.debug("Unable to parse location, {0}".format(le.message))
            raise le
    else:
        for i in ids:
            genes = merge_dicts(genes, get_gene(input_file, i))
    # Pretty-print the hierarchy; indent depth depends on which of the
    # parent levels are being displayed.
    for k,v in genes.iteritems():
        indent = 0
        if display_genes:
            print v
        for k1, v1 in v.transcripts.iteritems():
            if display_transcripts:
                indent = 1 if display_genes else 0
                print '\t'*indent, str(v1)
            if display_exons:
                if display_genes and display_transcripts:
                    indent = 2
                elif display_genes or display_transcripts:
                    indent = 1
                else:
                    indent = 0
                for k2,v2 in v1.exons.iteritems():
                    print '\t'*indent, str(v2)
    LOG.info("Execution complete: {0}".format(format_time(start, time.time())))
def fasta_transform(fasta_file, chain_file, locations, output_file, bgzip=False, reverse=False):
    """
    Apply a chain file to a Fasta file, writing the transformed sequences.

    For each requested location the chain lines are walked: matched
    fragments are copied from the input Fasta, 'dq' gaps insert bases and
    'dt' gaps skip (delete) reference bases. The result is written as a
    new Fasta file, optionally bgzipped+indexed.

    :param fasta_file: path to an indexed Fasta file, or a ``FastaFile``
    :param chain_file: path to a chain file, or a ``ChainIter``
    :param locations: list of ``Location``/parseable strings; when falsy,
        every reference sequence in the Fasta is transformed
    :param output_file: output Fasta path (``.gz`` appended when bgzipping)
    :param bgzip: compress and index the result with bgzip/faidx
    :param reverse: apply the chain in the reverse direction
    :return: None
    """
    start = time.time()
    if not isinstance(fasta_file, FastaFile):
        fasta_file = g2g_fu.check_file(fasta_file)
    if not isinstance(chain_file, ChainIter):
        chain_file = g2g_fu.check_file(chain_file)
    output_file = g2g_fu.check_file(output_file, 'w')
    # Remove any stale output and index files before writing.
    g2g_fu.delete_file(output_file)
    g2g_fu.delete_index_files(output_file)
    LOG.info("FASTA FILE: {0}".format(fasta_file))
    LOG.info("CHAIN FILE: {0}".format(chain_file))
    LOG.info("OUTPUT FILE: {0}".format(output_file))
    LOG.info("BGZIP: {0}".format(bgzip))
    LOG.info("REVERSE: {0}".format(reverse))
    if isinstance(fasta_file, FastaFile):
        fasta = fasta_file
    else:
        fasta = FastaFile(fasta_file)
    if not isinstance(chain_file, ChainIter):
        chain_file = ChainIter(chain_file, reverse=reverse)
    seq_ids = []
    if locations:
        LOG.debug("Have locations")
        # Normalize caller-supplied locations to Location objects.
        new_locations = []
        for l in locations:
            if isinstance(l, Location):
                new_locations.append(l)
            else:
                new_locations.append(parse_location(l))
            seq_ids.append(new_locations[-1].seqid)
        locations = new_locations
    else:
        LOG.debug("Calculating locations")
        # No locations given: transform every reference sequence in full.
        locations = [parse_location("{0}:1-{1}".format(a, fasta.get_reference_length(a)), 1) for a in fasta.references]
        seq_ids = [a for a in fasta.references]
    temp_output_file = output_file
    if bgzip:
        # Write uncompressed first; bgzip_index() produces the .gz at the end.
        if g2g_fu.get_extension(output_file) != 'gz':
            output_file = "{0}.gz".format(output_file)
        else:
            temp_output_file = temp_output_file[:-3]
    fasta_out = open(temp_output_file, "w")
    LOG.info("Transforming...")
    chr_info = {}
    try:
        # will need a better way, but for now...
        # First pass: read the whole chain file into chr_info, keyed by
        # chromosome name from each chain header.
        LOG.info("Parsing chain file...")
        for line in chain_file:
            if len(line) > 7:
                # A full-width line is the header row of a new chain block.
                LOG.debug("Adding chromosome {0}".format(chain_file.current_chain_header[1]))
                chr_info[chain_file.current_chain_header[1]] = {'from_size': line[2], 'from_start': line[4], 'from_end': line[5],
                                                                'to_size': line[7], 'to_start': line[9], 'to_end': line[10],
                                                                'header_chain':chain_file.current_chain_header, 'lines': []}
            else:
                chr_info[chain_file.current_chain_header[1]]['lines'].append(line)
        LOG.info("Chain file parsed")
        insertion_bases = 0
        deletion_bases = 0
        for location in locations:
            LOG.info("Processing chromosome={0}".format(location.seqid))
            LOG.debug("Location: {0}".format(location))
            chrom_size_from = chr_info[location.seqid]['from_size']
            chrom_size_to = chr_info[location.seqid]['to_size']
            last_pos = chr_info[location.seqid]['from_start']
            new_sequence = StringIO()
            chain_file.reset()
            for chain_line in chr_info[location.seqid]['lines']:
                LOG.debug("\nLINE: {0} : {1}".format(chain_file.line_no, chain_line))
                if len(chain_line) == 1:
                    # last line
                    # Single-field line = final fragment: copy it, write the
                    # finished record, and move to the next location.
                    fragment = chain_line[0]
                    partial_seq = fasta.fetch(location.seqid, last_pos, last_pos + fragment)
                    new_sequence.write(str(partial_seq))
                    if len(new_sequence.getvalue()) < chrom_size_to:
                        LOG.warn("Length's do not match, chromosome length in chain: {0}, sequence length: {1}".format(chrom_size_to, len(new_sequence.getvalue())))
                    fasta_out.write(">{0} {1}:{2}-{3}\n".format(location.seqid, location.seqid, chr_info[location.seqid]['from_start'] + 1, chrom_size_to))
                    for l in wrap_sequence(new_sequence.getvalue()):
                        fasta_out.write(l.strip())
                        fasta_out.write('\n')
                    break
                else:
                    # fragment_size dt_size dq_size same_bases dt_bases dq_bases
                    # When reverse, the dt/dq columns swap roles.
                    fragment = chain_line[0]
                    dt = chain_line[1 if not reverse else 2]
                    dq = chain_line[2 if not reverse else 1]
                    same = chain_line[3]
                    dt_bases = chain_line[4 if not reverse else 5]
                    dq_bases = chain_line[5 if not reverse else 4]
                    partial_seq = fasta.fetch(location.seqid, last_pos, last_pos + fragment)
                    new_sequence.write(partial_seq)
                    if dq > 0:
                        # insertion
                        LOG.debug("INSERTION")
                        new_sequence.write(dq_bases)
                        LOG.debug("{0}:{1}-{2} (Length: {3})".format(location.seqid, last_pos, last_pos + fragment, len(partial_seq)))
                        if len(partial_seq) > 100:
                            LOG.debug("{0}...{1}".format(partial_seq[:10], partial_seq[-10:]))
                        else:
                            LOG.debug(partial_seq)
                        LOG.debug("Adding {0}".format(dq_bases))
                        LOG.debug("SAME={0}, {1}".format(same, partial_seq[-(len(same)):]))
                        insertion_bases += dq
                    if dt > 0:
                        # deletion
                        LOG.debug("DELETION")
                        last_pos += dt
                        LOG.debug("skipping ahead {0} bases".format(dt))
                        deletion_bases += dt
                    last_pos += fragment
                    LOG.debug("LAST_POS={0}, INSERTIONS={1}, DELETIONS={2}, DIFF={3}".format(last_pos, insertion_bases, deletion_bases, (insertion_bases - deletion_bases)))
        # bgzip and index
        # NOTE(review): fasta_out is not explicitly closed/flushed before
        # bgzip_index reads it — presumably bgzip_index handles this; verify.
        if bgzip:
            LOG.info("Compressing and indexing...")
            g2g_fu.bgzip_index(temp_output_file, output_file, 'fa')
    except G2GLocationError, le:
        LOG.debug("Unable to parse location, {0}".format(le.message))
        raise le
    except G2GValueError as e:
        LOG.debug("Unable to parse alocation, {0}".format(e.message))
        raise e
    except G2GFastaError as e:
        LOG.debug("Unable to parse blocation, {0}".format(e.message))
        raise e
    except TypeError as e:
        LOG.debug("Unable to parse clocation, {0}".format(e.message))
        raise e
        #print str(e)
        #raise G2GError("Improper chain file")
    LOG.info("Transforming complete")
    LOG.info("Execution complete: {0}".format(format_time(start, time.time())))
|
everestial/g2gtools
|
g2gtools/g2g.py
|
Python
|
gpl-3.0
| 42,880
|
[
"pysam"
] |
bd3621ea2b3f2dc596584a67fce3835d1160a2aa1467c9a686093e2bbcf65292
|
""" NOTA BENE: This agent should NOT be run alone. Instead, it serves as a base class for extensions.
The TaskManagerAgentBase is the base class to submit tasks to external systems,
monitor and update the tasks and file status in the transformation DB.
This agent is extended in WorkflowTaskAgent and RequestTaskAgent.
In case you want to further extend it you are required to follow the note on the
initialize method and on the _getClients method.
"""
import time
import datetime
import concurrent.futures
from DIRAC import S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.Dictionaries import breakDictionaryIntoChunks
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getDNForUsername, getUsernameForDN
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.TransformationSystem.Client.FileReport import FileReport
from DIRAC.TransformationSystem.Client.WorkflowTasks import WorkflowTasks
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.TransformationSystem.Agent.TransformationAgentsUtilities import TransformationAgentsUtilities
from DIRAC.WorkloadManagementSystem.Client import JobStatus
from DIRAC.WorkloadManagementSystem.Client.JobManagerClient import JobManagerClient
AGENT_NAME = "Transformation/TaskManagerAgentBase"
class TaskManagerAgentBase(AgentModule, TransformationAgentsUtilities):
"""To be extended. Please look at WorkflowTaskAgent and RequestTaskAgent."""
def __init__(self, *args, **kwargs):
    """c'tor

    Always call this in the extension agent
    """
    AgentModule.__init__(self, *args, **kwargs)
    TransformationAgentsUtilities.__init__(self)
    # Clients: extensions must set self.transClient in initialize().
    self.transClient = None
    self.jobManagerClient = None
    # Transformation types handled by this agent (set by the extension).
    self.transType = []
    # Max number of tasks treated per transformation per cycle.
    self.tasksPerLoop = 50
    self.maxParametricJobs = 20  # will be updated in execute()
    # credentials
    self.shifterProxy = None
    self.credentials = None
    # (owner, ownerGroup, ownerDN) resolved from self.credentials.
    self.credTuple = (None, None, None)
    self.pluginLocation = ""
    self.bulkSubmissionFlag = False
#############################################################################
def initialize(self):
    """Agent initialization.

    The extensions MUST provide in the initialize method the following data members:
    - TransformationClient objects (self.transClient),
    - set the shifterProxy if different from the default one set here ('ProductionManager')
    - list of transformation types to be looked (self.transType)

    :return: S_OK on success, S_ERROR-like result if credentials cannot be resolved
    """
    gMonitor.registerActivity(
        "SubmittedTasks", "Automatically submitted tasks", "Transformation Monitoring", "Tasks", gMonitor.OP_ACUM
    )
    self.pluginLocation = self.am_getOption("PluginLocation", "DIRAC.TransformationSystem.Client.TaskManagerPlugin")
    # Default clients
    self.transClient = TransformationClient()
    self.jobManagerClient = JobManagerClient()
    # Bulk submission flag
    self.bulkSubmissionFlag = self.am_getOption("BulkSubmission", self.bulkSubmissionFlag)
    # Shifter credentials to use, could replace the use of shifterProxy eventually
    self.shifterProxy = self.am_getOption("shifterProxy", self.shifterProxy)
    self.credentials = self.am_getOption("ShifterCredentials", self.credentials)
    # Resolve self.credTuple from the configured credentials; abort on failure.
    resCred = self.__getCredentials()
    if not resCred["OK"]:
        return resCred
    # setting up the threading
    maxNumberOfThreads = self.am_getOption("maxNumberOfThreads", 15)
    self.log.verbose("Multithreaded with %d threads" % maxNumberOfThreads)
    self.threadPoolExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=maxNumberOfThreads)
    return S_OK()
def finalize(self):
    """Drain the worker thread pool before letting the agent stop."""
    self._logInfo("Wait for threads to get empty before terminating the agent", method="finalize")
    # shutdown() blocks until every submitted task has completed.
    self.threadPoolExecutor.shutdown()
    self._logInfo("Threads are empty, terminating the agent...", method="finalize")
    return S_OK()
def execute(self):
    """Main execution method: collect the operations to run on each selected
    transformation (status/file updates, reserved-task checks, task
    submission) and dispatch them to the thread pool.

    :return: S_OK always; per-transformation failures are only logged
    """
    # 1. determining which credentials will be used for the submission
    owner, ownerGroup, ownerDN = None, None, None
    # getting the credentials for submission
    resProxy = getProxyInfo(proxy=False, disableVOMS=False)
    if resProxy["OK"]:  # there is a shifterProxy
        proxyInfo = resProxy["Value"]
        owner = proxyInfo["username"]
        ownerGroup = proxyInfo["group"]
        ownerDN = proxyInfo["identity"]
        self.log.info("ShifterProxy: Tasks will be submitted with the credentials %s:%s" % (owner, ownerGroup))
    elif self.credentials:
        owner, ownerGroup, ownerDN = self.credTuple
    else:
        self.log.info("Using per Transformation Credentials!")
    # 2. Determining which operations to do on each transformation
    self.operationsOnTransformationDict = {}  # key: TransID. Value: dict with body, and list of operations
    # 2.1 Determine whether the task status is to be monitored and updated
    if not self.am_getOption("MonitorTasks", ""):
        self.log.verbose("Monitoring of tasks is disabled. To enable it, create the 'MonitorTasks' option")
    else:
        # Get the transformations for which the tasks have to be updated
        status = self.am_getOption(
            "UpdateTasksTransformationStatus",
            self.am_getOption("UpdateTasksStatus", ["Active", "Completing", "Stopped"]),
        )
        transformations = self._selectTransformations(transType=self.transType, status=status, agentType=[])
        if not transformations["OK"]:
            self.log.warn("Could not select transformations:", transformations["Message"])
        else:
            self._addOperationForTransformations(
                self.operationsOnTransformationDict,
                "updateTaskStatus",
                transformations,
                owner=owner,
                ownerGroup=ownerGroup,
                ownerDN=ownerDN,
            )
    # 2.2. Determine whether the task files status is to be monitored and updated
    if not self.am_getOption("MonitorFiles", ""):
        self.log.verbose("Monitoring of files is disabled. To enable it, create the 'MonitorFiles' option")
    else:
        # Get the transformations for which the files have to be updated
        status = self.am_getOption(
            "UpdateFilesTransformationStatus",
            self.am_getOption("UpdateFilesStatus", ["Active", "Completing", "Stopped"]),
        )
        transformations = self._selectTransformations(transType=self.transType, status=status, agentType=[])
        if not transformations["OK"]:
            self.log.warn("Could not select transformations:", transformations["Message"])
        else:
            self._addOperationForTransformations(
                self.operationsOnTransformationDict,
                "updateFileStatus",
                transformations,
                owner=owner,
                ownerGroup=ownerGroup,
                ownerDN=ownerDN,
            )
    # Determine whether the checking of reserved tasks is to be performed
    if not self.am_getOption("CheckReserved", ""):
        self.log.verbose("Checking of reserved tasks is disabled. To enable it, create the 'CheckReserved' option")
    else:
        # Get the transformations for which the check of reserved tasks have to be performed
        status = self.am_getOption(
            "CheckReservedTransformationStatus",
            self.am_getOption("CheckReservedStatus", ["Active", "Completing", "Stopped"]),
        )
        transformations = self._selectTransformations(transType=self.transType, status=status, agentType=[])
        if not transformations["OK"]:
            self.log.warn("Could not select transformations:", transformations["Message"])
        else:
            self._addOperationForTransformations(
                self.operationsOnTransformationDict,
                "checkReservedTasks",
                transformations,
                owner=owner,
                ownerGroup=ownerGroup,
                ownerDN=ownerDN,
            )
    # Determine whether the submission of tasks is to be performed
    if not self.am_getOption("SubmitTasks", "yes"):
        self.log.verbose("Submission of tasks is disabled. To enable it, create the 'SubmitTasks' option")
    else:
        # Get the transformations for which the submission of tasks have to be performed
        status = self.am_getOption(
            "SubmitTransformationStatus", self.am_getOption("SubmitStatus", ["Active", "Completing"])
        )
        transformations = self._selectTransformations(transType=self.transType, status=status)
        if not transformations["OK"]:
            self.log.warn("Could not select transformations:", transformations["Message"])
        else:
            # Get the transformations which should be submitted
            self.tasksPerLoop = self.am_getOption("TasksPerLoop", self.tasksPerLoop)
            res = self.jobManagerClient.getMaxParametricJobs()
            if not res["OK"]:
                self.log.warn("Could not get the maxParametricJobs from JobManager", res["Message"])
            else:
                self.maxParametricJobs = res["Value"]
            self._addOperationForTransformations(
                self.operationsOnTransformationDict,
                "submitTasks",
                transformations,
                owner=owner,
                ownerGroup=ownerGroup,
                ownerDN=ownerDN,
            )
    # now call _execute...
    # Fan out: one _execute() per transformation, then wait on all futures.
    future_to_transID = {}
    for transID, transDict in self.operationsOnTransformationDict.items():
        future = self.threadPoolExecutor.submit(self._execute, transDict)
        future_to_transID[future] = transID
    for future in concurrent.futures.as_completed(future_to_transID):
        transID = future_to_transID[future]
        try:
            future.result()
        except Exception as exc:
            self._logError("%d generated an exception: %s" % (transID, exc))
        else:
            self._logInfo("Processed %d" % transID)
    return S_OK()
def _selectTransformations(self, transType=None, status=None, agentType=None):
    """Query the TransformationDB for transformations matching the filters.

    :param transType: list of transformation types (None = any type)
    :param status: list of statuses (defaults to Active/Completing)
    :param agentType: list of agent types (defaults to Automatic)
    :returns: the client result dict (S_OK with a list, or S_ERROR)
    """
    # Apply the standard defaults when the caller passed nothing.
    status = ["Active", "Completing"] if status is None else status
    agentType = ["Automatic"] if agentType is None else agentType
    condDict = {}
    if status:
        condDict["Status"] = status
    if transType is not None:
        condDict["Type"] = transType
    if agentType:
        condDict["AgentType"] = agentType
    res = self.transClient.getTransformations(condDict=condDict)
    if not res["OK"]:
        self.log.error("Failed to get transformations:", res["Message"])
        return res
    if res["Value"]:
        self.log.verbose("Obtained %d transformations" % len(res["Value"]))
    else:
        self.log.verbose("No transformations found")
    return res
#############################################################################
def _getClients(self, ownerDN=None, ownerGroup=None):
"""Returns the clients used in the threads
This is another function that should be extended.
The clients provided here are defaults, and should be adapted
If ownerDN and ownerGroup are not None the clients will delegate to these credentials
:param str ownerDN: DN of the owner of the submitted jobs
:param str ownerGroup: group of the owner of the submitted jobs
:returns: dict of Clients
"""
threadTransformationClient = TransformationClient()
threadTaskManager = WorkflowTasks(ownerDN=ownerDN, ownerGroup=ownerGroup)
threadTaskManager.pluginLocation = self.pluginLocation
return {"TransformationClient": threadTransformationClient, "TaskManager": threadTaskManager}
    def _execute(self, transDict):
        """Thread worker: run all queued operations for one transformation.

        :param dict transDict: entry built by _addOperationForTransformations
                               (TransformationID, Body, Operations, Owner,
                               OwnerGroup, OwnerDN)
        :returns: None — failures are only logged
        """
        # Each thread will have its own clients if we use credentials/shifterProxy
        clients = (
            self._getClients()
            if self.shifterProxy
            else self._getClients(ownerGroup=self.credTuple[1], ownerDN=self.credTuple[2])
            if self.credentials
            else None
        )
        method = "_execute"
        # Pre-set so the except clause can log something sensible even when we
        # fail before entering the operations loop
        operation = "None"
        startTime = time.time()
        try:
            transID = transDict["TransformationID"]
            operations = transDict["Operations"]
            if not (self.credentials or self.shifterProxy):
                # No agent-wide credentials: delegate to the transformation owner
                ownerDN, group = transDict["OwnerDN"], transDict["OwnerGroup"]
                clients = self._getClients(ownerDN=ownerDN, ownerGroup=group)
            self._logInfo("Start processing transformation", method=method, transID=transID)
            for operation in operations:
                self._logInfo("Executing %s" % operation, method=method, transID=transID)
                startOperation = time.time()
                # Each operation is the name of a method of this class
                # (e.g. "submitTasks", "updateTaskStatus")
                res = getattr(self, operation)(transDict, clients)
                if not res["OK"]:
                    self._logError(
                        "Failed to execute '%s': %s" % (operation, res["Message"]), method=method, transID=transID
                    )
                self._logInfo(
                    "Executed %s in %.1f seconds" % (operation, time.time() - startOperation),
                    method=method,
                    transID=transID,
                )
        except Exception as x:  # pylint: disable=broad-except
            self._logException(
                "Exception executing operation %s" % operation, lException=x, method=method, transID=transID
            )
        finally:
            self._logInfo(
                "Processed transformation in %.1f seconds" % (time.time() - startTime), method=method, transID=transID
            )
#############################################################################
# real operations done
    def updateTaskStatus(self, transDict, clients):
        """Propagate external (WMS/RMS) task statuses into the TransformationDB.

        :param dict transDict: transformation description (needs TransformationID)
        :param dict clients: dict with 'TransformationClient' and 'TaskManager'
        :returns: S_OK / S_ERROR
        """
        transID = transDict["TransformationID"]
        method = "updateTaskStatus"

        # Get the tasks which are in an UPDATE state, i.e. job statuses + request-specific statuses
        updateStatus = self.am_getOption(
            "TaskUpdateStatus",
            [
                JobStatus.CHECKING,
                JobStatus.DELETED,
                JobStatus.KILLED,
                JobStatus.STAGING,
                JobStatus.STALLED,
                JobStatus.MATCHED,
                JobStatus.RESCHEDULED,
                JobStatus.COMPLETING,
                JobStatus.COMPLETED,
                JobStatus.SUBMITTING,
                JobStatus.RECEIVED,
                JobStatus.WAITING,
                JobStatus.RUNNING,
                "Scheduled",
                "Assigned",
            ],
        )
        condDict = {"TransformationID": transID, "ExternalStatus": updateStatus}
        # Only consider tasks that were not updated in the last 10 minutes
        timeStamp = str(datetime.datetime.utcnow() - datetime.timedelta(minutes=10))

        # Get transformation tasks
        transformationTasks = clients["TransformationClient"].getTransformationTasks(
            condDict=condDict, older=timeStamp, timeStamp="LastUpdateTime"
        )
        if not transformationTasks["OK"]:
            self._logError(
                "Failed to get tasks to update:", transformationTasks["Message"], method=method, transID=transID
            )
            return transformationTasks
        if not transformationTasks["Value"]:
            self._logVerbose("No tasks found to update", method=method, transID=transID)
            return transformationTasks

        # Get status for the transformation tasks; chunkSize == 0 means a single chunk
        chunkSize = self.am_getOption("TaskUpdateChunkSize", 0)
        try:
            chunkSize = int(chunkSize)
        except ValueError:
            chunkSize = 0
        if chunkSize:
            self._logVerbose(
                "Getting %d tasks status (chunks of %d)" % (len(transformationTasks["Value"]), chunkSize),
                method=method,
                transID=transID,
            )
        else:
            self._logVerbose(
                "Getting %d tasks status" % len(transformationTasks["Value"]), method=method, transID=transID
            )
        updated = {}
        for nb, taskChunk in enumerate(
            breakListIntoChunks(transformationTasks["Value"], chunkSize)
            if chunkSize
            else [transformationTasks["Value"]]
        ):
            submittedTaskStatus = clients["TaskManager"].getSubmittedTaskStatus(taskChunk)
            if not submittedTaskStatus["OK"]:
                self._logError(
                    "Failed to get updated task states:", submittedTaskStatus["Message"], method=method, transID=transID
                )
                return submittedTaskStatus
            statusDict = submittedTaskStatus["Value"]
            if not statusDict:
                self._logVerbose("%4d: No tasks to update" % nb, method=method, transID=transID)

            # Set status for tasks that changed (statusDict maps new status -> task IDs)
            for status, taskIDs in statusDict.items():
                self._logVerbose(
                    "%4d: Updating %d task(s) to %s" % (nb, len(taskIDs), status), method=method, transID=transID
                )
                setTaskStatus = clients["TransformationClient"].setTaskStatus(transID, taskIDs, status)
                if not setTaskStatus["OK"]:
                    self._logError(
                        "Failed to update task status for transformation:",
                        setTaskStatus["Message"],
                        method=method,
                        transID=transID,
                    )
                    return setTaskStatus
                updated[status] = updated.setdefault(status, 0) + len(taskIDs)

        for status, nb in updated.items():
            self._logInfo("Updated %d tasks to status %s" % (nb, status), method=method, transID=transID)
        return S_OK()
    def updateFileStatus(self, transDict, clients):
        """Update the status of the transformation files from the external system.

        :param dict transDict: transformation description (needs TransformationID)
        :param dict clients: dict with 'TransformationClient' and 'TaskManager'
        :returns: S_OK / S_ERROR
        """
        transID = transDict["TransformationID"]
        method = "updateFileStatus"

        # Only consider files that were not updated in the last 10 minutes
        timeStamp = str(datetime.datetime.utcnow() - datetime.timedelta(minutes=10))

        # get transformation files in Assigned status
        condDict = {"TransformationID": transID, "Status": ["Assigned"]}
        transformationFiles = clients["TransformationClient"].getTransformationFiles(
            condDict=condDict, older=timeStamp, timeStamp="LastUpdate"
        )
        if not transformationFiles["OK"]:
            self._logError(
                "Failed to get transformation files to update:",
                transformationFiles["Message"],
                method=method,
                transID=transID,
            )
            return transformationFiles
        if not transformationFiles["Value"]:
            self._logInfo("No files to be updated", method=method, transID=transID)
            return transformationFiles

        # Get the status of the transformation files
        # Sort the files by taskID
        taskFiles = {}
        for fileDict in transformationFiles["Value"]:
            taskFiles.setdefault(fileDict["TaskID"], []).append(fileDict)

        chunkSize = 100
        self._logVerbose(
            "Getting file status for %d tasks (chunks of %d)" % (len(taskFiles), chunkSize),
            method=method,
            transID=transID,
        )
        updated = {}
        # Process 100 tasks at a time
        for nb, taskIDs in enumerate(breakListIntoChunks(taskFiles, chunkSize)):
            fileChunk = []
            for taskID in taskIDs:
                fileChunk += taskFiles[taskID]
            submittedFileStatus = clients["TaskManager"].getSubmittedFileStatus(fileChunk)
            if not submittedFileStatus["OK"]:
                self._logError(
                    "Failed to get updated file states for transformation:",
                    submittedFileStatus["Message"],
                    method=method,
                    transID=transID,
                )
                return submittedFileStatus
            statusDict = submittedFileStatus["Value"]
            if not statusDict:
                self._logVerbose("%4d: No file states to be updated" % nb, method=method, transID=transID)
                continue

            # Set the status of files; FileReport batches the updates until commit()
            fileReport = FileReport(server=clients["TransformationClient"].getServer())
            for lfn, status in statusDict.items():
                updated[status] = updated.setdefault(status, 0) + 1
                setFileStatus = fileReport.setFileStatus(transID, lfn, status)
                if not setFileStatus["OK"]:
                    return setFileStatus
            commit = fileReport.commit()
            if not commit["OK"]:
                self._logError(
                    "Failed to update file states for transformation:",
                    commit["Message"],
                    method=method,
                    transID=transID,
                )
                return commit
            else:
                self._logVerbose(
                    "%4d: Updated the states of %d files" % (nb, len(commit["Value"])), method=method, transID=transID
                )

        for status, nb in updated.items():
            self._logInfo("Updated %d files to status %s" % (nb, status), method=method, transID=transID)
        return S_OK()
    def checkReservedTasks(self, transDict, clients):
        """Recover tasks stuck in the 'Reserved' state.

        Tasks Reserved for more than one hour either lost their external
        job/request (status reset to Created) or have one (status set to
        Submitted with the external ID recorded).

        :param dict transDict: transformation description (needs TransformationID)
        :param dict clients: dict with 'TransformationClient' and 'TaskManager'
        :returns: S_OK / S_ERROR
        """
        transID = transDict["TransformationID"]
        method = "checkReservedTasks"

        # Select the tasks which have been in Reserved status for more than 1 hour for selected transformations
        condDict = {"TransformationID": transID, "ExternalStatus": "Reserved"}
        time_stamp_older = str(datetime.datetime.utcnow() - datetime.timedelta(hours=1))

        res = clients["TransformationClient"].getTransformationTasks(condDict=condDict, older=time_stamp_older)
        self._logDebug("getTransformationTasks(%s) return value:" % condDict, res, method=method, transID=transID)
        if not res["OK"]:
            self._logError("Failed to get Reserved tasks:", res["Message"], method=method, transID=transID)
            return res
        if not res["Value"]:
            self._logVerbose("No Reserved tasks found", transID=transID)
            return res
        reservedTasks = res["Value"]

        # Update the reserved tasks
        res = clients["TaskManager"].updateTransformationReservedTasks(reservedTasks)
        self._logDebug(
            "updateTransformationReservedTasks(%s) return value:" % reservedTasks, res, method=method, transID=transID
        )
        if not res["OK"]:
            self._logError(
                "Failed to update transformation reserved tasks:", res["Message"], method=method, transID=transID
            )
            return res
        noTasks = res["Value"]["NoTasks"]
        taskNameIDs = res["Value"]["TaskNameIDs"]

        # For the tasks with no associated request found re-set the status of the task in the transformationDB
        if noTasks:
            self._logInfo(
                "Resetting status of %d tasks to Created as no associated job/request found" % len(noTasks),
                method=method,
                transID=transID,
            )
            for taskName in noTasks:
                # NOTE: transID is re-bound from the parsed task name here
                transID, taskID = self._parseTaskName(taskName)
                res = clients["TransformationClient"].setTaskStatus(transID, taskID, "Created")
                if not res["OK"]:
                    self._logError(
                        "Failed to update task status and ID after recovery:",
                        "%s %s" % (taskName, res["Message"]),
                        method=method,
                        transID=transID,
                    )
                    return res

        # For the tasks for which an associated request was found update the task details in the transformationDB
        for taskName, extTaskID in taskNameIDs.items():
            transID, taskID = self._parseTaskName(taskName)
            self._logInfo(
                "Setting status of %s to Submitted with ID %s" % (taskName, extTaskID), method=method, transID=transID
            )
            setTaskStatusAndWmsID = clients["TransformationClient"].setTaskStatusAndWmsID(
                transID, taskID, "Submitted", str(extTaskID)
            )
            if not setTaskStatusAndWmsID["OK"]:
                self._logError(
                    "Failed to update task status and ID after recovery:",
                    "%s %s" % (taskName, setTaskStatusAndWmsID["Message"]),
                    method=method,
                    transID=transID,
                )
                return setTaskStatusAndWmsID
        return S_OK()
    def submitTasks(self, transDict, clients):
        """Submit the tasks to an external system, using the taskManager provided.

        :param dict transDict: transformation description (TransformationID, Body,
                               Owner, OwnerGroup, OwnerDN)
        :param dict clients: dictionary of client objects

        :return: S_OK/S_ERROR
        """
        transID = transDict["TransformationID"]
        transBody = transDict["Body"]
        owner = transDict["Owner"]
        ownerGroup = transDict["OwnerGroup"]
        ownerDN = transDict["OwnerDN"]
        method = "submitTasks"

        # Get all tasks to submit
        tasksToSubmit = clients["TransformationClient"].getTasksToSubmit(transID, self.tasksPerLoop)
        self._logDebug(
            "getTasksToSubmit(%s, %s) return value:" % (transID, self.tasksPerLoop),
            tasksToSubmit,
            method=method,
            transID=transID,
        )
        if not tasksToSubmit["OK"]:
            self._logError("Failed to obtain tasks:", tasksToSubmit["Message"], method=method, transID=transID)
            return tasksToSubmit
        tasks = tasksToSubmit["Value"]["JobDictionary"]
        if not tasks:
            self._logVerbose("No tasks found for submission", method=method, transID=transID)
            return tasksToSubmit
        self._logInfo("Obtained %d tasks for submission" % len(tasks), method=method, transID=transID)

        # Prepare tasks and submit them, by chunks; bulk submission uses one
        # parametric job per chunk, hence the larger chunk size
        chunkSize = self.maxParametricJobs if self.bulkSubmissionFlag else self.tasksPerLoop
        for taskDictChunk in breakDictionaryIntoChunks(tasks, chunkSize):
            res = self._prepareAndSubmitAndUpdateTasks(
                transID, transBody, taskDictChunk, owner, ownerDN, ownerGroup, clients
            )
            if not res["OK"]:
                return res
            self._logVerbose(
                "Submitted %d jobs, bulkSubmissionFlag = %s" % (len(taskDictChunk), self.bulkSubmissionFlag)
            )
        return S_OK()
    def _prepareAndSubmitAndUpdateTasks(self, transID, transBody, tasks, owner, ownerDN, ownerGroup, clients):
        """prepare + submit + monitor a dictionary of tasks

        :param int transID: transformation ID
        :param str transBody: transformation job template
        :param dict tasks: dictionary of per task parameters
        :param str owner: owner of the transformation
        :param str ownerDN: DN of the owner of the transformation
        :param str ownerGroup: group of the owner of the transformation
        :param dict clients: dictionary of client objects

        :return: S_OK/S_ERROR
        """
        method = "_prepareAndSubmitAndUpdateTasks"
        # prepare tasks: resolve the job template against the per-task parameters
        preparedTransformationTasks = clients["TaskManager"].prepareTransformationTasks(
            transBody, tasks, owner, ownerGroup, ownerDN, self.bulkSubmissionFlag
        )
        self._logDebug(
            "prepareTransformationTasks return value:", preparedTransformationTasks, method=method, transID=transID
        )
        if not preparedTransformationTasks["OK"]:
            self._logError(
                "Failed to prepare tasks", preparedTransformationTasks["Message"], method=method, transID=transID
            )
            return preparedTransformationTasks

        # Submit tasks to the external system
        res = clients["TaskManager"].submitTransformationTasks(preparedTransformationTasks["Value"])
        self._logDebug("submitTransformationTasks return value:", res, method=method, transID=transID)
        if not res["OK"]:
            self._logError("Failed to submit prepared tasks:", res["Message"], method=method, transID=transID)
            return res

        # Update tasks after submission; the submit result carries the external IDs
        res = clients["TaskManager"].updateDBAfterTaskSubmission(res["Value"])
        self._logDebug("updateDBAfterTaskSubmission return value:", res, method=method, transID=transID)
        if not res["OK"]:
            self._logError("Failed to update DB after task submission:", res["Message"], method=method, transID=transID)
            return res
        return S_OK()
@staticmethod
def _addOperationForTransformations(
operationsOnTransformationDict,
operation,
transformations,
owner=None,
ownerGroup=None,
ownerDN=None,
):
"""Fill the operationsOnTransformationDict"""
transformationIDsAndBodies = (
(
transformation["TransformationID"],
transformation["Body"],
transformation["AuthorDN"],
transformation["AuthorGroup"],
)
for transformation in transformations["Value"]
)
for transID, body, t_ownerDN, t_ownerGroup in transformationIDsAndBodies:
if transID in operationsOnTransformationDict:
operationsOnTransformationDict[transID]["Operations"].append(operation)
else:
operationsOnTransformationDict[transID] = {
"TransformationID": transID,
"Body": body,
"Operations": [operation],
"Owner": owner if owner else getUsernameForDN(t_ownerDN)["Value"],
"OwnerGroup": ownerGroup if owner else t_ownerGroup,
"OwnerDN": ownerDN if owner else t_ownerDN,
}
def __getCredentials(self):
"""Get the credentials to use if ShifterCredentials are set, otherwise do nothing.
This function fills the self.credTuple tuple.
"""
if not self.credentials:
return S_OK()
resCred = Operations().getOptionsDict("/Shifter/%s" % self.credentials)
if not resCred["OK"]:
self.log.error("Cred: Failed to find shifter credentials", self.credentials)
return resCred
owner = resCred["Value"]["User"]
ownerGroup = resCred["Value"]["Group"]
# returns a list
ownerDN = getDNForUsername(owner)["Value"][0]
self.credTuple = (owner, ownerGroup, ownerDN)
self.log.info("Cred: Tasks will be submitted with the credentials %s:%s" % (owner, ownerGroup))
return S_OK()
|
DIRACGrid/DIRAC
|
src/DIRAC/TransformationSystem/Agent/TaskManagerAgentBase.py
|
Python
|
gpl-3.0
| 31,985
|
[
"DIRAC"
] |
6f751afa924e5603373f4eaa36d3d2caa71f98c7d0197879c8566fee77e4c63d
|
##########################################################################
# This file is part of ssm.
#
# ssm is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ssm is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with ssm. If not, see
# <http://www.gnu.org/licenses/>.
#########################################################################
import copy
from Cmodel import Cmodel
class SsmError(Exception):
    """Raised when the user-supplied model cannot be turned into C code."""

    def __init__(self, value):
        # no super().__init__ call: BaseException.__new__ already stores args
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class Ccoder(Cmodel):
"""write the C code from the user input coming from the web interface..."""
    def __init__(self, dpkgRoot, dpkg, **kwargs):
        """Parse the data package via Cmodel; Ccoder only adds C-code generation."""
        Cmodel.__init__(self, dpkgRoot, dpkg, **kwargs)
def get_inc_reset(self, observation):
inc = set()
for x in observation:
if x != "distribution" and x!= 'name' and x !='start':
for e in self.change_user_input(observation[x]):
if e in self.par_inc:
inc.add(e)
return inc
    def parameters(self):
        """
        Everything needed to create ssm_parameter_t, ssm_state_t and load ssm_input_t

        Returns a dict of parameter/state metadata and pre-compiled C terms
        consumed by the C templates.
        """
        parameters = copy.deepcopy(self.model['inputs'])

        # Attach the C version of each user-supplied transformation to its input
        for p in parameters:
            if 'transformation' in p:
                if 'require' in p and 'fields' in p['require']:
                    #covariates (still have require hash as Cmodel only resolve priors): the transformation has to be done in terms of the name property of the require hash (p['require']['name'])
                    if 'name' not in p['require']:
                        raise SsmError('the require hash need a name property (the transformation has to be done in terms of this name)')
                    xify = p['require']['name'] #!!!! the hash need a name property (in addition to datapackage resource and fields)
                elif 'require' in p and 'name' in p['require']:
                    xify = p['require']['name']
                else:
                    xify = p['name']

                p['f_user2par'] = self.make_C_term(p['transformation'], True, force_par=True, xify=xify, set_t0=True)

                ## inverse of the transformation function
                ## if 'prior' in p and 'name' in p['prior']:
                ##     p['f_par2user'] = self.make_C_term(p['transformation']+ '-' + p['name'], True, inverse=p['prior']['name'], force_par=True, xify=p['name'], set_t0=True)

            if 'to_resource' in p:
                p['f_2prior'] = self.make_C_term(p['to_resource'], True)

        # Diffusion ("drift") terms of the optional SDE block
        drifts = self.model.get('sde',{})
        drifts = drifts and drifts['drift']
        #TODO support ode drifts += self.model['ode']

        states = self.par_sv + self.par_inc
        pars = self.par_sv + self.par_noise + self.par_proc + self.par_disp + self.par_obs + self.par_other

        #make C code for f_, f_inv f_der, f_der_inv
        for p in drifts:
            if 'transformation' in p:
                p['f'] = self.make_C_term(p['transformation'], True, human=True, xify=p['name'], set_t0=True)
                p['f_inv'] = self.make_C_term(p['transformation']+ '- x', True, inverse=p['name'], human=True, set_t0=True)
                p['f_der'] = self.make_C_term(p['transformation'], True, derivate=p['name'], human=True, xify=p['name'], set_t0=True)
                p['f_der_inv'] = self.make_C_term(p['f_inv'], True, derivate='x', human=True, set_t0=True)
                p['f_der2_inv'] = self.make_C_term(p['f_der_inv'], True, derivate='x', human=True, set_t0=True)

            if p['name'] in self.order_parameters:
                p['offset_ic'] = self.order_parameters[p['name']]

        #sort parameters
        #start by making dicts keyed by (drift-prefixed) parameter name:
        pdict = {x['name']:x for x in parameters}
        sdict = {'diff__' + x['name']: x for x in drifts}

        for x in pars:
            if x not in pdict:
                raise SsmError('An input is missing for parameter %s' %x)

        # C expressions for remainder compartments (pop_size minus all others)
        f_remainders = {}
        f_remainders_par = {}
        f_remainders_var = {}
        for x in self.model['populations']:
            if 'remainder' in x:
                rem = x['remainder']['name']
                eq = x['remainder']['pop_size'] + ' - ' + ' - '.join([r for r in x['composition'] if r != rem])
                f_remainders[rem] = self.make_C_term(eq, True)
                f_remainders_par[rem] = self.make_C_term(eq, True, force_par=True, set_t0=True)
                # variance of the remainder: sum of covariances of all other compartments
                eq = ''
                for x_i in x['composition']:
                    for x_j in x['composition']:
                        if x_i != rem and x_j != rem :
                            if eq != '':
                                eq += ' + '
                            eq += 'gsl_matrix_get(&Ct.matrix,' + str(self.order_states[x_i]) +',' + str(self.order_states[x_j]) + ')';
                f_remainders_var[rem] = eq;

        # Initial compartment sizes in cases of no remainder
        ic = []
        for x in self.model['populations']:
            if 'remainder' not in x:
                ic.append([self.make_C_term(t, True, force_par=True, set_t0=True) for t in x['composition']])

        return {
            'map_name2prior_name': self.map_name2prior_name,
            'parameters': parameters,
            'order_parameters': self.order_parameters,
            'order_states': self.order_states,
            'drifts': drifts,
            'par_sv': self.par_sv,
            'states': states,
            'remainders': self.remainder,
            'f_remainders': f_remainders,
            'f_remainders_par': f_remainders_par,
            'f_remainders_var': f_remainders_var,
            'ic': ic,
            'sde': [sdict[x] for x in self.par_diff],
            'pars': [pdict[x] for x in pars]
        }
def observed(self):
obs = copy.deepcopy(self.obs_model)
for x in obs:
if x['distribution'] == 'discretized_normal':
x['mean'] = self.make_C_term(x['mean'], True)
x['sd'] = self.make_C_term(x['sd'], True)
elif x['distribution'] == 'poisson':
x['mean'] = self.make_C_term(x['mean'], True)
x['sd'] = self.make_C_term(x['sd'], True)
elif x['distribution'] == 'binomial':
x['mean'] = self.make_C_term(x['mean'], True)
x['sd'] = self.make_C_term(x['sd'], True)
x['p'] = self.make_C_term(x['p'], True)
x['n'] = self.make_C_term(x['n'], True)
return {'observed': obs}
def iterators(self):
return {
'state': {
'sv': [self.order_states[x] for x in self.par_sv],
'remainders': [self.order_states[x] for x in self.remainder],
'inc': [self.order_states[x] for x in self.par_inc],
'sv_inc': [self.order_states[x] for x in (self.par_sv + self.par_inc)],
'diff': [self.order_states[x] for x in self.par_diff]
},
'parameter': {
'all': [self.order_parameters[x] for x in (self.par_sv + self.par_noise + self.par_proc + self.par_disp + self.par_obs + self.par_other)],
'noise': [self.order_parameters[x] for x in self.par_noise],
'disp': [self.order_parameters[x] for x in self.par_disp],
'icsv': [self.order_parameters[x] for x in self.par_sv],
'icdiff': [self.order_parameters[x.split('diff__')[1]] for x in self.par_diff]
}
}
def orders(self):
"""
#define and #undef
"""
univ_rem = ['U']
if self.remainder:
univ_rem += self.remainder
return {
'var': [{'name': x, 'order': self.order_parameters[x]} for x in (self.par_sv + self.par_noise + self.par_proc + self.par_disp + self.par_obs + self.par_other)],
'diff': [{'name': x, 'order': o} for o,x in enumerate(self.par_diff) ],
'inc': [{'name': x, 'order': self.order_states[x]} for x in self.par_inc],
'covariates': [{'name': x, 'order': o} for o,x in enumerate(self.par_forced)] ,
'univ_rem': [{'name': x, 'order': len(self.par_sv)+o} for o,x in enumerate(univ_rem) ]
}
    def cache_special_function_C(self, caches_C, sf=None, prefix='_sf'):
        """Replace special-function calls in cached C expressions by array refs.

        caches_C: List of cached expression in C; modified in place — every
        occurrence of a special-function call is replaced by ``prefix[i]``.
        sf: an optional list of unique special function calls to be cached;
        when falsy, the list is discovered by scanning caches_C.
        returns sf (created if sf input is None)
        """
        if not sf:
            sf = []
            for term in caches_C:
                if any([x in term for x in self.special_functions]):
                    # tokenize and walk the token stream to extract the full
                    # call text, balancing nested parentheses
                    terms = self.change_user_input(term)
                    ind = 0
                    while (ind < len(terms)):
                        if terms[ind] in self.special_functions:
                            f = terms[ind] + '('
                            ind += 2 #skip first parenthesis
                            pos = 1 #counter for open parenthesis
                            while pos > 0:
                                if terms[ind] == '(':
                                    pos += 1
                                if terms[ind] == ')':
                                    pos -= 1
                                f += terms[ind]
                                ind +=1
                            sf.append(f)
                        else:
                            ind += 1
            # dedupe; NOTE(review): set() loses ordering, so callers that reuse
            # sf across calls rely on passing the same list back in
            sf = list(set(sf))

        # substitute each discovered call by its cache slot
        for i, term in enumerate(caches_C):
            if any([x in term for x in self.special_functions]):
                for s in sf:
                    caches_C[i] = caches_C[i].replace(s, prefix + '[{0}]'.format(sf.index(s)))

        return sf
def alloc_psr(self):
Clist = []
univ = ['U']
if self.remainder:
univ += self.remainder
for s in self.par_sv + univ:
nbreac = len([r for r in self.proc_model if r['from']==s]) +1 ##+1 to stay in the same compartment or to declare smtg in case of no reaction (not super clean but makes C code easier...)
Clist.append({'state':s, 'nb_reaction': nbreac})
return Clist
def step_psr(self):
"""
prob and update for Poisson with stochastic rate step function
prob general structure:
sum=...;
if(sum>0.0){
prob[0]=(1-exp(-sum))*(rate/sum);
...
prob[last]=1-sum(prob);
}
else{
prob[0]=0;
...
prob[last]=1;
}
we need the if statement to avoid division by 0
"""
###########
## prob ##
###########
proc_model = copy.deepcopy(self.proc_model) ##we are going to modify it...
##make the rates noisy (if needed e.g r
rates = set()
for r in proc_model:
if r['from'] not in (['U'] + self.remainder):
myrate = r['rate']
if 'white_noise' in r:
myrate = '({0})*{1}'.format(myrate, r['white_noise']['name'])
rates.add(myrate)
rates = list(rates)
caches = map(lambda x: self.make_C_term(x, False), rates)
sf = self.cache_special_function_C(caches)
for r in proc_model:
if r['from'] not in (['U'] + self.remainder):
myrate = r['rate']
if 'white_noise' in r:
myrate = '({0})*{1}'.format(myrate, r['white_noise']['name'])
r['ind_cache'] = rates.index(myrate)
Ccode=''
for s in self.par_sv:
myexit = [r for r in proc_model if r['from'] == s]
exitlist=[]
if len(myexit)>0:
for e in myexit:
exitlist.append('_r[{0}]*dt'.format(e['ind_cache']))
Csum= 'sum = ' + '+'.join(exitlist) + ';\n'
Ccode += Csum+ 'if(sum>0.0){\none_minus_exp_sum = (1.0-exp(-sum));\n'
Cprob=''
sumprob='1.0'
for reacnb in range(len(exitlist)):
Cprob += 'calc->prob[ORDER_{0}][{1}] = one_minus_exp_sum*(({2})/sum);\n'.format(s, reacnb, exitlist[reacnb])
sumprob += ' - calc->prob[ORDER_{0}][{1}]'.format(s, reacnb)
Cprob += 'calc->prob[ORDER_{0}][{1}] = '.format(s,len(exitlist)) + sumprob + ';\n'
Ccode += Cprob+ '}\n'
Ccode +='else{\n'
Celse=''
for reacnb in range(len(exitlist)):
Celse += 'calc->prob[ORDER_{0}][{1}] = 0.0;\n'.format(s, reacnb)
Celse += 'calc->prob[ORDER_{0}][{1}] = 1.0;\n'.format(s,len(exitlist))+'}\n\n'
Ccode += Celse
############
## update ##
############
incDict = dict([(x,'') for x in self.par_sv])
for s in self.par_sv: ##stay in the same compartment
myexit = [r for r in self.proc_model if r['from'] == s]
if len(myexit)>0: ##only if you can exit from this compartment in this case the remaining has a sense
incDict[s] += 'calc->inc[ORDER_{0}][{1}]'.format(s, len(myexit))
else:
incDict[s] += 'X[ORDER_{0}]'.format(s)
for s in self.par_sv: #come in from other compartments
myinput = [r for r in self.proc_model if r['from'] == s]
for nbreac in range(len(myinput)):
if myinput[nbreac]['to'] not in (['U'] + self.remainder): ##we exclude deaths or transitions to remainder in the update
incDict[myinput[nbreac]['to']] += ' + calc->inc[ORDER_{0}][{1}]'.format(myinput[nbreac]['from'], nbreac)
##we add flow from (['U'] + self.remainder) (Poisson term). We want to cache those flow so that the incidences can be computed
poisson = []
for s in (['U'] + self.remainder):
reac_from_univ = [r for r in self.proc_model if (r['from'] == s and (r['to'] not in (['U'] + self.remainder)) )]
for nbreac in range(len(reac_from_univ)):
myrate = self.make_C_term(reac_from_univ[nbreac]['rate'], False)
if 'white_noise' in reac_from_univ[nbreac]:
myrate = '({0})*{1}'.format(myrate, reac_from_univ[nbreac]['white_noise']['name'])
poisson.append('calc->inc[ORDER_{0}][{1}] = gsl_ran_poisson(calc->randgsl, ({2})*dt)'.format(s, nbreac, myrate))
incDict[reac_from_univ[nbreac]['to']] += ' + calc->inc[ORDER_{0}][{1}]'.format(s, nbreac)
Cstring=''
for s in self.par_sv:
Cstring += 'X[ORDER_{0}] = {1};\n'.format(s, incDict[s])
return {'code': Ccode, 'caches': caches, 'sf': sf, 'poisson': poisson, 'update_code': Cstring}
def step_psr_inc(self):
"""generate C code to compute the dynamic of the observed
**incidence** in case of stochastic models (euler multinomial)
and put in into
Clist = [{'right_hand_side': }]
"""
Clist = []
for i in range(len(self.par_inc_def)):
right_hand_side=''
for j in range(len(self.par_inc_def[i])):
id_out = [self.proc_model.index(r) for r in self.proc_model if ((r['from'] == self.par_inc_def[i][j]['from']) and (r['to'] == self.par_inc_def[i][j]['to']) and (r['rate'] == self.par_inc_def[i][j]['rate']))]
for o in id_out:
myexit = [r for r in self.proc_model if r['from']==self.proc_model[o]['from']]
right_hand_side += ' + calc->inc[ORDER_{0}][{1}]'.format(self.par_inc_def[i][j]['from'], myexit.index(self.proc_model[o]))
Clist.append({'index': i, 'right_hand_side':right_hand_side})
return Clist
def step_psr_multinomial(self):
draw = []
for s in self.par_sv:
nbexit = len([r for r in self.proc_model if r['from']==s])
if nbexit>0:
draw.append({'state': s, 'nb_exit': nbexit+1}) ##+1 to stay in the compartment
return draw
def step_ode_sde(self):
"""
Generates ODE and SDEs
note: sf are used in self.jac() for Lyapunov exp computations
"""
proc_model = copy.deepcopy(self.proc_model) ##we are going to modify it...
odeDict = dict([(x, []) for x in self.par_sv])
rates = list(set(r['rate'] for r in proc_model))
caches = map(lambda x: self.make_C_term(x, True), rates)
sf = self.cache_special_function_C(caches, prefix='_sf')
for i, r in enumerate(proc_model):
r['ind_cache'] = rates.index(r['rate'])
r['ind_dem_sto'] = i
def get_rhs_term(sign, cached, reaction):
if 'white_noise' in reaction:
noise_id = reaction['white_noise']['name']
noise_sd = self.toC(reaction['white_noise']['sd'], False)
else:
noise_id = None
noise_sd= None
return {'sign': sign, 'term': cached, 'noise_id': noise_id, 'noise_sd': noise_sd, 'ind_dem_sto': reaction['ind_dem_sto']}
################################
##Dynamic of the state variables
################################
##outputs
for r in proc_model:
if r['from'] not in (['U'] + self.remainder):
cached = '_r[{0}]*X[ORDER_{1}]'.format(r['ind_cache'], r['from'])
odeDict[r['from']].append(get_rhs_term('-', cached, r))
##inputs
for r in proc_model:
if r['to'] not in (['U'] + self.remainder):
if r['from'] not in (['U'] + self.remainder):
cached = '_r[{0}]*X[ORDER_{1}]'.format(r['ind_cache'], r['from'])
else:
cached= '_r[{0}]'.format(r['ind_cache'])
odeDict[r['to']].append(get_rhs_term('+', cached, r))
#######################################
##Dynamic of the observed **incidence**
#######################################
obs_list = []
for i in range(len(self.par_inc_def)):
eq = []
if isinstance(self.par_inc_def[i][0], dict): ##incidence
for j in range(len(self.par_inc_def[i])):
id_out = [proc_model.index(r) for r in proc_model if ((r['from'] == self.par_inc_def[i][j]['from']) and (r['to'] == self.par_inc_def[i][j]['to']) and (r['rate'] == self.par_inc_def[i][j]['rate'])) ]
for o in id_out:
reaction = proc_model[o]
if self.par_inc_def[i][j]['from'] in (['U'] + self.remainder):
cached = '_r[{0}]'.format(reaction['ind_cache'])
else:
cached = '_r[{0}]*X[ORDER_{1}]'.format(reaction['ind_cache'], self.par_inc_def[i][j]['from'])
eq.append(get_rhs_term('+', cached, reaction))
obs_list.append({'index':i, 'eq': eq})
##############################################################################################################
##we create the ODE and 4 versions of the SDE system (no_dem_sto, no_white_noise, no_dem_sto_no_white_noise and full)
##############################################################################################################
unique_noises_id = [x['name'] for x in self.white_noise]
dem_sto_id = ['dem_sto__' +str(i) for i, x in enumerate(self.proc_model)]
def eq_dem_env(eq_list):
eq = '' #deter skeleton
dem = '' #demographic stochasticity
env = '' #env stochasticity
for x in eq_list:
eq += ' {0} ({1})'.format(x['sign'], x['term'])
#dem sto
dem += '{0} sqrt(({1}))*dem_sto__{2}'.format(x['sign'], x['term'], x['ind_dem_sto'])
#env sto
if x['noise_id']:
env += '{0} ({1})*{2}*{3}'.format(x['sign'], x['term'], x['noise_sd'], x['noise_id'])
return (eq, dem, env)
func = {'no_dem_sto': {'proc': {'system':[], 'noises': unique_noises_id},
'obs': []},
'no_white_noise': {'proc': {'system':[], 'noises': dem_sto_id},
'obs': []},
'full': {'proc': {'system':[], 'noises': dem_sto_id + unique_noises_id},
'obs': []},
'no_dem_sto_no_white_noise': {'proc':{'system':[], 'noises':[]},
'obs':[]},
'ode': {'proc':{'system':[], 'noises':[]},
'obs':[]}}
#state variables
for i, s in enumerate(self.par_sv):
eq, dem, env = eq_dem_env(odeDict[s])
if env:
env = '+ ' + env
#TODO get rid of the 'dt' for Euler Maruyama (should be handled on the C side as it is the case for sqrt(dt) for the stochastic part)'
func['ode']['proc']['system'].append({'index': i, 'eq': eq})
func['no_dem_sto_no_white_noise']['proc']['system'].append({'index': i, 'eq': '({0})*dt'.format(eq)})
func['no_dem_sto']['proc']['system'].append({'index': i, 'eq': '({0})*dt {1}'.format(eq, env)})
func['no_white_noise']['proc']['system'].append({'index': i, 'eq': '({0})*dt + {1}'.format(eq, dem)})
func['full']['proc']['system'].append({'index': i, 'eq': '({0})*dt + {1} {2}'.format(eq, dem, env)})
#observed incidence
for myobs in obs_list:
eq, dem, env = eq_dem_env(myobs['eq'])
if env:
env = ' + ' + env
#TODO get rid of the 'dt' for Euler Maruyama (should be handled on the C side as it is the case for sqrt(dt) for the stochastic part)'
func['ode']['obs'].append({'index': myobs['index'], 'eq': eq})
func['no_dem_sto_no_white_noise']['obs'].append({'index': myobs['index'], 'eq': '({0})*dt'.format(eq)})
func['no_dem_sto']['obs'].append({'index': myobs['index'], 'eq': '({0})*dt {1}'.format(eq, env)})
func['no_white_noise']['obs'].append({'index': myobs['index'], 'eq': '({0})*dt {1}'.format(eq, dem)})
func['full']['obs'].append({'index': myobs['index'], 'eq': '({0})*dt + {1} {2}'.format(eq, dem, env)})
return {'func': func, 'caches': caches, 'sf': sf}
def compute_diff(self):
sde = self.model.get('sde',{})
if sde and 'dispersion' in sde:
dispersion = sde['dispersion']
diff = {}
diff['terms'] = []
diff['n_browns'] = len(dispersion[0])
for x in dispersion:
term = ''
for i, y in enumerate(x):
if y:
term += (' + ' if term else '') + self.make_C_term(y, True) + ' * _w[{0}]'.format(i)
diff['terms'].append(term)
return diff
else:
return []
    def jac(self, sf_jac_only):
        """Compute the jacobian matrix of the process model (including
        observed variables) using Sympy.

        sf_jac_only: list of cached special functions generated by
        self.print_ode(), used to get the index of caches_C for the
        jacobian matrix of the simulation methods.

        Returns a dict with the system jacobian (jac, jac_only), the
        observation jacobian (jac_obs), their derivatives against the
        diffusing parameters (jac_diff, jac_obs_diff), the cached C
        expressions (caches, caches_jac_only) and the cached special
        functions (sf).  Jacobian entries are *indexes* into `caches`
        (templated as _r[index] on the C side).
        """
        my_model = copy.deepcopy(self.proc_model)
        odeDict = dict([(x,'') for x in self.par_sv])

        ##############################
        ### Build odeDict
        ##############################

        ##outputs: every reaction leaving a state removes rate*state
        for r in my_model:
            if r['from'] not in (['U'] + self.remainder):
                rate= ' - (({0})*{1})'.format(r['rate'], r['from'])
                odeDict[r['from']] += rate

        ##inputs: every reaction entering a state adds rate*source, or
        ##just the rate when the source is the universe or a remainder
        for r in my_model:
            if r['to'] not in (['U'] + self.remainder):
                if r['from'] not in (['U'] + self.remainder):
                    rate= ' + (({0})*{1})'.format(r['rate'], r['from'])
                    odeDict[r['to']] += rate
                else:
                    rate= ' + ({0})'.format(r['rate'])
                    odeDict[r['to']] += rate

        ##observed equations: sum of the contributing reaction rates
        obsList = []

        for i in range(len(self.par_inc_def)):
            eq = ''
            for j in range(len(self.par_inc_def[i])):
                reaction = self.par_inc_def[i][j]
                if reaction['from'] in (['U'] + self.remainder):
                    eq += ' + ({0})'.format(reaction['rate'])
                else:
                    eq += ' + (({0})*{1})'.format(reaction['rate'], reaction['from'])

            obsList.append(eq)

        ####################
        ### Jacobian
        ####################

        ##derive process model equations (odeDict) per par_sv
        caches = []
        caches_jac_only = []

        jac = []
        jac_only = []
        jac_diff = []

        for s in range(len(self.par_sv)):
            jac.append([])
            jac_only.append([])

            if self.par_diff:
                jac_diff.append([])

            for sy in self.par_sv:
                Cterm = self.make_C_term(odeDict[self.par_sv[s]], True, derivate=sy)
                jac[s].append(Cterm)
                jac_only[s].append(Cterm)
                caches.append(Cterm)
                caches_jac_only.append(Cterm)

            #see doc of kalman.c diff_derivative()
            for sy in self.par_diff:
                Cterm = self.make_C_term(odeDict[self.par_sv[s]], True, derivate=sy.split('diff__')[1])
                jac_diff[s].append({'value': Cterm,
                                    'der': self.make_C_term(sy, True),
                                    'name': sy,
                                    'order': self.order_states[sy]})
                caches.append(Cterm)

        ##derive observation equations (obsList) per par_sv
        jac_obs = []
        jac_obs_diff = []

        for o in range(len(obsList)):
            jac_obs.append([])
            if self.par_diff:
                jac_obs_diff.append([])

            for sy in self.par_sv:
                Cterm = self.make_C_term(obsList[o], True, derivate=sy)
                jac_obs[o].append(Cterm)
                caches.append(Cterm)

            #see doc of kalman.c diff_derivative()
            for sy in self.par_diff:
                Cterm = self.make_C_term(obsList[o], True, derivate=sy.split('diff__')[1])
                jac_obs_diff[o].append({'value': Cterm,
                                        'der': self.make_C_term(sy, True),
                                        'name': sy,
                                        'order': self.order_states[sy]})
                caches.append(Cterm)

        ##cache rates and remove duplicates
        # NOTE: list(set(...)) makes the cache order unspecified; the
        # subsequent .index() lookups stay consistent because they are
        # done against this same list.
        caches = list(set(caches))
        caches_jac_only = list(set(caches_jac_only))

        ##replace with index of caches (will be _r[index] in C)
        for s in range(len(self.par_sv)):
            for i in range(len(self.par_sv)):
                Cterm = jac[s][i]
                jac[s][i] = caches.index(Cterm)
                jac_only[s][i] = caches_jac_only.index(Cterm)

            for i in range(len(self.par_diff)):
                jac_diff[s][i]['value'] = caches.index(jac_diff[s][i]['value'])

        for o in range(len(obsList)):
            for i in range(len(self.par_sv)):
                jac_obs[o][i] = caches.index(jac_obs[o][i])

            for i in range(len(self.par_diff)):
                jac_obs_diff[o][i]['value'] = caches.index(jac_obs_diff[o][i]['value'])

        ##special function that have to be cached (caches is transformed by self.cache_special_function_)
        sf = self.cache_special_function_C(caches, prefix='_sf')

        ##for jac_only (used for Lyapunov exp computations only, sf is shared with the one of print_ode. We just update caches_jac_only)
        self.cache_special_function_C(caches_jac_only, sf=sf_jac_only, prefix='_sf')

        return {'jac_only': jac_only,
                'jac': jac,
                'jac_obs': jac_obs,
                'jac_diff': jac_diff,
                'jac_obs_diff': jac_obs_diff,
                'caches': caches,
                'sf': sf,
                'caches_jac_only': caches_jac_only}
def Ht(self):
"""compute jacobian matrix of the mean of the obs process (assumed to be Gaussian) using Sympy"""
proc_model = copy.deepcopy(self.proc_model) ##we are going to modify it...
obs = copy.deepcopy(self.obs_model)
N_REAC = len(proc_model)
N_PAR_SV = len(self.par_sv)
N_PAR_INC = len(self.par_inc)
N_DIFF = len(self.par_diff)
Ht_sv = []
Ht_inc = []
Ht_diff = []
## Derivatives of observed means against state variables
for s in range(len(self.par_sv)):
Ht_sv.append([])
for x in obs:
Cterm = self.make_C_term(x['mean'], True, derivate=self.par_sv[s])
Ht_sv[s].append(Cterm)
## Derivatives of observed means against incidence variables
for s in range(len(self.par_inc)):
Ht_inc.append([])
for x in obs:
Cterm = self.make_C_term(x['mean'], True, derivate=self.par_inc[s])
Ht_inc[s].append(Cterm)
## Derivatives of observed means against diffusing variables
for s in range(len(self.par_diff)):
Ht_diff.append([])
for x in obs:
Cterm = self.make_C_term(x['mean'], True, derivate=self.par_diff[s])
Ht_diff[s].append(Cterm)
return {'Ht_sv': Ht_sv,
'Ht_inc': Ht_inc,
'Ht_diff': Ht_diff}
def h_grads(self):
"""compute the gradients of the observation functions using Sympy in order to compute the prediction variance through first-order Taylor expansions"""
obs = copy.deepcopy(self.obs_model)
h_grads = {}
for x in obs:
term = {}
term['name'] = x['name']
term['grads'] = []
for s in (self.par_sv + self.par_inc + self.par_diff):
Cterm = self.make_C_term(x['mean'], True, derivate=s if 'diff__' not in s else s.split('diff__')[1])
if Cterm!='0':
grad = {}
grad['Cterm'] = Cterm
grad['ind'] = self.order_states[s]
term['grads'].append(grad)
h_grads[x['name']]=term
return {'h_grads': h_grads}
    def eval_Q(self, debug = False):
        """
        Build the process-noise covariance templates.

        The construction of Qsv is based on three levels:
         - states: state variables and observations (s)
         - reactions (r)
         - noise terms (n)

        At the reaction level, Qr is a two-blocks diagonal matrix: Qr_dem and Qr_env.
        Qr_dem corresponds to demographic noise and has reaction rates on the diagonal.
        Qr_env corresponds to white noises. It is built from Qn through Lr.
        Qn is a diagonal matrix which diagonal terms correspond to squarred amplitude of white noises.
        The stoechiometric matrices L are used to switch from one level to another:
              Qr_env = Lr Qn Lr'  and  Qs = Ls Qr Ls'

        In particular, Lr has reaction rates in term (i,j) if reaction i is concerned by white noise j.
        Ls has +1 or -1 in term (i,j) if reaction j goes to or leaves from state i, and O's everywhere else.

        Note: we assume only one environmental noise term per reaction.

        All matrix entries are C-expression strings (or 0); the "products"
        below are therefore symbolic, not numeric.
        """
        proc_model = copy.deepcopy(self.proc_model) ##we are going to modify it...

        N_REAC = len(proc_model)
        N_PAR_SV = len(self.par_sv)
        N_PAR_INC = len(self.par_inc)
        N_DIFF = len(self.par_diff)

        unique_noises_names = [x['name'] for x in self.white_noise]
        N_ENV_STO_UNIQUE = len(unique_noises_names)

        ##add sd and order properties to noisy reactions
        N_ENV_STO = 0
        for reaction in proc_model:
            if 'white_noise' in reaction:
                reaction['order_env_sto_unique'] = unique_noises_names.index(reaction['white_noise']['name'])
                reaction['order_env_sto'] = N_ENV_STO
                N_ENV_STO += 1

        s = N_REAC + N_ENV_STO ## number of noise terms (potentially non-independent)
        ##for demographic stochasticity, one independent noise term per reaction

        Ls = [[0]*s for x in range(N_PAR_SV + N_PAR_INC)]
        Qs = [[0]*(N_PAR_SV + N_PAR_INC) for x in range(N_PAR_SV + N_PAR_INC)]
        Qr = [[0]*s for x in range(s)]
        Qr_dem = [[0]*s for x in range(N_REAC)]
        Qr_sto = [[0]*s for x in range(N_ENV_STO)]
        Lr = [[0]*N_ENV_STO_UNIQUE for x in range(N_ENV_STO)]
        Qn = [[0]*N_ENV_STO_UNIQUE for x in range(N_ENV_STO_UNIQUE)]

        ###########################################
        #    Create Ls and Qr_dem                 #
        ###########################################

        #state variables: -1 for the source, +1 for the destination of
        #each reaction; the diagonal of Qr_dem carries the reaction rate
        for B_dem_ind, r in enumerate(proc_model):
            is_noise = 'white_noise' in r
            if is_noise:
                B_sto_ind = N_REAC + r['order_env_sto']

            if r['from'] not in (['U'] + self.remainder):
                i = self.par_sv.index(r['from'])
                Ls[i][B_dem_ind] -= 1 ##demographic stochasticity
                if is_noise:
                    Ls[i][B_sto_ind] -= 1 ##env stochasticity

                Qc_term = '({0})*{1}'.format(r['rate'], r['from'])
            else:
                Qc_term = r['rate']

            if r['to'] not in (['U'] + self.remainder):
                i = self.par_sv.index(r['to'])
                Ls[i][B_dem_ind] += 1
                if is_noise:
                    Ls[i][B_sto_ind] += 1

            Qr_dem[B_dem_ind][B_dem_ind] = Qc_term

        # incidence variables
        for i in range(len(self.par_inc_def)): #(for every incidence variable)
            for B_dem_ind, r in enumerate(proc_model):
                # for every incidence
                for inc in self.par_inc_def[i]:
                    # if it involves incidence
                    if (r['from'] == inc['from']) and (r['to'] == inc['to']) and (r['rate'] == inc['rate']):
                        Ls[N_PAR_SV + i][B_dem_ind] += 1
                        if 'white_noise' in r:
                            B_sto_ind = N_REAC + r['order_env_sto']
                            Ls[N_PAR_SV + i][B_sto_ind] += 1

        ############################
        ## Create Qr_env = Lr Qn Lr'
        ############################
        for r in proc_model:
            if 'white_noise' in r:
                if r['from'] not in (['U'] + self.remainder):
                    Qn_term = '({0})*{1}'.format(r['rate'], r['from'])
                else:
                    Qn_term = r['rate']

                Lr[r['order_env_sto']][r['order_env_sto_unique']] = Qn_term
                Qn[r['order_env_sto_unique']][r['order_env_sto_unique']] = '({0})**2'.format(r['white_noise']['sd'])

        def matrix_product(A, B):
            # symbolic (string-based) matrix product; an empty operand
            # propagates as an empty result
            if not A or not B:
                return []

            res = [[0]*len(B[0]) for x in range(len(A))]

            for i in range(len(A)):
                for j in range(len(B[0])):
                    for k in range(len(B)):
                        if (A[i][k] and B[k][j]):
                            term = ('({0})*({1})').format(A[i][k], B[k][j])
                            if res[i][j]:
                                res[i][j] = res[i][j] + ' + {0}'.format(term)
                            else:
                                res[i][j] = term

            return res

        Qr_env = matrix_product(Lr, Qn)
        Qr_env = matrix_product(Qr_env, zip(*Lr))

        for i in range(N_ENV_STO):
            for j in range(N_ENV_STO):
                Qr[N_REAC+i][N_REAC+j] = Qr_env[i][j]

        #we fill Qr with Qc_dem and Qc_env
        for i in range(N_REAC):
            for j in range(N_REAC):
                Qr[i][j] = Qr_dem[i][j]

        #we split Ls into Ls_dem and Ls_env
        Ls_dem = [[0]*N_REAC for x in range(N_PAR_SV + N_PAR_INC)]
        for i in range(N_PAR_SV + N_PAR_INC):
            for j in range(N_REAC):
                Ls_dem[i][j] = Ls[i][j]

        Ls_env = [[0]*N_ENV_STO for x in range(N_PAR_SV + N_PAR_INC)]
        for i in range(N_PAR_SV + N_PAR_INC):
            for j in range(N_ENV_STO):
                Ls_env[i][j] = Ls[i][N_REAC + j]

        ############################
        ## Create Q_sde
        ############################
        sde = self.model.get('sde', {})
        if 'dispersion' in sde:
            sde = self.model['sde']
            dispersion = sde['dispersion']
            # Q_sde = dispersion * dispersion'
            Q_sde = matrix_product(dispersion, zip(*dispersion))

        #####################################################################################
        ##we create 4 versions of Q (no_dem_sto, no_env_sto, no_dem_sto_no_env_sto and full)
        #####################################################################################
        Qs = matrix_product(Ls, Qr)
        Qs = matrix_product(Qs, zip(*Ls))

        Qs_dem = matrix_product(Ls_dem, Qr_dem)
        Qs_dem = matrix_product(Qs_dem, zip(*Ls_dem))

        Qs_env = matrix_product(Ls_env, Qr_env)
        Qs_env = matrix_product(Qs_env, zip(*Ls_env))

        # calc_Q contains different components of Q depending on the absence / presence
        # of demographic and environmental noise.
        #
        # Q is made of:
        # - Q_proc, terms regarding exclusively proc state variables of the compartmental model
        # - Q_inc,  terms regarding at least one incidence state variable of the compartmental model
        # - Q_sde,  terms regarding the stochastic differential equation
        #
        # note: Q_cm (compartmental model) is the union of Q_proc and Q_inc terms.
        #
        #     | Q_proc  Q_inc      0  |   | Q_cm  Q_cm    0   |
        # Q = | Q_inc   Q_inc      0  | = | Q_cm  Q_cm    0   |
        #     |   0       0     Q_sde |   |   0     0   Q_sde |

        calc_Q = {'no_dem_sto': {'Q_proc':[],
                                 'Q_inc':[],
                                 'Q_cm': Qs_env,
                                 'Q_sde': []},
                  'no_env_sto': {'Q_proc':[],
                                 'Q_inc':[],
                                 'Q_cm': Qs_dem,
                                 'Q_sde': []},
                  'full': {'Q_proc':[],
                           'Q_inc':[],
                           'Q_cm': Qs,
                           'Q_sde': []},
                  'no_dem_sto_no_env_sto':{'Q_proc':[],
                                           'Q_inc':[],
                                           'Q_cm': [],
                                           'Q_sde': []}}

        if debug:
            # dump the non-null lower-triangle terms and check symmetry
            for k in calc_Q:
                print '\n\nNon null term of Q_'+ k
                print "sv:"
                for i, x in enumerate(self.par_sv):
                    print i, x
                print "obs:"
                for i, x in enumerate(self.par_inc_def):
                    print N_PAR_SV+ i, x

                for i in range(len(calc_Q[k]['Q_cm'])):
                    for j in range(i+1):
                        if calc_Q[k]['Q_cm'][i][j]:
                            print '----------'
                            #print Q[i][j]
                            print 'Q_cm[{0}][{1}]: '.format(i, j), self.make_C_term(calc_Q[k]['Q_cm'][i][j], True, human=True)
                            if i != j:
                                print 'Q_cm[{0}][{1}] == Q_cm[{1}][{0}]: '.format(i, j), self.make_C_term(calc_Q[k]['Q_cm'][i][j], True, human=True) == self.make_C_term(calc_Q[k]['Q_cm'][j][i], True, human=True)

        #convert in a version easy to template in C
        #Note that we only template the lower triangle (Q is symmetrical)
        for k, tpl in calc_Q.iteritems():
            if tpl['Q_cm']:
                for i in range(len(tpl['Q_cm'])):
                    for j in range(i+1):
                        if tpl['Q_cm'][i][j]:
                            if i< N_PAR_SV and j < N_PAR_SV:
                                tpl['Q_proc'].append({'i': i, 'j': j, 'term': self.make_C_term(tpl['Q_cm'][i][j], True)})
                            else:
                                tpl['Q_inc'].append({'i': {'is_inc': False, 'ind': i} if i < N_PAR_SV else {'is_inc': True, 'ind': i - N_PAR_SV},
                                                     'j': {'is_inc': False, 'ind': j} if j < N_PAR_SV else {'is_inc': True, 'ind': j - N_PAR_SV},
                                                     'term': self.make_C_term(tpl['Q_cm'][i][j], True)})
            if sde:
                for i in range(len(Q_sde)):
                    for j in range(i+1):
                        if Q_sde[i][j]:
                            tpl['Q_sde'].append({'i': i, 'j': j, 'term': self.make_C_term(Q_sde[i][j], True)})

        ##cache special functions
        # for the moment, we only cache the terms contained in Q_cm.
        # TODO: cache Q_sde terms.
        for key in calc_Q:
            if calc_Q[key]['Q_cm']:
                optim_rates_proc = [x['term'] for x in calc_Q[key]['Q_proc']]
                optim_rates_inc = [x['term'] for x in calc_Q[key]['Q_inc']]
                optim_rates = optim_rates_proc + optim_rates_inc

                calc_Q[key]['sf'] = self.cache_special_function_C(optim_rates, prefix='_sf')

                for i in range(len(optim_rates_proc)):
                    calc_Q[key]['Q_proc'][i]['term'] = optim_rates[i]

                n_proc = len(optim_rates_proc)
                for i in range(len(optim_rates_inc)):
                    calc_Q[key]['Q_inc'][i]['term'] = optim_rates[n_proc + i]

            else:
                calc_Q[key]['sf'] = []

        return calc_Q
if __name__=="__main__":
    # Smoke test: build a Ccoder from the bundled 'noise' example data
    # package (paths are relative to the src/ directory).
    import json
    import os

    dpkgRoot = os.path.join('..' ,'examples', 'noise')
    dpkg = json.load(open(os.path.join(dpkgRoot, 'ssm.json')))
    m = Ccoder(dpkgRoot, dpkg)
|
ntncmch/ssm
|
src/Ccoder.py
|
Python
|
gpl-3.0
| 42,947
|
[
"Gaussian"
] |
28e31ba4c769100361d51166000c4c9eb2036bb13d82f2fd2d184c6ccf415180
|
"""
Description of the video:
Mimic of Star Wars' opening title. A text with a (false)
perspective effect goes towards the end of space, on a
background made of stars. Slight fading effect on the text.
"""
import numpy as np
from moviepy.editor import *
from moviepy.video.tools.drawing import color_gradient
from skimage import transform as tf
# RESOLUTION

w = 720
h = w * 9 // 16  # 16/9 screen
moviesize = w, h


# THE RAW TEXT
txt = "\n".join(
    [
        "A long time ago, in a faraway galaxy,",
        "there lived a prince and a princess",
        "who had never seen the stars, for they",
        "lived deep underground.",
        "",
        "Many years before, the prince's",
        "grandfather had ventured out to the",
        "surface and had been burnt to ashes by",
        "solar winds.",
        "",
        "One day, as the princess was coding",
        "and the prince was shopping online, a",
        "meteor landed just a few megameters",
        "from the couple's flat.",
    ]
)

# Add blanks so the text starts and ends off-screen
txt = 10 * "\n" + txt + 10 * "\n"

# CREATE THE TEXT IMAGE
clip_txt = TextClip(
    txt, color="white", align="West", font_size=25, font="Xolonium-Bold", method="label"
)

# SCROLL THE TEXT IMAGE BY CROPPING A MOVING AREA
# fl takes a window of height h whose top moves down txt_speed pixels
# per second over the full rendered text image.
txt_speed = 27
fl = lambda gf, t: gf(t)[int(txt_speed * t) : int(txt_speed * t) + h, :]
moving_txt = clip_txt.transform(fl, apply_to=["mask"])

# ADD A VANISHING EFFECT ON THE TEXT WITH A GRADIENT MASK
# fully opaque below 2h/3, fading to fully transparent above h/4
grad = color_gradient(
    moving_txt.size, p1=(0, 2 * h / 3), p2=(0, h / 4), color_1=0.0, color_2=1.0
)
gradmask = ImageClip(grad, is_mask=True)
fl = lambda pic: np.minimum(pic, gradmask.img)
moving_txt.mask = moving_txt.mask.image_transform(fl)

# WARP THE TEXT INTO A TRAPEZOID (PERSPECTIVE EFFECT)
def trapzWarp(pic, cx, cy, is_mask=False):
    """Warp ``pic`` into a trapezoid to fake a perspective effect.

    The top edge is pinched towards the center by ``cx`` (horizontally)
    and pushed down by ``cy`` (vertically); the bottom edge is left in
    place.  Masks are returned as floats, images as uint8.
    """
    height, width = pic.shape[:2]
    corners_src = np.array([[0, 0], [width, 0], [width, height], [0, height]])
    corners_dst = np.array([[cx * width, cy * height],
                            [(1 - cx) * width, cy * height],
                            [width, height],
                            [0, height]])
    projection = tf.ProjectiveTransform()
    projection.estimate(corners_src, corners_dst)
    warped = tf.warp(pic, projection.inverse, output_shape=(height, width))
    if is_mask:
        return warped
    return (warped * 255).astype("uint8")
fl_im = lambda pic: trapzWarp(pic, 0.2, 0.3)
fl_mask = lambda pic: trapzWarp(pic, 0.2, 0.3, is_mask=True)
warped_txt = moving_txt.image_transform(fl_im)
warped_txt.mask = warped_txt.mask.image_transform(fl_mask)
# BACKGROUND IMAGE, DARKENED AT 60%
stars = ImageClip("../../videos/stars.jpg")
stars_darkened = stars.image_transform(lambda pic: (0.6 * pic).astype("int16"))
# COMPOSE THE MOVIE
final = CompositeVideoClip(
[stars_darkened, warped_txt.set_pos(("center", "bottom"))], size=moviesize
)
# WRITE TO A FILE
final.with_duration(8).write_videofile("starworms.avi", fps=5)
# This script is heavy (30s of computations to render 8s of video)
"""=====================================================================
CODE FOR THE VIDEO TUTORIAL
We will now code the video tutorial for this video.
When you think about it, it is a code for a video explaining how to
make another video using some code (this is so meta!).
This code uses the variables of the previous code (it should be placed
after that previous code to work).
====================================================================="""
def annotate(clip, txt, txt_color="white", bg_color=(0, 0, 255)):
    """Write ``txt`` on a colored banner at the bottom of ``clip``.

    BUGFIX: ``bg_color`` was previously accepted but ignored — the banner
    color was hard-coded to blue.  The default value keeps the historical
    appearance, while callers can now actually change it.
    """
    txtclip = TextClip(txt, font_size=20, font="Ubuntu-bold", color=txt_color)
    txtclip = txtclip.on_color(
        (clip.w, txtclip.h + 6), color=bg_color, pos=(6, "center")
    )
    cvc = CompositeVideoClip([clip, txtclip.set_pos((0, "bottom"))])
    return cvc.with_duration(clip.duration)
def resizeCenter(clip):
    """Scale ``clip`` to the movie height and center it."""
    resized = clip.resize(height=h)
    return resized.set_pos("center")
def composeCenter(clip):
    """Embed ``clip``, centered, on a full movie-size canvas."""
    centered = clip.set_pos("center")
    return CompositeVideoClip([centered], size=moviesize)
# Each step of the pipeline above, captioned, as its own short clip
annotated_clips = [
    annotate(clip, text)
    for clip, text in [
        (
            composeCenter(resizeCenter(stars)).subclip(0, 3),
            "This is a public domain picture of stars",
        ),
        (
            CompositeVideoClip([stars], moviesize).subclip(0, 3),
            "We only keep one part.",
        ),
        (
            CompositeVideoClip([stars_darkened], moviesize).subclip(0, 3),
            "We darken it a little.",
        ),
        (
            composeCenter(resizeCenter(clip_txt)).subclip(0, 3),
            "We generate a text image.",
        ),
        (
            composeCenter(moving_txt.with_mask(None)).subclip(6, 9),
            "We scroll the text by cropping a moving region of it.",
        ),
        (
            composeCenter(gradmask.to_RGB()).subclip(0, 2),
            "We add this mask to the clip.",
        ),
        (composeCenter(moving_txt).subclip(6, 9), "Here is the result"),
        (
            composeCenter(warped_txt).subclip(6, 9),
            "We now warp this clip in a trapezoid.",
        ),
        (final.subclip(6, 9), "We finally superimpose with the stars."),
    ]
]

# Concatenate and write to a file
concatenate_videoclips(annotated_clips).write_videofile("tutorial.avi", fps=5)
|
kerstin/moviepy
|
examples/star_worms.py
|
Python
|
mit
| 5,225
|
[
"Galaxy"
] |
06bc57d518a81d0f7c05b855da6bdf7bffe27ecb832f5c582804bb9e0f6bb02e
|
"""Move files to local Galaxy upload directory and add to Galaxy Data Libraries.
Required configurable variables in upload:
dir
"""
from __future__ import print_function
import collections
import os
import shutil
import time
from bcbio import utils
from bcbio.log import logger
from bcbio.upload import filesystem
from bcbio.pipeline import qcsummary
# Avoid bioblend import errors, raising at time of use
try:
import bioblend
from bioblend.galaxy import GalaxyInstance
import simplejson
except ImportError:
GalaxyInstance, bioblend, simplejson = None, None, None
def update_file(finfo, sample_info, config):
    """Update file in Galaxy data libraries.

    Requires the bioblend client and a `dir` config entry pointing at the
    shared filesystem.  Dispatches to a direct tool copy when the config
    pre-defines `outputs`, otherwise to a data library upload.
    """
    if GalaxyInstance is None:
        raise ImportError("Could not import bioblend.galaxy")
    if "dir" not in config:
        raise ValueError("Galaxy upload requires `dir` parameter in config specifying the "
                         "shared filesystem path to move files to.")
    if "outputs" in config:
        _galaxy_tool_copy(finfo, config["outputs"])
    else:
        _galaxy_library_upload(finfo, sample_info, config)
def _galaxy_tool_copy(finfo, outputs):
"""Copy information directly to pre-defined outputs from a Galaxy tool.
XXX Needs generalization
"""
tool_map = {"align": "bam", "variants": "vcf.gz"}
for galaxy_key, finfo_type in tool_map.items():
if galaxy_key in outputs and finfo.get("type") == finfo_type:
shutil.copy(finfo["path"], outputs[galaxy_key])
def _galaxy_library_upload(finfo, sample_info, config):
    """Upload results to a Galaxy data library.

    Files are first copied into the flowcell folder on the shared
    filesystem, then linked into a data library through the Galaxy API.
    """
    folder_name = "%s_%s" % (config["fc_date"], config["fc_name"])
    storage_dir = utils.safe_makedir(os.path.join(config["dir"], folder_name))

    if finfo.get("type") == "directory":
        storage_file = None
        if finfo.get("ext") == "qc":
            # QC directories are reduced to a single summary PDF before upload
            pdf_file = qcsummary.prep_pdf(finfo["path"], config)
            if pdf_file:
                finfo["path"] = pdf_file
                finfo["type"] = "pdf"
                storage_file = filesystem.copy_finfo(finfo, storage_dir, pass_uptodate=True)
    else:
        storage_file = filesystem.copy_finfo(finfo, storage_dir, pass_uptodate=True)

    if "galaxy_url" in config and "galaxy_api_key" in config:
        base_url = config["galaxy_url"]
        if not base_url.endswith("/"):
            base_url += "/"
        gi = GalaxyInstance(base_url, config["galaxy_api_key"])
    else:
        raise ValueError("Galaxy upload requires `galaxy_url` and `galaxy_api_key` in config")

    # only primary results go to the library: skip indexes and "plus" files
    if storage_file and sample_info and not finfo.get("index", False) and not finfo.get("plus", False):
        _to_datalibrary_safe(storage_file, gi, folder_name, sample_info, config)
def _to_datalibrary_safe(fname, gi, folder_name, sample_info, config):
    """Upload with retries for intermittent JSON failures.

    Gives up and re-raises after five failed attempts, sleeping five
    seconds between tries.
    """
    attempt = 0
    while True:
        try:
            _to_datalibrary(fname, gi, folder_name, sample_info, config)
            return
        except (simplejson.scanner.JSONDecodeError, bioblend.galaxy.client.ConnectionError) as e:
            attempt += 1
            if attempt > 5:
                raise
            print("Retrying upload, failed with:", str(e))
            time.sleep(5)
def _to_datalibrary(fname, gi, folder_name, sample_info, config):
    """Upload a file to a Galaxy data library in a project specific folder."""
    # resolve the target library, then its dated folder, then upload
    library = _get_library(gi, sample_info, config)
    contents = gi.libraries.show_library(library.id, contents=True)
    folder = _get_folder(gi, folder_name, library, contents)
    _file_to_folder(gi, fname, sample_info, contents, library, folder)
def _file_to_folder(gi, fname, sample_info, libitems, library, folder):
"""Check if file exists on Galaxy, if not upload to specified folder.
"""
full_name = os.path.join(folder["name"], os.path.basename(fname))
# Handle VCF: Galaxy reports VCF files without the gzip extension
file_type = "vcf_bgzip" if full_name.endswith(".vcf.gz") else "auto"
if full_name.endswith(".vcf.gz"):
full_name = full_name.replace(".vcf.gz", ".vcf")
for item in libitems:
if item["name"] == full_name:
return item
logger.info("Uploading to Galaxy library '%s': %s" % (library.name, full_name))
return gi.libraries.upload_from_galaxy_filesystem(str(library.id), fname, folder_id=str(folder["id"]),
link_data_only="link_to_files",
dbkey=sample_info["genome_build"],
file_type=file_type,
roles=str(library.roles) if library.roles else None)
def _get_folder(gi, folder_name, library, libitems):
"""Retrieve or create a folder inside the library with the specified name.
"""
for item in libitems:
if item["type"] == "folder" and item["name"] == "/%s" % folder_name:
return item
return gi.libraries.create_folder(library.id, folder_name)[0]
# Lightweight record for a Galaxy data library: API id, display name, and
# the role (group) granted access to it (may be None).
GalaxyLibrary = collections.namedtuple("GalaxyLibrary", ["id", "name", "roles"])
def _get_library(gi, sample_info, config):
"""Retrieve the appropriate data library for the current user.
"""
galaxy_lib = sample_info.get("galaxy_library",
config.get("galaxy_library"))
role = sample_info.get("galaxy_role",
config.get("galaxy_role"))
if galaxy_lib:
return _get_library_from_name(gi, galaxy_lib, role, sample_info, create=True)
elif config.get("private_libs") or config.get("lab_association") or config.get("researcher"):
return _library_from_nglims(gi, sample_info, config)
else:
raise ValueError("No Galaxy library specified for sample: %s" %
sample_info["description"])
def _get_library_from_name(gi, name, role, sample_info, create=False):
    """Find a (non-deleted) Galaxy library by case-insensitive name.

    With ``create=True`` a missing library is created and its permissions
    restricted to ``role`` (or the current user when no role is given);
    otherwise a missing library raises ValueError.
    """
    for lib in gi.libraries.get_libraries():
        if lib["name"].lower() == name.lower() and not lib.get("deleted", False):
            return GalaxyLibrary(lib["id"], lib["name"], role)
    if create and name:
        logger.info("Creating Galaxy library: '%s'" % name)
        lib = gi.libraries.create_library(name)
        librole = str(gi.users.get_current_user()["id"] if not role else role)
        try:
            gi.libraries.set_library_permissions(str(lib["id"]), librole, librole, librole, librole)
        # XXX Galaxy reports an error here although the permissions do get
        # set.  BUGFIX: narrowed from a bare `except:` so that system
        # exits / keyboard interrupts are no longer swallowed.
        except Exception:
            pass
        return GalaxyLibrary(lib["id"], lib["name"], role)
    else:
        raise ValueError("Could not find Galaxy library matching '%s' for sample %s" %
                         (name, sample_info["description"]))
def _library_from_nglims(gi, sample_info, config):
    """Retrieve upload library from nglims specified user libraries."""
    names = [config.get(x, "").strip() for x in ["lab_association", "researcher"]
             if config.get(x)]
    # first preference: a "<first word> sequencing" or "<first word> lab" library
    for name in names:
        for ext in ["sequencing", "lab"]:
            check_name = "%s %s" % (name.split()[0], ext)
            try:
                return _get_library_from_name(gi, check_name, None, sample_info)
            except ValueError:
                pass
    # next: a private library matching the lab association or researcher name
    check_names = set(n.lower() for n in names)
    for libname, role in config["private_libs"]:
        if libname.lower() in check_names:
            return _get_library_from_name(gi, libname, role, sample_info)
    # default to the first private library if available
    if len(config.get("private_libs", [])) > 0:
        libname, role = config["private_libs"][0]
        return _get_library_from_name(gi, libname, role, sample_info)
    # otherwise create one named after the lab association / researcher
    elif names:
        return _get_library_from_name(gi, names[0], None, sample_info, create=True)
    else:
        raise ValueError("Could not find Galaxy library for sample %s" % sample_info["description"])
|
brainstorm/bcbio-nextgen
|
bcbio/upload/galaxy.py
|
Python
|
mit
| 8,148
|
[
"Galaxy"
] |
fd7f63bb0e0b75c7ffb54ce4cb2be469c625d9e4c98fb63725fecf0b02ef3370
|
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from copy import deepcopy
import os
import subprocess
import pysam
import random
# DO NOT SEED
#random.seed("3337")
def parse_options(description, epilog):
    """Build the command-line parser and parse sys.argv.

    Positional arguments: input_file, contigs (comma-separated, must be
    sorted), output_file.
    """
    parser = ArgumentParser(description=description,
                            formatter_class=RawDescriptionHelpFormatter,
                            epilog=epilog)
    parser.add_argument('input_file', type=str, help='')
    parser.add_argument('contigs', type=str, help='The contigs to PRESERVE separated by "," ... THESE MUST BE SORTED (e.g. 5,20,21 is okay ; 20,5,21 is not okay)')
    parser.add_argument('output_file', type=str, help='')
    return parser.parse_args()
def main():
    """Reheader a coordinate-sorted, paired-end BAM down to the given contigs.

    Three passes through temporary SAM files: (1) keep reads whose mate
    maps to a preserved contig, (2) write a header restricted to those
    contigs, (3) renumber reference ids against the new header and write
    the final indexed BAM.
    """
    args = parse_options("Tool to reheader a (copy of a) bam to only the specified contigs. EXPERIMENTAL and UNSUPPORTED", """WARNING: No error checking.
Assumes that the bam is coordinate sorted and paired-end.
java -jar picard.jar ReplaceSamHeader HEADER=tmp_header.sam I=<(cat <(head -n1 tmp_header.sam ) <(samtools view tumor_1_foo.bam)) O=yossi.bam CREATE_INDEX=true VALIDATION_STRINGENCY=SILENT
THIS SCRIPT CAN BE VERY SLOW ON LARGE BAMS
This script is mostly meant for generating test bams that validate (even in Picard).
Example: python reheader_bam.py some.bam 20,21 some_20_21_only.bam""")

    input_file = args.input_file
    output_file = args.output_file
    contigs = args.contigs.split(",")
    contigs = set([c.strip() for c in contigs])

    # 1) BAM -> Filtered SAM w/ old header
    random_num = random.random()
    samfile_out_filename_step1 = os.path.dirname(os.path.abspath(output_file)) + "/tmp_step_1_" + str(random_num) + ".sam"
    bamfile_in = pysam.AlignmentFile(input_file, 'rb')
    samfile_out = pysam.AlignmentFile(samfile_out_filename_step1, 'wh', header=bamfile_in.header)
    for i, r in enumerate(bamfile_in):
        if r.next_reference_name in contigs:
            try:
                s_mate = bamfile_in.mate(r)
                samfile_out.write(r)
            except ValueError as ve:
                # mate() raises ValueError both when the mate is missing
                # (skip the read) and on usage errors (re-raise).
                # BUGFIX: `ve.message` only exists on Python 2 and would
                # raise AttributeError on Python 3; use str(ve) instead.
                msg = str(ve)
                if msg.find("fetch called") != -1 or msg.find("fetching by") != -1:
                    raise
        if i % 100 == 0:
            print(str(i) + " reads... (" + input_file + ")")

    samfile_out.close()
    # BUGFIX: the input BAM handle was never closed
    bamfile_in.close()

    # 2) SAM Filtered -> new header only SAM file
    # samfile_out_filename is the output from previous step which is input now.
    samfile_in = pysam.AlignmentFile(samfile_out_filename_step1, 'r')
    samfile_out_filename_step2 = os.path.dirname(os.path.abspath(output_file)) + "/tmp_step_2_" + str(random_num) + ".sam"

    # Construct the header-only sam file
    old_header = deepcopy(samfile_in.header)
    new_sq = [c for c in old_header['SQ'] if c['SN'] in contigs]
    new_header = deepcopy(old_header)
    new_header['SQ'] = new_sq
    samfile_out = pysam.AlignmentFile(samfile_out_filename_step2, 'wh', header=new_header)
    samfile_out.close()
    samfile_in.close()  # BUGFIX: was left open

    # 3) SAM Filtered + new header only SAM file -> BAM
    bamfile_out_filename_step3 = output_file
    header_only_in = pysam.AlignmentFile(samfile_out_filename_step2, 'r')

    # Create a dictionary, so that we can lookup the SN and get the index.
    sn_to_index_dict = dict()
    for i, entry in enumerate(header_only_in.header['SQ']):
        sn_to_index_dict[entry['SN']] = i

    samfile_in = pysam.AlignmentFile(samfile_out_filename_step1, 'r')
    bamfile_out = pysam.AlignmentFile(bamfile_out_filename_step3, 'wb', header=header_only_in.header)
    for r in samfile_in:
        # remap tid/mtid from the old header numbering to the new one
        r.reference_id = sn_to_index_dict[r.reference_name]
        r.next_reference_id = sn_to_index_dict[r.next_reference_name]
        bamfile_out.write(r)

    bamfile_out.close()
    samfile_in.close()      # BUGFIX: was left open
    header_only_in.close()  # BUGFIX: was left open

    subprocess.call("samtools index " + bamfile_out_filename_step3, shell=True)
# Script entry point; all work (argument parsing included) happens in main().
if __name__ == "__main__":
    main()
|
broadinstitute/hellbender
|
scripts/unsupported/reheader_bam/reheader_bam.py
|
Python
|
bsd-3-clause
| 3,842
|
[
"pysam"
] |
541593744fc0a55f75e5d01295c5377ec4e12c296b2848f464acf758f06328ba
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
One-dimensional Gamma-Gaussian mixture density classes : Given a set
of points the algo provides approcumate maximum likelihood estimates
of the mixture distribution using an EM algorithm.
Author: Bertrand Thirion and Merlin Keller 2005-2008
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import scipy.stats as st
import scipy.special as sp
#############################################################################
# Auxiliary functions #######################################################
#############################################################################
def _dichopsi_log(u, v, y, eps=0.00001):
""" Implements the dichotomic part of the solution of psi(c)-log(c)=y
"""
if u > v:
u, v = v, u
t = (u + v) / 2
if np.absolute(u - v) < eps:
return t
else:
if sp.psi(t) - np.log(t) > y:
return _dichopsi_log(u, t, y, eps)
else:
return _dichopsi_log(t, v, y, eps)
def _psi_solve(y, eps=0.00001):
    """Solve psi(c) - log(c) = y by dichotomy.

    y must be negative: psi(c) - log(c) < 0 for all c > 0.

    Fix: the debug ``print("y", y)`` issued just before raising has been
    folded into the exception message, so callers see the offending value
    without stdout noise.
    """
    if y > 0:
        raise ValueError("y>0, the problem cannot be solved (y=%s)" % y)
    # Bracket the root between u and 2u by scaling u by powers of two,
    # then refine by dichotomy.
    u = 1.
    if y > sp.psi(u) - np.log(u):
        while sp.psi(u) - np.log(u) < y:
            u *= 2
        u /= 2
    else:
        while sp.psi(u) - np.log(u) > y:
            u /= 2
    return _dichopsi_log(u, 2 * u, y, eps)
def _compute_c(x, z, eps=0.00001):
    """Return the (approximate) MLE of the shape parameter of a 1D gamma
    density fitted to the points x with weights z."""
    # NOTE(review): the eps argument is immediately overwritten below;
    # kept as-is to preserve the original behaviour.
    eps = 1.e-7
    weight_sum = np.sum(z)
    y = np.dot(z, np.log(x)) / weight_sum - np.log(np.dot(z, x) / weight_sum)
    if y > -eps:
        # near-degenerate case: fall back to a large fixed shape
        return 10
    return _psi_solve(y, eps=0.00001)
def _gaus_dens(mean, var, x):
""" evaluate the gaussian density (mean,var) at points x
"""
Q = - (x - mean) ** 2 / (2 * var)
return 1. / np.sqrt(2 * np.pi * var) * np.exp(Q)
def _gam_dens(shape, scale, x):
"""evaluate the gamma density (shape,scale) at points x
Notes
-----
Returns 0 on negative subspace
"""
ng = np.zeros(np.size(x))
cst = - shape * np.log(scale) - sp.gammaln(shape)
i = np.ravel(np.nonzero(x > 0))
if np.size(i) > 0:
lz = cst + (shape - 1) * np.log(x[i]) - x[i] / scale
ng[i] = np.exp(lz)
return ng
def _gam_param(x, z):
""" Compute the parameters of a gamma density from data weighted points
Parameters
----------
x: array of shape(nbitem) the learning points
z: array of shape(nbitem), their membership within the class
Notes
-----
if no point is positive then the couple (1, 1) is returned
"""
eps = 1.e-5
i = np.ravel(np.nonzero(x > 0))
szi = np.sum(z[i])
if szi > 0:
shape = _compute_c(x[i], z[i], eps)
scale = np.dot(x[i], z[i]) / (szi * shape)
else:
shape = 1
scale = 1
return shape, scale
##############################################################################
# class `Gamma`
##############################################################################
class Gamma(object):
    """One dimensional Gamma density with ML parameter estimation.

    The density has support on positive values only.

    Two parameters are used:
    - shape: gamma shape parameter
    - scale: gamma scale parameter
    """

    def __init__(self, shape=1, scale=1):
        self.shape = shape
        self.scale = scale

    def parameters(self):
        print("shape: ", self.shape, "scale: ", self.scale)

    def check(self, x):
        # the gamma density has no support on negative values
        if (x.min() < 0):
            raise ValueError("negative values in input")

    def estimate(self, x, eps=1.e-7):
        """
        ML estimation of the Gamma parameters
        """
        self.check(x)
        npts = np.size(x)
        total = np.sum(x)
        # y = E[log x] - log E[x] <= 0, with equality for constant data
        y = np.sum(np.log(x)) / npts - np.log(total / npts)
        self.shape = 1 if y > - eps else _psi_solve(y)
        self.scale = total / (npts * self.shape)
##############################################################################
# Gamma-Gaussian Mixture class
##############################################################################
class GGM(object):
    """
    This is the basic one dimensional Gaussian-Gamma Mixture estimation class
    Note that it can work with positive or negative values,
    as long as there is at least one positive value.
    NB : The gamma distribution is defined only on positive values.

    5 scalar members
    - mean: gaussian mean
    - var: gaussian variance (non-negative)
    - shape: gamma shape (non-negative)
    - scale: gamma scale (non-negative)
    - mixt: mixture parameter (non-negative, weight of the gamma)
    """

    def __init__(self, shape=1, scale=1, mean=0, var=1, mixt=0.5):
        self.shape = shape
        self.scale = scale
        self.mean = mean
        self.var = var
        self.mixt = mixt

    def parameters(self):
        """ Print the parameters of self
        """
        print("Gaussian: mean: ", self.mean, "variance: ", self.var)
        print("Gamma: shape: ", self.shape, "scale: ", self.scale)
        print("Mixture gamma: ", self.mixt, "Gaussian: ", 1 - self.mixt)

    def Mstep(self, x, z):
        """
        Mstep of the model: maximum likelihood
        estimation of the parameters of the model

        Parameters
        ----------
        x : array of shape (nbitems,)
            input data
        z : array of shape (nbitems, 2)
            the membership matrix
        """
        # z[:, 0] is the membership of the gamma component,
        # z[:, 1] is the membership of the gaussian component
        tiny = 1.e-15
        sz = np.maximum(tiny, np.sum(z, 0))

        self.shape, self.scale = _gam_param(x, z[:, 0])
        self.mean = np.dot(x, z[:, 1]) / sz[1]
        self.var = np.dot((x - self.mean) ** 2, z[:, 1]) / sz[1]
        self.mixt = sz[0] / np.size(x)

    def Estep(self, x):
        """
        E step of the estimation:
        Estimation of data membership

        Parameters
        ----------
        x: array of shape (nbitems,)
            input data

        Returns
        -------
        z: array of shape (nbitems, 2)
            the membership matrix
        L: float, average log-likelihood of the data
        """
        eps = 1.e-15
        z = np.zeros((np.size(x), 2), 'd')
        z[:, 0] = _gam_dens(self.shape, self.scale, x)
        z[:, 1] = _gaus_dens(self.mean, self.var, x)
        z = z * np.array([self.mixt, 1. - self.mixt])
        sz = np.maximum(np.sum(z, 1), eps)
        L = np.sum(np.log(sz)) / np.size(x)
        z = (z.T / sz).T
        return z, L

    def estimate(self, x, niter=10, delta=0.0001, verbose=False):
        """ Complete EM estimation procedure

        Parameters
        ----------
        x : array of shape (nbitems,)
            the data to be processed
        niter : int, optional
            max nb of iterations
        delta : float, optional
            criterion for convergence
        verbose : bool, optional
            If True, print values during iterations

        Returns
        -------
        LL, float
            average final log-likelihood
        """
        if x.max() < 0:
            # all the values are generated by the Gaussian
            self.mean = np.mean(x)
            self.var = np.var(x)
            self.mixt = 0.
            # NOTE(review): sign differs from Estep's average log-likelihood
            # (-0.5 * (1 + log(2*pi*var))) -- confirm intended.
            L = 0.5 * (1 + np.log(2 * np.pi * self.var))
            return L

        # proceed with standard estimate
        z, L = self.Estep(x)
        L0 = L - 2 * delta
        for i in range(niter):
            self.Mstep(x, z)
            z, L = self.Estep(x)
            if verbose:
                print(i, L)
            # stop when the log-likelihood gain falls below delta
            if (L < L0 + delta):
                break
            L0 = L
        return L

    def show(self, x):
        """ Visualization of the mm based on the empirical histogram of x

        Parameters
        ----------
        x : array of shape (nbitems,)
            the data to be processed
        """
        # Scott-like binning rule: 3.5 * std / n**(1/3)
        step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3)
        bins = max(10, int((x.max() - x.min()) / step))
        h, c = np.histogram(x, bins)
        # Fix: np.float alias was removed in NumPy 1.24; use builtin float.
        h = h.astype(float) / np.size(x)
        p = self.mixt
        dc = c[1] - c[0]
        y = (1 - p) * _gaus_dens(self.mean, self.var, c) * dc
        # (a dead `z = np.zeros(...)` assignment was removed here)
        z = _gam_dens(self.shape, self.scale, c) * p * dc

        import matplotlib.pylab as mp
        mp.figure()
        mp.plot(0.5 * (c[1:] + c[:-1]), h)
        mp.plot(c, y, 'r')
        mp.plot(c, z, 'g')
        mp.plot(c, z + y, 'k')
        mp.title('Fit of the density with a Gamma-Gaussians mixture')
        # Fix: legend typo 'gaussian acomponent' -> 'gaussian component'
        mp.legend(('data', 'gaussian component', 'gamma component',
                   'mixture distribution'))

    def posterior(self, x):
        """Posterior probability of observing the data x for each component

        Parameters
        ----------
        x: array of shape (nbitems,)
            the data to be processed

        Returns
        -------
        y, pg : arrays of shape (nbitem)
            the posterior probability
        """
        p = self.mixt
        pg = p * _gam_dens(self.shape, self.scale, x)
        y = (1 - p) * _gaus_dens(self.mean, self.var, x)
        return y / (y + pg), pg / (y + pg)
##############################################################################
# double-Gamma-Gaussian Mixture class
##############################################################################
class GGGM(object):
    """
    The basic one dimensional Gamma-Gaussian-Gamma Mixture estimation
    class, where the first gamma has a negative sign, while the second
    one has a positive sign.

    7 parameters are used:
    - shape_n: negative gamma shape
    - scale_n: negative gamma scale
    - mean: gaussian mean
    - var: gaussian variance
    - shape_p: positive gamma shape
    - scale_p: positive gamma scale
    - mixt: array of mixture parameter
    (weights of the n-gamma, gaussian and p-gamma)
    """

    def __init__(self, shape_n=1, scale_n=1, mean=0, var=1,
                 shape_p=1, scale_p=1, mixt=None):
        """ Constructor

        Parameters
        -----------
        shape_n : float, optional
        scale_n: float, optional
            parameters of the negative gamma; must be positive
        mean : float, optional
        var : float, optional
            parameters of the gaussian ; var must be positive
        shape_p : float, optional
        scale_p : float, optional
            parameters of the positive gamma; must be positive
        mixt : array of shape (3,), optional
            the mixing proportions; they should be positive and sum to 1;
            defaults to uniform proportions

        Notes
        -----
        Fix: the previous default `mixt=np.array([1., 1., 1.]) / 3` was a
        shared mutable default; `init`/`init_fdr` mutate self.mixt in
        place, which corrupted the default for every later instance.
        """
        self.shape_n = shape_n
        self.scale_n = scale_n
        self.mean = mean
        self.var = var
        self.shape_p = shape_p
        self.scale_p = scale_p
        self.mixt = np.array([1.0, 1.0, 1.0]) / 3 if mixt is None \
            else np.asarray(mixt)

    def parameters(self):
        """ Print the parameters
        """
        print("Negative Gamma: shape: ", self.shape_n,
              "scale: ", self.scale_n)
        print("Gaussian: mean: ", self.mean, "variance: ", self.var)
        print("Positive Gamma: shape: ", self.shape_p, "scale: ", self.scale_p)
        mixt = self.mixt
        print("Mixture neg. gamma: ", mixt[0], "Gaussian: ", mixt[1],
              "pos. gamma: ", mixt[2])

    def init(self, x, mixt=None):
        """
        initialization of the different parameters

        Parameters
        ----------
        x: array of shape(nbitems)
            the data to be processed
        mixt : None or array of shape(3), optional
            prior mixing proportions. If None, the classes have equal weight
        """
        if mixt is not None:
            if np.size(mixt) == 3:
                self.mixt = np.ravel(mixt)
            else:
                raise ValueError('bad size for mixt')

        # gaussian
        self.mean = np.mean(x)
        self.var = np.var(x)

        # negative gamma: moment-matched on the negative samples
        i = np.ravel(np.nonzero(x < 0))
        if np.size(i) > 0:
            mn = - np.mean(x[i])
            vn = np.var(x[i])
            self.scale_n = vn / mn
            self.shape_n = mn ** 2 / vn
        else:
            self.mixt[0] = 0

        # positive gamma: moment-matched on the positive samples
        i = np.ravel(np.nonzero(x > 0))
        if np.size(i) > 0:
            mp = np.mean(x[i])
            vp = np.var(x[i])
            self.scale_p = vp / mp
            self.shape_p = mp ** 2 / vp
        else:
            self.mixt[2] = 0

        # mixing proportions
        self.mixt = self.mixt / np.sum(self.mixt)

    def init_fdr(self, x, dof=-1, copy=True):
        """
        Initialization of the class based on a fdr heuristic: the
        probability to be in the positive component is proportional to
        the 'positive fdr' of the data. The same holds for the
        negative part. The point is that the gamma parts should model
        nothing more that the tails of the distribution.

        Parameters
        ----------
        x: array of shape(nbitem)
            the data under consideration
        dof: integer, optional
            number of degrees of freedom if x is thought to be a student
            variate. By default, it is handled as a normal
        copy: boolean, optional
            If True, copy the data.
        """
        # Safeguard ourselves against modifications of x, both by our
        # code, and by external code.
        if copy:
            x = x.copy()

        # positive gamma
        i = np.ravel(np.nonzero(x > 0))
        from .empirical_pvalue import fdr
        if np.size(i) > 0:
            if dof < 0:
                pvals = st.norm.sf(x)
            else:
                pvals = st.t.sf(x, dof)
            q = fdr(pvals)
            z = 1 - q[i]
            # keep at least half a point's worth of weight in the tail
            self.mixt[2] = np.maximum(0.5, z.sum()) / np.size(x)
            self.shape_p, self.scale_p = _gam_param(x[i], z)
        else:
            self.mixt[2] = 0

        # negative gamma
        i = np.ravel(np.nonzero(x < 0))
        if np.size(i) > 0:
            if dof < 0:
                pvals = st.norm.cdf(x)
            else:
                pvals = st.t.cdf(x, dof)
            q = fdr(pvals)
            z = 1 - q[i]
            self.shape_n, self.scale_n = _gam_param( - x[i], z)
            self.mixt[0] = np.maximum(0.5, z.sum()) / np.size(x)
        else:
            self.mixt[0] = 0
        self.mixt[1] = 1 - self.mixt[0] - self.mixt[2]

    def Mstep(self, x, z):
        """
        Mstep of the estimation:
        Maximum likelihood update the parameters of the three components

        Parameters
        ------------
        x: array of shape (nbitem,)
            input data
        z: array of shape (nbitems,3)
            probabilistic membership
        """
        tiny = 1.e-15
        sz = np.maximum(np.sum(z, 0), tiny)
        self.mixt = sz / np.sum(sz)

        # negative gamma (fitted on -x so the tail becomes positive)
        self.shape_n, self.scale_n = _gam_param( - x, z[:, 0])

        # gaussian
        self.mean = np.dot(x, z[:, 1]) / sz[1]
        self.var = np.dot((x - self.mean) ** 2, z[:, 1]) / sz[1]

        # positive gamma
        self.shape_p, self.scale_p = _gam_param(x, z[:, 2])

    def Estep(self, x):
        """ Update probabilistic memberships of the three components

        Parameters
        ----------
        x: array of shape (nbitems,)
            the input data

        Returns
        -------
        z: ndarray of shape (nbitems, 3)
            probabilistic membership
        L: float, average log-likelihood of the data

        Notes
        -----
        z[:, 0] is the membership of the negative gamma
        z[:, 1] is the membership of the gaussian
        z[:, 2] is the membership of the positive gamma
        """
        tiny = 1.e-15
        z = np.array(self.component_likelihood(x)).T * self.mixt
        sz = np.maximum(tiny, np.sum(z, 1))
        L = np.mean(np.log(sz))
        z = (z.T / sz).T
        return z, L

    def estimate(self, x, niter=100, delta=1.e-4, bias=0, verbose=0,
                 gaussian_mix=0):
        """ Whole EM estimation procedure:

        Parameters
        ----------
        x: array of shape (nbitem)
            input data
        niter: integer, optional
            max number of iterations
        delta: float, optional
            increment in LL at which convergence is declared
        bias: float, optional
            lower bound on the gaussian variance (to avoid shrinkage)
        gaussian_mix: float, optional
            if nonzero, lower bound on the gaussian mixing weight
            (to avoid shrinkage)
        verbose: 0, 1 or 2
            verbosity level

        Returns
        -------
        z: array of shape (nbitem, 3)
            the membership matrix
        """
        z, L = self.Estep(x)
        L0 = L - 2 * delta

        for i in range(niter):
            self.Mstep(x, z)

            # Constraint the Gaussian variance
            if bias > 0:
                self.var = np.maximum(bias, self.var)

            # Constraint the Gaussian mixing ratio
            if gaussian_mix > 0 and self.mixt[1] < gaussian_mix:
                # Fix: unpack in storage order (mixt = [neg, gauss, pos]).
                # The original read mixt[0] into `upper`, which swapped the
                # two tail weights when the tuple was repacked below.
                lower, gaussian, upper = self.mixt
                upper_to_lower = upper / (lower + upper)
                gaussian = gaussian_mix
                upper = (1 - gaussian_mix) * upper_to_lower
                lower = 1 - gaussian_mix - upper
                self.mixt = np.array([lower, gaussian, upper])

            z, L = self.Estep(x)
            if verbose:
                print(i, L)
            if (L < L0 + delta):
                break
            L0 = L
        return z

    def posterior(self, x):
        """
        Compute the posterior probability of the three components
        given the data

        Parameters
        -----------
        x: array of shape (nbitem,)
            the data under evaluation

        Returns
        --------
        ng,y,pg: three arrays of shape(nbitem)
            the posteriori of the 3 components given the data

        Notes
        -----
        ng + y + pg = np.ones(nbitem)
        """
        p = self.mixt
        ng, y, pg = self.component_likelihood(x)
        total = ng * p[0] + y * p[1] + pg * p[2]
        return ng * p[0] / total, y * p[1] / total, pg * p[2] / total

    def component_likelihood(self, x):
        """
        Compute the likelihood of the data x under the three components:
        negative gamma, gaussian, positive gamma

        Parameters
        -----------
        x: array of shape (nbitem,)
            the data under evaluation

        Returns
        --------
        ng,y,pg: three arrays of shape(nbitem)
            The likelihood of the data under the 3 components
        """
        ng = _gam_dens(self.shape_n, self.scale_n, - x)
        y = _gaus_dens(self.mean, self.var, x)
        pg = _gam_dens(self.shape_p, self.scale_p, x)
        return ng, y, pg

    def show(self, x, mpaxes=None):
        """ Visualization of mixture shown on the empirical histogram of x

        Parameters
        ----------
        x: ndarray of shape (nditem,)
            data
        mpaxes: matplotlib axes, optional
            axes handle used for the plot if None, new axes are created.
        """
        import matplotlib.pylab as mp
        step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3)
        bins = max(10, int((x.max() - x.min()) / step))
        h, c = np.histogram(x, bins)
        # Fix: np.float alias was removed in NumPy 1.24; use builtin float.
        h = h.astype(float) / np.size(x)
        dc = c[1] - c[0]

        ng = self.mixt[0] * _gam_dens(self.shape_n, self.scale_n, - c)
        y = self.mixt[1] * _gaus_dens(self.mean, self.var, c)
        pg = self.mixt[2] * _gam_dens(self.shape_p, self.scale_p, c)
        z = y + pg + ng

        if mpaxes is None:
            mp.figure()
            ax = mp.subplot(1, 1, 1)
        else:
            ax = mpaxes
        ax.plot(0.5 * (c[1:] + c[:-1]), h / dc, linewidth=2, label='data')
        ax.plot(c, ng, 'c', linewidth=2, label='negative gamma component')
        ax.plot(c, y, 'r', linewidth=2, label='Gaussian component')
        ax.plot(c, pg, 'g', linewidth=2, label='positive gamma component')
        ax.plot(c, z, 'k', linewidth=2, label='mixture distribution')
        ax.set_title('Fit of the density with a Gamma-Gaussian mixture',
                     fontsize=12)
        # renamed from `l`, which shadowed the builtin and is easily
        # confused with the digit 1
        leg = ax.legend()
        for t in leg.get_texts():
            t.set_fontsize(12)
        ax.set_xticklabels(ax.get_xticks(), fontsize=12)
        ax.set_yticklabels(ax.get_yticks(), fontsize=12)
|
ellisdg/3DUnetCNN
|
unet3d/utils/nipy/ggmixture.py
|
Python
|
mit
| 20,840
|
[
"Gaussian"
] |
64b1ad101a0cc9cee7751fb36b5d2216676d591158e0e7e22c242ff5b18a0529
|
from ase.lattice import bulk
from gpaw import GPAW
from gpaw.wavefunctions.pw import PW
from gpaw.xc.exx import EXX
from ase.parallel import paropen
# Results are appended, so repeated runs (e.g. convergence scans over
# alat / k / pwcutoff) accumulate in a single file.
resultfile = paropen('si.pbe+exx.results.txt', 'a')

# Plane wave cutoff
pwcutoff = 600.0

# NxNxN k-point sampling, gamma-centred grid
k = 8

# Si lattice constant
alat = 5.421

# Do the bulk calculation
bulk_crystal = bulk('Si', 'diamond', a=alat)

bulk_calc = GPAW(
    mode = PW(pwcutoff),
    kpts={'size': (k, k, k), 'gamma': True},
    dtype=complex,  # NOTE(review): complex wavefunctions -- presumably needed for the EXX step; confirm
    xc='PBE',
    txt='si.pbe+exx.pbe_output.txt',
    parallel={'band':1}
)

bulk_crystal.set_calculator(bulk_calc)
e0_bulk_pbe = bulk_crystal.get_potential_energy()

# Write to file
bulk_calc.write('bulk.gpw',mode='all')

# Now the exact exchange
# NOTE(review): EXX reads the stored PBE orbitals back from bulk.gpw, so
# the exchange energy is evaluated on PBE orbitals (non-self-consistent);
# confirm against the GPAW EXX documentation.
exx_bulk = EXX('bulk.gpw', txt='si.pbe+exx.exx_output.txt')
exx_bulk.calculate()
e0_bulk_exx = exx_bulk.get_total_energy()

# One result line per run: "alat k pwcutoff E_PBE E_EXX"
s = str(alat)
s += ' '
s += str(k)
s += ' '
s += str(pwcutoff)
s += ' '
s += str(e0_bulk_pbe)
s += ' '
s += str(e0_bulk_exx)
s += '\n'
resultfile.write(s)
|
robwarm/gpaw-symm
|
doc/exercises/rpa/si.pbe+exx.py
|
Python
|
gpl-3.0
| 1,101
|
[
"ASE",
"GPAW"
] |
683d257b1e48a8b4b249ca5c8f937d40c06228a1b69cc9d8e089500e315ed5d3
|
# howtoplay.py -- a basic help screen for angrydd
# Copyright 2004 Joe Wreschnig <piman@sacredchao.net>
# Released under the terms of the GNU GPL v2.
__revision__ = "$Id: howtoplay.py 286 2004-09-04 03:51:59Z piman $"
import pygame; from pygame import transform
import characters; from characters import Character
import config
import textfx
import wipes
import load
from events import EventManager
from boxes import SpecialSprite
from constants import *
# Display the "how to play" screen (with appropriate unlocks enabled).
# FIXME: This is pretty messy; the text belongs in a data file rather
# than inline.
def init(*args):
    """Display the "how to play" screens; returns True when dismissed.

    Accepts (and ignores) any positional arguments -- presumably invoked
    from a generic menu dispatcher; TODO confirm against callers.
    """
    em = EventManager()
    screen = pygame.display.get_surface()
    move_snd = load.sound("select-move.wav")
    index = 0

    # The format is up to three paragraphs per screen, displayed
    # left, right, left,
    texts = [
        ["In Angry, Drunken Dwarves, you are an angry, drunken dwarf. Why "
         "are you so angry? Who knows. But you've decided to take your "
         "aggression out on other dwarves, by dropping gems on their "
         "heads.",

         "Multicolored gems will fall from the top of the screen. You "
         "should try to arrange them into groups by color. The arrow keys "
         "or a/d and j/l will move them left and right. s or k will make "
         "then fall faster, and q/e and u/o will rotate them.",

         "As gems reach the bottom, they will land and sit in place. If you "
         "put enough gems of the same color in a rectangle shape, they will "
         "merge and form a crystal."
         ],

        ["Less common are break gems. When you drop a break gem on a gem "
         "or crystal of the same color, it will destroy any gems of that "
         "color adjacent to it. Crystals are worth much more than an equal "
         "number of regular gems.",

         "When you destroy gems on your side, it will drop counter gems "
         "onto your opponent. These are harder to destroy, but turn into "
         "regular gems after a few turns. You can try to cancel an incoming "
         "attack by breaking gems yourself, but you'll need to break twice "
         "as many as are coming.",

         "Rarer is the diamond. When you drop a diamond onto something, "
         "it will destroy all gems of that color on your side, even "
         "counter gems! Be careful though, because a crystal "
         "destroyed by a diamond isn't worth any more than normal gems."
         ],

        ["Each dwarf has a drop gem pattern, and an attack strength. The drop "
         "pattern is a display of what kinds of gems will be dropped when "
         "you attack your opponent. Small attacks will drop the same "
         "row over and over, which will probably help them!",

         "The attack strength is a measurement of how strong your drop "
         "pattern is; drops with weirder patterns are harder to plan "
         "against. Before gems are dropped, they are multiplied by this "
         "attack strength, and so the damage is scaled up or down.",

         "For the most devestating results, try chaining attacks together, "
         "so breaking some gems results in even more breaking afterwards. "
         "Breaking many crystals in a chain can result in huge drops."
         ]
        ]

    # Build the example surfaces referenced by the paragraphs above.
    drop = pygame.Surface([64, 64])
    drop.blit(load.block("blue"), [0, 0])
    drop.blit(load.block("yellow"), [0, 32])
    drop.blit(load.block("green"), [32, 0])
    drop.blit(load.block("red"), [32, 32])

    crystal = load.gem("green", 4, 3)

    breaks = pygame.Surface([64, 64])
    breaks.blit(load.block("red", "-crash"), [0, 0])
    breaks.blit(load.block("green", "-crash"), [0, 32])
    breaks.blit(load.block("yellow", "-crash"), [32, 0])
    breaks.blit(load.block("blue", "-crash"), [32, 32])

    counters = pygame.Surface([64, 64])
    counters.blit(textfx.lettered_box("5", "green"), [0, 0])
    counters.blit(textfx.lettered_box("4", "blue"), [32, 0])
    counters.blit(textfx.lettered_box("3", "red"), [0, 32])
    counters.blit(textfx.lettered_box("2", "yellow"), [32, 32])

    chain = pygame.Surface([64, 96])
    chain.blit(load.block("blue", "-crash"), [0, 64])
    chain.blit(load.block("green"), [32, 64])
    chain.blit(load.block("green", "-crash"), [32, 32])
    chain.blit(load.block("blue"), [32, 0])

    # Followed by up to three images per screen, right, left, right.
    images = [
        [None, drop, crystal],
        [breaks, counters, load.block("diamond")],
        [Character.arcade[4].drop.render(), None, chain],
        ]

    # Extra pages appear only once the corresponding mode is unlocked.
    if config.getboolean("unlock", "single"):
        texts.append([
            "In single player mode, rather than competing against someone "
            "else, you're racing the clock. You have to clear a certain "
            "number of blocks in a certain number of turns, or the ones "
            "left get dumped on you.",

            "Your field is also twice as big. So it sounds easy, right? "
            "Well, to start with, you've got three new colors of gems to "
            "contend with: orange, purple, and cyan.",

            "If that wasn't bad enough, the number of gems you have to clear "
            "goes up much faster than the number of turns you have "
            "to do it, so build up those crystals early and save them."
            ])
        newcols = pygame.Surface([64, 64])
        newcols.blit(load.block("orange"), [16, 0])
        newcols.blit(load.block("cyan"), [0, 32])
        newcols.blit(load.block("purple"), [32, 32])

        counts = pygame.Surface([130, 100])
        box = Character.default.border([40, 40])
        text1 = textfx.shadow("new:", 20)
        text2 = textfx.shadow("turns:", 20)
        num1 = textfx.shadow("122", 30)
        num2 = textfx.shadow("10", 30)
        counts.blit(box, [0, 20])
        counts.blit(box, [70, 20])
        counts.blit(text1, text1.get_rect(center = [30, 38]))
        counts.blit(text2, text2.get_rect(center = [100, 38]))
        counts.blit(num1, [12, 45])
        counts.blit(num2, [87, 45])
        images.append([None, newcols, counts])

    if config.getboolean("unlock", "combat"):
        texts.append([
            "Combat blocks look like normal gems with a special symbol "
            "in the middle. These gems don't form crystals, but otherwise "
            "break like normal colored gems. When you break one of these, "
            "you 'pick up' the special attack in it.",

            "To use the attack, press start (enter/2). There are five basic "
            "attacks; from left to right: make your opponent's field blink, "
            "clear your own field of all blocks,",

            "flip your opponent's screen upside down, disable the 'next' "
            "indicator, drop some gray blocks, or reverse your opponent's "
            "controls. Blink, flip, and reverse last for a few "
            "seconds. Scramble lasts 10 turns."])

        blink = load.block("blue")
        clear = load.block("red")
        rev = load.block("yellow")
        flip = load.block("green")
        gray = load.block("red")
        scram = load.block("purple")
        blink.blit(SpecialSprite.load(BLINK), [6, 6])
        flip.blit(SpecialSprite.load(FLIP), [6, 6])
        clear.blit(SpecialSprite.load(CLEAR), [6, 6])
        rev.blit(SpecialSprite.load(REVERSE), [6, 6])
        gray.blit(SpecialSprite.load(GRAY), [6, 6])
        scram.blit(SpecialSprite.load(SCRAMBLE), [6, 6])

        img1 = pygame.Surface([64, 32])
        img2 = pygame.Surface([64, 64])
        img1.blit(blink, [0, 0])
        img1.blit(clear, [32, 0])
        img2.blit(flip, [0, 0])
        img2.blit(scram, [32, 0])
        img2.blit(gray, [0, 32])
        img2.blit(rev, [32, 32])
        images.append([None, img1, img2])

    # Draw the first page, then run the page-turning event loop.
    wipes.wipe_in(render_help_page(texts[index], images[index]))
    cont = True
    screen.blit(render_help_page(texts[index], images[index]), [0, 0])
    img = textfx.shadow("Enter: Menu - Left/Right: Turn Page (%d/%d)" %
                        (index + 1, len(images)), 18)
    screen.blit(img, [785 - img.get_width(), 10])
    pygame.display.update()
    while cont:
        oldindex = index
        for ev in em.wait():
            if ev.type == QUIT: cont = False
            elif ev.type == PLAYER:
                if ev.key == CONFIRM: cont = False
                elif ev.key == UP or ev.key == LEFT:
                    index = (index - 1) % len(texts)
                    move_snd.play()
                elif ev.key in [DOWN, RIGHT, ROT_CC, ROT_CW]:
                    index = (index + 1) % len(texts)
                    move_snd.play()

        # Only redraw when the page actually changed.
        if oldindex != index:
            screen.blit(render_help_page(texts[index], images[index]), [0, 0])
            img = textfx.shadow("Enter: Menu - Left/Right: Turn Page (%d/%d)" %
                                (index + 1, len(images)), 18)
            screen.blit(img, [785 - img.get_width(), 10])
            pygame.display.update()
    return True
# Render a page in the format used above.
def render_help_page(texts, images):
    """Render one help page and return the composed surface.

    `texts` is a list of up to three paragraph strings; `images` is a
    parallel list of surfaces (or None).  Paragraphs alternate
    right/left down the page, with the matching image on the opposite
    side.
    """
    surf = Character.default.border([780, 580])
    font = textfx.WrapFont(32, 450)
    # Idiom fix: iterate pairs directly instead of range(len(texts)).
    for i, (paragraph, image) in enumerate(zip(texts, images)):
        text = font.render(paragraph)
        rt = text.get_rect()
        if image:
            ri = image.get_rect()
            # Treat the top-left pixel's color as transparent.
            image.set_colorkey(image.get_at([0, 0]))
        if i & 1:
            rt.centerx = 500
            if image:
                ri.centerx = 200
        else:
            rt.centerx = 300
            if image:
                ri.centerx = 630
        # Rows are 200px apart, starting at y=95.
        rt.centery = 95 + (200 * i)
        surf.blit(text, rt)
        if image:
            ri.centery = 95 + (200 * i)
            surf.blit(image, ri)
    return surf
|
joshuacronemeyer/Angry-Drunken-Programmers
|
howtoplay.py
|
Python
|
gpl-2.0
| 9,742
|
[
"CRYSTAL"
] |
c9265a7810ce6a1da5612e4c39a22429f54287530b89cb4758e9ab1372409278
|
# https://scikit-learn.org/stable/auto_examples/gaussian_process/plot_compare_gpr_krr.html
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, ExpSineSquared
# Fixed seed so the figure is reproducible.
rng = np.random.RandomState(0)

# Generate sample data
X = 15 * rng.rand(100, 1)
y = np.sin(X).ravel()
y += 3 * (0.5 - rng.rand(X.shape[0]))  # add noise

# Fit KernelRidge with parameter selection based on 5-fold cross validation
param_grid = {
    "alpha": [1e0, 1e-1, 1e-2, 1e-3],
    "kernel": [
        ExpSineSquared(l, p)
        for l in np.logspace(-2, 2, 10)
        for p in np.logspace(0, 2, 10)
    ],
}
kr = GridSearchCV(KernelRidge(), param_grid=param_grid)
stime = time.time()
kr.fit(X, y)
print("Time for KRR fitting: %.3f" % (time.time() - stime))

# Periodic kernel plus a white-noise term for the GP.
gp_kernel = ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) + WhiteKernel(1e-1)
gpr = GaussianProcessRegressor(kernel=gp_kernel)
stime = time.time()
gpr.fit(X, y)
print("Time for GPR fitting: %.3f" % (time.time() - stime))

# Predict using kernel ridge
X_plot = np.linspace(0, 20, 10000)[:, None]
stime = time.time()
y_kr = kr.predict(X_plot)
print("Time for KRR prediction: %.3f" % (time.time() - stime))

# Predict using gaussian process regressor
stime = time.time()
y_gpr = gpr.predict(X_plot, return_std=False)
print("Time for GPR prediction: %.3f" % (time.time() - stime))

# Same prediction again, timed separately with the predictive std-dev.
stime = time.time()
y_gpr, y_std = gpr.predict(X_plot, return_std=True)
print("Time for GPR prediction with standard-deviation: %.3f" % (time.time() - stime))

# Plot results
plt.figure(figsize=(10, 5))
lw = 2
plt.scatter(X, y, c="k", label="data")
plt.plot(X_plot, np.sin(X_plot), color="navy", lw=lw, label="True")
#plt.plot(X_plot, y_kr, color="turquoise", lw=lw, label="KRR (%s)" % kr.best_params_)
plt.plot(X_plot, y_kr, color="turquoise", lw=lw, label="KRR")
#plt.plot(X_plot, y_gpr, color="darkorange", lw=lw, label="GPR (%s)" % gpr.kernel_)
plt.plot(X_plot, y_gpr, color="darkorange", lw=lw, label="GPR")
# +/- one predictive standard deviation band around the GP mean.
plt.fill_between(
    X_plot[:, 0], y_gpr - y_std, y_gpr + y_std, color="darkorange", alpha=0.2
)
plt.xlabel("data")
plt.ylabel("target")
plt.xlim(0, 20)
plt.ylim(-4, 4)
plt.title("GPR versus Kernel Ridge")
plt.legend(loc="best", scatterpoints=1, prop={"size": 8})
pml.savefig('krr_vs_gpr.pdf')
plt.show()
|
probml/pyprobml
|
scripts/krr_vs_gpr.py
|
Python
|
mit
| 2,543
|
[
"Gaussian"
] |
b3bbed0b9fe06efdcb9c82bec0d2c70282d3a18287e09d35f4570f6ae9b7794c
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and
from frappe import _
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.selling_controller import SellingController
# Maps a child-table fieldname to the HTML template used for its form grid.
form_grid_templates = {
	"sales_order_details": "templates/form_grid/item_grid.html"
}
class SalesOrder(SellingController):
	# Child-table doctype / fieldname used throughout this controller.
	tname = 'Sales Order Item'
	fname = 'sales_order_details'
	# NOTE(review): the three *_tname attributes below are not referenced
	# in the visible code -- presumably target-allocation child tables.
	person_tname = 'Target Detail'
	partner_tname = 'Partner Target Detail'
	territory_tname = 'Territory Target Detail'
	def validate_mandatory(self):
		"""Reject orders whose expected delivery date precedes the order date."""
		# validate transaction date v/s delivery date
		if self.delivery_date:
			if getdate(self.transaction_date) > getdate(self.delivery_date):
				frappe.throw(_("Expected Delivery Date cannot be before Sales Order Date"))

	def validate_po(self):
		"""Cross-check the customer's purchase-order reference.

		Raises if the PO date is after the expected delivery date; warns
		(non-blocking msgprint) when another active Sales Order already
		carries the same po_no for this customer.
		"""
		# validate p.o date v/s delivery date
		if self.po_date and self.delivery_date and getdate(self.po_date) > getdate(self.delivery_date):
			frappe.throw(_("Expected Delivery Date cannot be before Purchase Order Date"))

		if self.po_no and self.customer:
			# docstatus < 2 -- presumably excludes cancelled documents
			# (frappe convention); verify.
			so = frappe.db.sql("select name from `tabSales Order` \
				where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
				and customer = %s", (self.po_no, self.name, self.customer))
			if so and so[0][0]:
				frappe.msgprint(_("Warning: Sales Order {0} already exists against same Purchase Order number").format(so[0][0]))
	def validate_for_items(self):
		"""Per-row item checks: duplicate detection, warehouse presence for
		stock items, and projected-qty lookup from Bin."""
		# NOTE(review): `flag` is assigned but never used afterwards.
		check_list, flag = [], 0
		chk_dupl_itm = []
		for d in self.get('sales_order_details'):
			# stock items are deduplicated on (item, description, warehouse,
			# source quotation); non-stock items only on (item, description)
			e = [d.item_code, d.description, d.warehouse, d.prevdoc_docname or '']
			f = [d.item_code, d.description]

			if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 'Yes':
				if not d.warehouse:
					frappe.throw(_("Reserved warehouse required for stock item {0}").format(d.item_code))
				if e in check_list:
					frappe.throw(_("Item {0} has been entered twice").format(d.item_code))
				else:
					check_list.append(e)
			else:
				if f in chk_dupl_itm:
					frappe.throw(_("Item {0} has been entered twice").format(d.item_code))
				else:
					chk_dupl_itm.append(f)
			# used for production plan
			d.transaction_date = self.transaction_date

			tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
				where item_code = %s and warehouse = %s", (d.item_code,d.warehouse))
			d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0

	def validate_sales_mntc_quotation(self):
		"""Warn when a source quotation's order_type differs from this order's."""
		for d in self.get('sales_order_details'):
			if d.prevdoc_docname:
				res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s", (d.prevdoc_docname, self.order_type))
				if not res:
					frappe.msgprint(_("Quotation {0} not of type {1}").format(d.prevdoc_docname, self.order_type))
	def validate_order_type(self):
		# delegate to SellingController's shared order-type validation
		super(SalesOrder, self).validate_order_type()

	def validate_delivery_date(self):
		"""Require a delivery date on 'Sales' orders, then check quotations."""
		if self.order_type == 'Sales' and not self.delivery_date:
			frappe.throw(_("Please enter 'Expected Delivery Date'"))

		self.validate_sales_mntc_quotation()

	def validate_proj_cust(self):
		"""Ensure the linked project belongs to this customer (or to no customer)."""
		if self.project_name and self.customer_name:
			res = frappe.db.sql("""select name from `tabProject` where name = %s
				and (customer = %s or ifnull(customer,'')='')""",
				(self.project_name, self.customer))
			if not res:
				frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project_name))
	def validate(self):
		"""Run all document-level validations before save/submit."""
		super(SalesOrder, self).validate()

		self.validate_order_type()
		# NOTE(review): the delivery-date check is deliberately disabled here.
		#self.validate_delivery_date()
		self.validate_mandatory()
		self.validate_proj_cust()
		self.validate_po()
		self.validate_uom_is_integer("stock_uom", "qty")
		self.validate_for_items()
		self.validate_warehouse()

		from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
		make_packing_list(self,'sales_order_details')

		self.validate_with_previous_doc()

		if not self.status:
			self.status = "Draft"

		from erpnext.utilities import validate_status
		validate_status(self.status, ["Draft", "Submitted", "Stopped",
			"Cancelled"])

		# default the billing/delivery status for new documents
		if not self.billing_status: self.billing_status = 'Not Billed'
		if not self.delivery_status: self.delivery_status = 'Not Delivered'

	def validate_warehouse(self):
		"""Ensure every distinct item warehouse belongs to this company."""
		from erpnext.stock.utils import validate_warehouse_company

		warehouses = list(set([d.warehouse for d in
			self.get(self.fname) if d.warehouse]))

		for w in warehouses:
			validate_warehouse_company(w, self.company)
	def validate_with_previous_doc(self):
		"""Check company/currency consistency against source Quotations."""
		super(SalesOrder, self).validate_with_previous_doc(self.tname, {
			"Quotation": {
				"ref_dn_field": "prevdoc_docname",
				"compare_fields": [["company", "="], ["currency", "="]]
			}
		})

	def update_enquiry_status(self, prevdoc, flag):
		"""Set the status of the Opportunity behind quotation `prevdoc` to `flag`."""
		enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
		if enq:
			frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))

	def update_prevdoc_status(self, flag):
		"""Refresh the status of every distinct source Quotation.

		NOTE(review): `flag` is unused; Quotation.set_status derives the
		status itself. Raises if any source quotation is cancelled.
		"""
		for quotation in list(set([d.prevdoc_docname for d in self.get(self.fname)])):
			if quotation:
				doc = frappe.get_doc("Quotation", quotation)
				if doc.docstatus==2:
					frappe.throw(_("Quotation {0} is cancelled").format(quotation))
				doc.set_status(update=True)
def on_submit(self):
    # Submission hook: reserve stock, run credit/authority checks, then mark
    # the order (and its source quotations) submitted.
    super(SalesOrder, self).on_submit()
    self.update_stock_ledger(update_stock = 1)
    self.check_credit(self.grand_total)
    frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.grand_total, self)
    #self.validate_contract(self.customer)
    self.update_prevdoc_status('submit')
    frappe.db.set(self, 'status', 'Submitted')
def validate_contract(self, customer):
    """Reject orders dated outside the customer's contract period, if any.

    :param customer: customer name used to look up `Customer Contract Form`
    :raises frappe.ValidationError: when a contract exists and
        ``self.transaction_date`` falls outside its start/end dates.
    """
    # SECURITY FIX: the query was previously built with %-string interpolation
    # of the customer name, which allowed SQL injection. Use a parameterized
    # query instead (frappe.db.sql escapes the bound value).
    contract_dates = frappe.db.sql("""select contract_start_date, contract_end_date
        from `tabCustomer Contract Form` where Customer = %s""",
        (customer,), as_list=1)
    if contract_dates:
        if contract_dates[0][0] <= self.transaction_date <= contract_dates[0][1]:
            pass
        else:
            frappe.msgprint("Selected customer contract is expired", raise_exception=1)
def on_cancel(self):
    # Cannot cancel stopped SO
    if self.status == 'Stopped':
        frappe.throw(_("Stopped order cannot be cancelled. Unstop to cancel."))
    # Refuse cancellation while any downstream document is still submitted.
    self.check_nextdoc_docstatus()
    self.update_stock_ledger(update_stock = -1)
    self.update_prevdoc_status('cancel')
    frappe.db.set(self, 'status', 'Cancelled')
def check_nextdoc_docstatus(self):
    """Throw if any submitted downstream document still references this order.

    Checks, in order: Delivery Notes, Sales Invoices, Maintenance Schedules,
    Maintenance Visits and Production Orders (all with docstatus == 1).
    """
    # Checks Delivery Note
    submit_dn = frappe.db.sql_list("""select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2
        where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
    if submit_dn:
        frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_dn)))
    # Checks Sales Invoice
    submit_rv = frappe.db.sql_list("""select t1.name
        from `tabSales Invoice` t1,`tabSales Invoice Item` t2
        where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
        self.name)
    if submit_rv:
        frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_rv)))
    #check maintenance schedule
    submit_ms = frappe.db.sql_list("""select t1.name from `tabMaintenance Schedule` t1,
        `tabMaintenance Schedule Item` t2
        where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""", self.name)
    if submit_ms:
        frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_ms)))
    # check maintenance visit
    submit_mv = frappe.db.sql_list("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
        where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
    if submit_mv:
        frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_mv)))
    # check production order
    pro_order = frappe.db.sql_list("""select name from `tabProduction Order`
        where sales_order = %s and docstatus = 1""", self.name)
    if pro_order:
        frappe.throw(_("Production Order {0} must be cancelled before cancelling this Sales Order").format(comma_and(pro_order)))
def check_modified_date(self):
    # Guard against stale in-memory copies: compare this document's
    # `modified` timestamp with the one stored in the database.
    # NOTE(review): the %-interpolation here only embeds DB-sourced
    # timestamps, but a parameterized query would still be safer — confirm.
    mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
    date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" %
        ( mod_db, cstr(self.modified)))
    if date_diff and date_diff[0][0]:
        frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))
def stop_sales_order(self):
    # Stop the order: release the reserved stock and mark it Stopped.
    self.check_modified_date()
    self.update_stock_ledger(-1)
    frappe.db.set(self, 'status', 'Stopped')
    frappe.msgprint(_("{0} {1} status is Stopped").format(self.doctype, self.name))
def unstop_sales_order(self):
    # Reverse of stop_sales_order: re-reserve stock and restore Submitted status.
    self.check_modified_date()
    self.update_stock_ledger(1)
    frappe.db.set(self, 'status', 'Submitted')
    frappe.msgprint(_("{0} {1} status is Unstopped").format(self.doctype, self.name))
def update_stock_ledger(self, update_stock):
    """Adjust reserved quantity in each item's warehouse bin.

    :param update_stock: +1 to reserve stock, -1 to release it (used as a
        sign multiplier on each row's reserved_qty).
    """
    from erpnext.stock.utils import update_bin
    for d in self.get_item_list():
        # Only stock items have bins to update.
        if frappe.db.get_value("Item", d['item_code'], "is_stock_item") == "Yes":
            args = {
                "item_code": d['item_code'],
                "warehouse": d['reserved_warehouse'],
                "reserved_qty": flt(update_stock) * flt(d['reserved_qty']),
                "posting_date": self.transaction_date,
                "voucher_type": self.doctype,
                "voucher_no": self.name,
                "is_amended": self.amended_from and 'Yes' or 'No'
            }
            update_bin(args)
def get_portal_page(self):
    """Portal page slug for this document; only submitted orders are shown."""
    if self.docstatus == 1:
        return "order"
    return None
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
    """Map a submitted Sales Order into a new Purchase-type Material Request."""
    def postprocess(source, doc):
        # All requests created from a sales order are purchase requests.
        doc.material_request_type = "Purchase"

    doc = get_mapped_doc("Sales Order", source_name, {
        "Sales Order": {
            "doctype": "Material Request",
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Sales Order Item": {
            "doctype": "Material Request Item",
            "field_map": {
                "parent": "sales_order_no",
                "stock_uom": "uom"
            }
        }
    }, target_doc, postprocess)
    return doc
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
    """Map a submitted Sales Order into a Delivery Note for the undelivered qty."""
    def set_missing_values(source, target):
        target.ignore_pricing_rule = 1
        target.run_method("set_missing_values")
        target.run_method("calculate_taxes_and_totals")

    def update_item(source, target, source_parent):
        # Carry over only the still-undelivered portion of each row.
        target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
        target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
        target.qty = flt(source.qty) - flt(source.delivered_qty)

    target_doc = get_mapped_doc("Sales Order", source_name, {
        "Sales Order": {
            "doctype": "Delivery Note",
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Sales Order Item": {
            "doctype": "Delivery Note Item",
            "field_map": {
                "rate": "rate",
                "name": "prevdoc_detail_docname",
                "parent": "against_sales_order",
            },
            "postprocess": update_item,
            # Skip rows that are already fully delivered.
            "condition": lambda doc: doc.delivered_qty < doc.qty
        },
        "Sales Taxes and Charges": {
            "doctype": "Sales Taxes and Charges",
            "add_if_empty": True
        },
        "Sales Team": {
            "doctype": "Sales Team",
            "add_if_empty": True
        }
    }, target_doc, set_missing_values)
    return target_doc
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
    """Map a submitted Sales Order into a Sales Invoice.

    Besides the standard field mapping, this pulls "shelf ready service"
    rows from submitted `Process` documents linked to the order and appends
    them as extra invoice items.
    """
    def postprocess(source, target):
        set_missing_values(source, target)
        #Get the advance paid Journal Vouchers in Sales Invoice Advance
        target.get_advances()
        # To get process details against sales order for which you are generating sales invoice---------
        if source.doctype=='Sales Order':
            #frappe.errprint("in sales invocie")
            get_shelf_service_details(source,source_name,target)
            # NOTE(review): set_missing_values/get_advances are invoked a
            # second time here — looks redundant with the two calls above;
            # confirm whether the repeat is intentional after item append.
            set_missing_values(source, target)
            target.get_advances()
            #update_item(source,target,source_parent)

    def get_shelf_service_details(source,source_name,target):
        # NOTE(review): %-interpolation of source_name into SQL — source_name
        # comes from a whitelisted endpoint, so this should be parameterized.
        process=frappe.db.sql(""" select name from `tabProcess` where get_sales_order='%s'
            and docstatus=1 and sales_invoice_status='Not Done'"""%source_name,as_list=1)
        if process:
            #frappe.errprint(process)
            for [name] in process:
                create_sales_invoice_item_entry(name,target)
            #update_process_entry()
            update_sales_order_process_status(source_name)

    def update_process_entry(name):
        # NOTE(review): defined but its only call sites are commented out.
        frappe.db.sql("""update `tabProcess` set sales_invoice_status='Done' where
            name='%s'"""%name)
        frappe.db.commit()

    def update_sales_order_process_status(source_name):
        frappe.db.sql("""update `tabSales Order` set process_status='Completed' where
            name='%s'"""%source_name)
        frappe.db.commit()

    def create_sales_invoice_item_entry(name,target):
        # Append one invoice item per shelf-ready-service row of the process.
        service_details=frappe.db.sql("""select p.process_type, s.qty,s.charge,s.amount,s.process,s.file_name from `tabShelf Ready Service Details` s
            inner join `tabProcess` p on s.parent=p.name where s.parent='%s' """%name,as_list=1)
        for i in service_details:
            #frappe.errprint(target)
            si = target.append('entries', {})
            si.item_code=i[0]
            si.item_name=i[0]
            si.description=i[0]
            si.qty=i[1]
            si.rate=i[2]
            si.amount=i[3]
            si.shelf_ready_service_name=i[4]
            si.marcfile_name=i[5]
            # NOTE(review): source_name is resolved from the enclosing
            # make_sales_invoice closure, not from a parameter — verify.
            si.sales_order=source_name
            # NOTE(review): hard-coded company-specific accounts ('- D').
            si.income_account='Sales - D'
            si.cost_center='Main - D'
            si.process_id= name
            #update_process_entry(name)

    def set_missing_values(source, target):
        target.is_pos = 0
        target.ignore_pricing_rule = 1
        target.run_method("set_missing_values")
        target.run_method("calculate_taxes_and_totals")

    def update_item(source, target, source_parent):
        # Invoice only the not-yet-billed portion of each row.
        target.amount = flt(source.amount) - flt(source.billed_amt)
        target.base_amount = target.amount * flt(source_parent.conversion_rate)
        target.qty = target.amount / flt(source.rate) if (source.rate and source.billed_amt) else source.qty

    doclist = get_mapped_doc("Sales Order", source_name, {
        "Sales Order": {
            "doctype": "Sales Invoice",
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Sales Order Item": {
            "doctype": "Sales Invoice Item",
            "field_map": {
                "name": "so_detail",
                "parent": "sales_order",
            },
            "postprocess": update_item,
            "condition": lambda doc: doc.base_amount==0 or doc.billed_amt < doc.amount
        },
        "Sales Taxes and Charges": {
            "doctype": "Sales Taxes and Charges",
            "add_if_empty": True
        },
        "Sales Team": {
            "doctype": "Sales Team",
            "add_if_empty": True
        }
    }, target_doc, postprocess)

    def set_advance_vouchers(source, target):
        # NOTE(review): apparently dead code — never called, and the SQL
        # template still contains an unformatted {amount_query} placeholder.
        advance_voucher_list = []
        advance_voucher = frappe.db.sql("""
            select
                t1.name as voucher_no, t1.posting_date, t1.remark, t2.account,
                t2.name as voucher_detail_no, {amount_query} as payment_amount, t2.is_advance
            from
                `tabJournal Voucher` t1, `tabJournal Voucher Detail` t2
            """)

    return doclist
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
    """Map a submitted Sales Order into a Maintenance Schedule.

    Returns the mapped document, or None when a submitted schedule already
    exists for the order.
    """
    maint_schedule = frappe.db.sql("""select t1.name
        from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
        where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1""", source_name)

    # BUG FIX: doclist was only assigned inside the `if not maint_schedule`
    # branch, so `return doclist` raised UnboundLocalError whenever a
    # submitted schedule already existed. Initialize it to None.
    doclist = None
    if not maint_schedule:
        doclist = get_mapped_doc("Sales Order", source_name, {
            "Sales Order": {
                "doctype": "Maintenance Schedule",
                "field_map": {
                    "name": "sales_order_no"
                },
                "validation": {
                    "docstatus": ["=", 1]
                }
            },
            "Sales Order Item": {
                "doctype": "Maintenance Schedule Item",
                "field_map": {
                    "parent": "prevdoc_docname"
                },
                "add_if_empty": True
            }
        }, target_doc)

    return doclist
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
    """Map a submitted Sales Order into a Maintenance Visit.

    Returns the mapped document, or None when a fully-completed submitted
    visit already exists for the order.
    """
    visit = frappe.db.sql("""select t1.name
        from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
        where t2.parent=t1.name and t2.prevdoc_docname=%s
        and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)

    # BUG FIX: doclist was only assigned when no completed visit existed, so
    # `return doclist` raised UnboundLocalError otherwise. Initialize to None.
    doclist = None
    if not visit:
        doclist = get_mapped_doc("Sales Order", source_name, {
            "Sales Order": {
                "doctype": "Maintenance Visit",
                "field_map": {
                    "name": "sales_order_no"
                },
                "validation": {
                    "docstatus": ["=", 1]
                }
            },
            "Sales Order Item": {
                "doctype": "Maintenance Visit Purpose",
                "field_map": {
                    "parent": "prevdoc_docname",
                    "parenttype": "prevdoc_doctype"
                },
                "add_if_empty": True
            }
        }, target_doc)

    return doclist
|
Tejal011089/digitales_erpnext
|
erpnext/selling/doctype/sales_order/sales_order.py
|
Python
|
agpl-3.0
| 16,661
|
[
"VisIt"
] |
c609c7039bd2da19bbb3d03c82c6d8ee2616f08904367795084831ce8d0aaa0d
|
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import calendar
from collections import deque, namedtuple
from datetime import datetime
from google.protobuf.descriptor import FieldDescriptor
import pytz
from jormungandr.timezone import get_timezone
from navitiacommon import response_pb2, type_pb2
from builtins import range, zip
from importlib import import_module
import logging
from jormungandr.exceptions import ConfigException, UnableToParse, InvalidArguments
from six.moves.urllib.parse import urlparse
from jormungandr import new_relic
from six.moves import range
from six.moves import zip
from jormungandr.exceptions import TechnicalError
from flask import request
import re
import flask
from contextlib import contextmanager
import functools
import sys
# Runtime flags used to branch py2/py3-specific code paths in this module.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# Navitia's compact datetime representation, e.g. "20171225T080759".
DATETIME_FORMAT = "%Y%m%dT%H%M%S"
def get_uri_pt_object(pt_object):
    """Return the navitia uri of a PtObject; addresses become 'coord:lon:lat' uris."""
    if pt_object.embedded_type != type_pb2.ADDRESS:
        return pt_object.uri
    # Address uris are 'lon;lat'; rewrite them into the coord uri scheme.
    parts = pt_object.uri.split(';')
    return "coord:{}:{}".format(parts[0], parts[1])
def kilometers_to_meters(distance):
    """Convert a distance expressed in kilometers to meters (as a float)."""
    return 1000.0 * distance
def is_coord(uri):
    """True when *uri* parses as a 'lon;lat' coordinate pair."""
    # for the moment we do a simple check
    lon, lat = get_lon_lat(uri)
    return not (lon is None and lat is None)
def get_lon_lat(uri):
    """
    Parse an uri formatted as 'lon;lat' into a (float, float) pair.

    Returns (None, None) for an empty uri, a wrong number of ';'
    separators, or non-float components.

    >>> get_lon_lat('12.3;-5.3')
    (12.3, -5.3)
    >>> get_lon_lat('bob')
    (None, None)
    >>> get_lon_lat('5.3;bob')
    (None, None)
    >>> get_lon_lat('5.0;0.0')
    (5.0, 0.0)
    """
    if uri and uri.count(';') == 1:
        lon_str, _, lat_str = uri.partition(';')
        try:
            return float(lon_str), float(lat_str)
        except ValueError:
            pass
    return None, None
def is_url(url):
    """True when *url* has both a non-blank scheme and a non-blank netloc."""
    if not url or not url.strip():
        return False
    parsed = urlparse(url)
    return bool(parsed.scheme.strip()) and bool(parsed.netloc.strip())
def str_to_time_stamp(str):
    """
    Convert a 'YYYYMMDDTHHMMSS' string (e.g. 20171225T124500) to a posix
    timestamp, interpreting the value as UTC.
    """
    parsed = datetime.strptime(str, DATETIME_FORMAT)
    return date_to_timestamp(parsed)
def str_to_dt(str):
    """Parse a 'YYYYMMDDTHHMMSS' formatted string into a naive datetime."""
    return datetime.strptime(str, "%Y%m%dT%H%M%S"[:] if False else DATETIME_FORMAT)
def date_to_timestamp(date):
    """Seconds elapsed between the epoch (1970-01-01 UTC) and *date*, as an int."""
    utc_tuple = date.utctimetuple()
    return int(calendar.timegm(utc_tuple))
def str_datetime_utc_to_local(dt, timezone):
    """Convert an UTC datetime string (or now() when empty) into a local-time
    string formatted with DATETIME_FORMAT.

    :param dt: datetime string parsed by DateTimeFormat, or falsy for "now"
    :param timezone: tz database name, e.g. 'Europe/Paris'
    """
    from jormungandr.interfaces.parsers import DateTimeFormat
    if dt:
        utc_dt = DateTimeFormat()(dt)
    else:
        utc_dt = datetime.utcnow()
    local = pytz.timezone(timezone)
    return dt_to_str(utc_dt.replace(tzinfo=pytz.UTC).astimezone(local))
def timestamp_to_datetime(timestamp, tz=None):
    """
    Convert a timestamp to datetime
    if timestamp > MAX_INT we return None

    Returns a timezone-aware datetime in *tz* (or the request timezone),
    or None when the timestamp overflows or no timezone is available.
    """
    maxint = 9223372036854775807
    # when a date is > 2038-01-19 03:14:07
    # we receive a timestamp = 18446744071562142720 (64 bits) > 9223372036854775807 (MAX_INT 32 bits)
    # And ValueError: timestamp out of range for platform time_t is raised
    if timestamp >= maxint:
        return None

    dt = datetime.utcfromtimestamp(timestamp)

    timezone = tz or get_timezone()
    if timezone:
        # Localize as UTC first, then shift into the target timezone.
        dt = pytz.utc.localize(dt)
        return dt.astimezone(timezone)
    return None
def dt_to_str(dt):
    """Format a datetime using navitia's compact 'YYYYMMDDTHHMMSS' form."""
    return datetime.strftime(dt, DATETIME_FORMAT)
def timestamp_to_str(timestamp):
    """Localized DATETIME_FORMAT string for a posix timestamp, or None."""
    dt = timestamp_to_datetime(timestamp)
    return dt_to_str(dt) if dt else None
def walk_dict(tree, visitor):
    """
    depth first search on a dict.
    call the visit(elem) method on the visitor for each node
    if the visitor returns True, stop the search

    >>> bob = {'tutu': 1,
    ...        'tata': [1, 2],
    ...        'toto': {'bob':12, 'bobette': 13, 'nested_bob': {'bob': 3}},
    ...        'tete': ('tuple1', ['ltuple1', 'ltuple2']),
    ...        'titi': [{'a':1}, {'b':1}]}

    >>> def my_visitor(name, val):
    ...     print("{}={}".format(name, val))

    >>> walk_dict(bob, my_visitor)
    titi={u'b': 1}
    b=1
    titi={u'a': 1}
    a=1
    tete=ltuple2
    tete=ltuple1
    tete=tuple1
    tutu=1
    toto={u'bobette': 13, u'bob': 12, u'nested_bob': {u'bob': 3}}
    nested_bob={u'bob': 3}
    bob=3
    bob=12
    bobette=13
    tata=2
    tata=1

    >>> def my_stoper_visitor(name, val):
    ...     print("{}={}".format(name, val))
    ...     if name == 'tete':
    ...         return True

    >>> walk_dict(bob, my_stoper_visitor)
    titi={u'b': 1}
    b=1
    titi={u'a': 1}
    a=1
    tete=ltuple2
    """
    # The deque is used as a LIFO stack (append/pop), hence depth-first order.
    queue = deque()

    def add_elt(name, elt, first=False):
        # Push children of a collection; lists/tuples keep the parent's name.
        if isinstance(elt, (list, tuple)):
            for val in elt:
                queue.append((name, val))
        elif hasattr(elt, 'items'):
            for k, v in elt.items():
                queue.append((k, v))
        elif first:  # for the first elt, we add it even if it is no collection
            queue.append((name, elt))

    add_elt("main", tree, first=True)
    while queue:
        elem = queue.pop()
        #we don't want to visit the list, we'll visit each node separately
        if not isinstance(elem[1], (list, tuple)):
            if visitor(elem[0], elem[1]) is True:
                #we stop the search if the visitor returns True
                break
        #for list and tuple, the name is the parent's name
        add_elt(elem[0], elem[1])
def walk_protobuf(pb_object, visitor):
    """
    Walk on a protobuf and call the visitor for each nodes

    >>> journeys = response_pb2.Response()
    >>> journey_standard = journeys.journeys.add()
    >>> journey_standard.type = "none"
    >>> journey_standard.duration = 1
    >>> journey_standard.nb_transfers = 2
    >>> s = journey_standard.sections.add()
    >>> s.duration = 3
    >>> s = journey_standard.sections.add()
    >>> s.duration = 4
    >>> journey_rapid = journeys.journeys.add()
    >>> journey_rapid.duration = 5
    >>> journey_rapid.nb_transfers = 6
    >>> s = journey_rapid.sections.add()
    >>> s.duration = 7
    >>>
    >>> from collections import defaultdict
    >>> types_counter = defaultdict(int)
    >>> def visitor(name, val):
    ...     types_counter[type(val)] +=1
    >>>
    >>> walk_protobuf(journeys, visitor)
    >>> types_counter[response_pb2.Response]
    1
    >>> types_counter[response_pb2.Journey]
    2
    >>> types_counter[response_pb2.Section]
    3
    >>> types_counter[int]  # and 7 int in all
    7
    """
    # LIFO stack => depth-first traversal over the message tree.
    queue = deque()

    def add_elt(name, elt):
        # Only protobuf messages expose ListFields(); scalars are leaves.
        try:
            fields = elt.ListFields()
        except AttributeError:
            return
        for field, value in fields:
            if field.label == FieldDescriptor.LABEL_REPEATED:
                for v in value:
                    queue.append((field.name, v))
            else:
                queue.append((field.name, value))

    # add_elt("main", pb_object)
    queue.append(('main', pb_object))
    while queue:
        elem = queue.pop()
        visitor(elem[0], elem[1])
        add_elt(elem[0], elem[1])
def realtime_level_to_pbf(level):
    """Map a data-freshness level name to its protobuf enum value.

    :raises ValueError: for any unknown level name.
    """
    if level == 'base_schedule':
        return type_pb2.BASE_SCHEDULE
    if level == 'adapted_schedule':
        return type_pb2.ADAPTED_SCHEDULE
    if level == 'realtime':
        return type_pb2.REALTIME
    raise ValueError('Impossible to convert in pbf')
#we can't use reverse(enumerate(list)) without creating a temporary
#list, so we define our own reverse enumerate
def reverse_enumerate(l):
    """Yield (index, item) pairs from the last element back to the first,
    without materializing a temporary list.
    """
    # BUG FIX: the original used the py2-only builtin `xrange`, which raises
    # NameError on python 3 (the module otherwise imports range/zip from
    # six.moves and is meant to run on both runtimes).
    return zip(range(len(l) - 1, -1, -1), reversed(l))
def pb_del_if(l, pred):
    '''
    Delete the elements such as pred(e) is true in a protobuf list.
    Return the number of elements deleted.
    '''
    deleted = 0
    # Walk backwards so deletions don't shift the indices still to visit.
    for idx in range(len(l) - 1, -1, -1):
        if pred(l[idx]):
            del l[idx]
            deleted += 1
    return deleted
def create_object(configuration):
    """
    Create an object from a dict
    The dict must contains a 'class' key with the class path of the class we want to create
    It can contains also an 'args' key with a dictionary of arguments to pass to the constructor

    :raises ConfigException: when the class path is malformed, cannot be
        imported, or the constructor rejects the provided arguments.
    """
    class_path = configuration['class']
    kwargs = configuration.get('args', {})
    log = logging.getLogger(__name__)
    try:
        if '.' not in class_path:
            log.warning('impossible to build object {}, wrongly formated class'.format(class_path))
            raise ConfigException(class_path)
        module_path, name = class_path.rsplit('.', 1)
        module = import_module(module_path)
        attr = getattr(module, name)
    except AttributeError as e:
        log.warning('impossible to build object {} : {}'.format(class_path, e))
        raise ConfigException(class_path)
    except ImportError:
        log.exception('impossible to build object {}, cannot find class'.format(class_path))
        raise ConfigException(class_path)

    try:
        obj = attr(**kwargs)  # call to the constructor, with all the args
    except TypeError as e:
        # BUG FIX: the original logged `e.message`, which does not exist on
        # python 3 exceptions and raised AttributeError, masking the real
        # error; formatting the exception itself is portable.
        log.warning('impossible to build object {}, wrong arguments: {}'.format(class_path, e))
        raise ConfigException(class_path)
    return obj
def generate_id():
    """Return a fresh random UUID (version 4)."""
    from uuid import uuid4
    return uuid4()
def get_pt_object_coord(pt_object):
    """
    Given a PtObject, return the coord according to its embedded_type

    :param pt_object: type_pb2.PtObject
    :return: coord: type_pb2.GeographicalCoord
    :raises InvalidArguments: when pt_object is not a PtObject message
    :raises UnableToParse: when the embedded type carries no coord

    >>> pt_object = type_pb2.PtObject()
    >>> pt_object.embedded_type = type_pb2.POI
    >>> pt_object.poi.coord.lon = 42.42
    >>> pt_object.poi.coord.lat = 41.41
    >>> coord = get_pt_object_coord(pt_object)
    >>> coord.lon
    42.42
    >>> coord.lat
    41.41
    """
    if not isinstance(pt_object, type_pb2.PtObject):
        logging.getLogger(__name__).error('Invalid pt_object')
        raise InvalidArguments('Invalid pt_object')

    # Maps each embedded type to the sub-message attribute that owns the coord.
    map_coord = {
        type_pb2.STOP_POINT: "stop_point",
        type_pb2.STOP_AREA: "stop_area",
        type_pb2.ADDRESS: "address",
        type_pb2.ADMINISTRATIVE_REGION: "administrative_region",
        type_pb2.POI: "poi"
    }
    attr = getattr(pt_object,
                   map_coord.get(pt_object.embedded_type, ""),
                   None)
    coord = getattr(attr, "coord", None)

    if not coord:
        logging.getLogger(__name__).error('Invalid coord for ptobject type: {}'.format(pt_object.embedded_type))
        raise UnableToParse('Invalid coord for ptobject type: {}'.format(pt_object.embedded_type))
    return coord
def record_external_failure(message, connector_type, connector_name):
    """Push a '<connector_type>_external_failure' custom event to newrelic."""
    # BUG FIX: the original called the py2-only builtin `unicode`, which
    # raises NameError on python 3; six.text_type is unicode on py2 and
    # str on py3 (six is already a dependency of this module).
    from six import text_type
    params = {'{}_system_id'.format(connector_type): text_type(connector_name), 'message': message}
    new_relic.record_custom_event('{}_external_failure'.format(connector_type), params)
def decode_polyline(encoded, precision=6):
    '''
    Version of : https://developers.google.com/maps/documentation/utilities/polylinealgorithm
    But with improved precision
    See: https://mapzen.com/documentation/mobility/decoding/#python (valhalla)
         http://developers.geovelo.fr/#/documentation/compute (geovelo)

    Returns a list of [lon, lat] float pairs.
    NOTE(review): the final rounding is hard-coded to 6 decimals ('%.6f')
    regardless of the *precision* argument — confirm that callers never pass
    a precision above 6.
    '''
    inv = 10**-precision
    decoded = []
    previous = [0, 0]
    i = 0
    #for each byte
    while i < len(encoded):
        #for each coord (lat, lon)
        ll = [0, 0]
        for j in [0, 1]:
            shift = 0
            byte = 0x20
            #keep decoding bytes until you have this coord
            while byte >= 0x20:
                # Each char encodes 5 payload bits; bit 0x20 flags continuation.
                byte = ord(encoded[i]) - 63
                i += 1
                ll[j] |= (byte & 0x1f) << shift
                shift += 5
            #get the final value adding the previous offset and remember it for the next
            # Zig-zag decode: LSB is the sign bit.
            ll[j] = previous[j] + (~(ll[j] >> 1) if ll[j] & 1 else (ll[j] >> 1))
            previous[j] = ll[j]
        #scale by the precision and chop off long coords also flip the positions so
        # #its the far more standard lon,lat instead of lat,lon
        decoded.append([float('%.6f' % (ll[1] * inv)), float('%.6f' % (ll[0] * inv))])
    #hand back the list of coordinates
    return decoded
# PeriodExtremity is used to provide a datetime and it's meaning
#  datetime: given datetime (obviously)
#  represents_start: is True if it's start of period, False if it's the end of period
#  (mostly used for fallback management in experimental scenario)
PeriodExtremity = namedtuple('PeriodExtremity', ['datetime', 'represents_start'])
class SectionSorter(object):
    """Old-style cmp callable ordering sections chronologically by
    (begin_date_time, end_date_time). Note: never returns 0 — two sections
    with identical bounds compare as 1, matching the original behavior.
    """
    def __call__(self, a, b):
        if a.begin_date_time == b.begin_date_time:
            return -1 if a.end_date_time < b.end_date_time else 1
        return -1 if a.begin_date_time < b.begin_date_time else 1
def make_namedtuple(typename, *fields, **fields_with_default):
    """
    helper to create a named tuple with some default values

    :param typename: name of the type
    :param fields: required arguments of the named tuple
    :param fields_with_default: optional fields with their default value
    :return: the namedtuple class

    >>> Bob = make_namedtuple('Bob', 'a', 'b', c=2, d=14)
    >>> Bob(b=14, a=12)
    Bob(a=12, b=14, c=2, d=14)
    >>> Bob(14, 12)  # non named argument also works
    Bob(a=14, b=12, c=2, d=14)
    >>> Bob(12, b=14, d=123)
    Bob(a=12, b=14, c=2, d=123)
    """
    import collections

    # Required fields first, then the defaulted ones (order preserved).
    all_fields = list(fields)
    all_fields.extend(fields_with_default.keys())

    named_type = collections.namedtuple(typename, all_fields)
    # __defaults__ applies right-aligned, i.e. to the trailing fields only.
    named_type.__new__.__defaults__ = tuple(fields_with_default.values())
    return named_type
def get_timezone_str(default='Africa/Abidjan'):
    """Zone name of the current request's timezone, or *default* when the
    timezone cannot be resolved."""
    try:
        tz = get_timezone()
    except TechnicalError:
        return default
    if tz:
        return tz.zone
    return default
def get_current_datetime_str(is_utc=False):
    """Current datetime string, honouring the `_current_datetime` request
    override, localized unless *is_utc* is True.
    """
    # 'Africa/Abidjan' is UTC+0 all year, used here as a stand-in for UTC.
    timezone = 'Africa/Abidjan' if is_utc else get_timezone_str()
    current_datetime = request.args.get('_current_datetime')
    return str_datetime_utc_to_local(current_datetime, timezone)
def make_timestamp_from_str(strftime):
    """
    Posix timestamp for a datetime string in any format dateutil can parse.

    :param strftime: a datetime string, with or without offset
    :return: int

    >>> make_timestamp_from_str("2017-12-25T08:07:59 +01:00")
    1514185679
    >>> make_timestamp_from_str("20171225T080759+01:00")
    1514185679
    >>> make_timestamp_from_str("2017-12-25 08:07:59 +01:00")
    1514185679
    >>> make_timestamp_from_str("20171225T080759Z")
    1514189279
    """
    from dateutil import parser
    import calendar

    parsed = parser.parse(strftime)
    return calendar.timegm(parsed.utctimetuple())
def get_house_number(housenumber):
    """Leading digits of *housenumber* as an int; 0 when absent, empty or None."""
    match = re.match(r'\d+', housenumber or "0")
    return int(match.group()) if match else 0
# The two following functions allow to use flask request context in greenlet
# The decorator provided by flask (@copy_current_request_context) will generate an assertion error with multiple greenlets
def copy_flask_request_context():
"""
Make a copy of the 'main' flask request conquest to be used with the context manager below
:return: a copy of the current flask request context
"""
# Copy flask request context to be used in greenlet
top = flask._request_ctx_stack.top
if top is None:
raise RuntimeError('This function can only be used at local scopes '
'when a request context is on the stack. For instance within '
'view functions.')
return top.copy()
@contextmanager
def copy_context_in_greenlet_stack(request_context):
    """
    Push a copy of the 'main' flask request context in a global stack created for it.
    Pop the copied request context to discard it

    ex:
        request_context = utils.copy_flask_request_context()
        def worker():
            with utils.copy_context_in_greenlet_stack(request_context):
                # do some work here with flask request context available
        gevent.spawn(worker)  # Multiples times

    :param request_context: a copy of the 'main' flask request context
    """
    flask.globals._request_ctx_stack.push(request_context)
    yield
    # NOTE(review): the pop is not in a try/finally, so an exception inside
    # the with-block leaks the pushed context — confirm intended.
    flask.globals._request_ctx_stack.pop()
def compose(*funs):
    """
    Compose functions left to right and return a callable:
    compose(f, g)(x) == g(f(x)). With no functions, the identity.

    >>> c = compose(lambda x: x+1, lambda x: 2*x)
    >>> c(42)
    86

    >>> f = lambda l: (x for x in l if x%3 == 0)
    >>> g = lambda l: (x for x in l if x%5 == 0)
    >>> list(compose(f, g)(range(45)))
    [0, 15, 30]
    """
    def composed(obj):
        for fun in funs:
            obj = fun(obj)
        return obj
    return composed
class ComposedFilter(object):
    """
    Compose several predicates into a single lazy filter.
    Filters are applied in insertion order; composing takes a snapshot, so
    filters added afterwards don't affect previously composed callables.

    >>> F = ComposedFilter()
    >>> f = F.add_filter(lambda x: x % 2 == 0).add_filter(lambda x: x % 5 == 0).compose_filters()
    >>> list(f(range(40)))
    [0, 10, 20, 30]
    >>> list(f(range(20)))  # the composed filter is reusable
    [0, 10]
    >>> f = F.add_filter(lambda x: x % 3 == 0).compose_filters()  # more filters can be added
    >>> list(f(range(40)))
    [0, 30]
    """
    def __init__(self):
        self.filters = []

    def add_filter(self, pred):
        def _lazy_filter(iterable, _pred=pred):
            return (item for item in iterable if _pred(item))
        self.filters.append(_lazy_filter)
        return self

    def compose_filters(self):
        snapshot = list(self.filters)

        def _composed(iterable):
            for flt in snapshot:
                iterable = flt(iterable)
            return iterable
        return _composed
def portable_min(*args, **kwargs):
    """
    min() wrapper that honours a `default=` keyword for empty iterables on
    python 2 as well (python 3 supports it natively).

    >>> portable_min([1], default=42)
    1
    >>> portable_min([], default=42)
    42
    >>> portable_min(iter(()), default=43)  # empty iterable
    43
    """
    if PY3:
        return min(*args, **kwargs)
    # python 2: emulate the `default` keyword by catching the empty-iterable error
    fallback = kwargs.pop('default', None)
    try:
        return min(*args, **kwargs)
    except ValueError:
        return fallback
|
kadhikari/navitia
|
source/jormungandr/jormungandr/utils.py
|
Python
|
agpl-3.0
| 19,997
|
[
"VisIt"
] |
28c230fc6334aee10f54d7ad138706696e0e6adf5107a0f9d4c7d041a513f067
|
""" DIRAC.TransformationSystem.Agent package """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
yujikato/DIRAC
|
src/DIRAC/TransformationSystem/Agent/__init__.py
|
Python
|
gpl-3.0
| 158
|
[
"DIRAC"
] |
5b771454a5ec736cf3e14a76791cafde931c15ab422e4687ab765c7b80014921
|
# copyright (c) 2003 Clark Evans
# You may copy this under the terms of any open source license.
#
# Documentation:
#
# A YPath expression is split into a recurisve combination of
# two sorts of items: path segments and predicates.
#
#
#
#
from types import ListType, StringType, IntType, DictType, InstanceType
import re
from urllib import quote
# Unique sentinel marking "no target" (distinct from None, which is a valid value).
noTarget = object()
def unquote(expr):
    """
    summary: >
        Simply returns the unquoted string, and the
        length of the quoted string token at the
        beginning of the expression.
    """
    tok = expr[0]
    if "'" == tok:
        # Single-quoted token: a doubled '' inside is an escaped quote.
        idx = 1
        odd = 0
        ret = ""
        while idx < len(expr):
            chr = expr[idx]
            if "'" == chr:
                if odd: ret += chr
                odd = not odd
            else:
                if odd:
                    # A lone closing quote followed by another char ends the token.
                    tok = expr[:idx]
                    break
                ret += chr
            idx += 1
        # Loop fell through without a terminator: the whole expr is the token.
        if "'" == tok: tok = expr
        return (ret,len(tok))
    if '"' == tok:
        # Double-quoted token: backslash escapes, decoded via eval below.
        idx = 1
        esc = 0
        while idx < len(expr):
            chr = expr[idx]
            if '"' == chr and not esc:
                tok = expr[:idx] + '"'
                break
            if '\\' == chr and not esc: esc = 1
            else: esc = 0
            idx += 1
        if '"' == tok:
            raise SyntaxError("unmatched quote: " + expr)
        ret = eval(tok) #TODO: find better way to unquote
        return (ret,len(tok))
    # Not quoted at all: the whole expression is the token.
    return (expr,len(expr))
def escape(node):
    """
    summary: >
        This function escapes a given key so that it
        may appear within a ypath. URI style escaping
        is used so that ypath expressions can be a
        valid URI expression.
    """
    # NOTE: IntType/StringType come from the py2-only `types` module; this
    # function therefore only supports int and str keys.
    typ = type(node)
    if typ is IntType: return str(node)
    if typ is StringType:
        return quote(node,'')
    raise ValueError("TODO: Support more than just string and integer keys.")
class context:
    """
    summary: >
        A ypath visit context through a YAML rooted graph.
        This is implemented as a 3-tuple including the parent
        node, the current key/index and the value. This is
        an immutable object so it can be cached.
    properties:
        key: mapping key or index within the parent collection
        value: current value within the parent's range
        parent: the parent context
        root: the very top of the yaml graph
        path: a tuple of the domain keys
    notes: >
        The context class doesn't yet handle going down the
        domain side of the tree...
    """
    def __init__(self,parent,key,value):
        """
        args:
            parent: parent context (or None if this is the root)
            key: mapping key or index for this context
            value: value of current location...
        """
        self.parent = parent
        self.key = key
        self.value = value
        if parent:
            assert parent.__class__ is self.__class__
            # Extend the parent's path with this (escaped) key.
            self.path = parent.path + (escape(key),)
            self.root = parent.root
        else:
            assert not key
            self.path = tuple()
            self.root = self
    def __setattr__(self,attname,attval):
        # Enforce immutability of the three core slots once assigned.
        if attname in ('parent','key','value'):
            if self.__dict__.get(attname):
                raise ValueError("context is read-only")
        self.__dict__[attname] = attval
    # Hash/compare by path so contexts can be cached and deduplicated.
    def __hash__(self): return hash(self.path)
    def __cmp__(self,other):
        try:
            return cmp(self.path,other.path)
        except AttributeError:
            return -1
    def __str__(self):
        # Render as an absolute, '/'-separated path; root is just '/'.
        if self.path:
            return "/".join(('',)+self.path)
        else:
            return '/'
def to_context(target):
    # Pass existing contexts through untouched; wrap anything else as a root
    # context. (InstanceType is py2-only: old-style class instances.)
    if type(target) is InstanceType:
        if target.__class__ is context:
            return target
    return context(None,None,target)
def context_test():
    # Self-test: builds a three-level map/list/value chain and checks the
    # path, key, value, parent, root, hash and string rendering invariants.
    lst = ['value']
    map = {'key':lst}
    x = context(None,None,map)
    y = context(x,'key',lst)
    z = context(y,0,'value')
    assert ('key',) == y.path
    assert 'key' == y.key
    assert lst == y.value
    assert x == y.parent
    assert x == y.root
    assert 0 == z.key
    assert 'value' == z.value
    assert y == z.parent
    assert x == z.root
    assert hash(x)
    assert hash(y)
    assert hash(z)
    assert '/' == str(x)
    assert '/key' == str(y)
    assert '/key/0' == str(z)
class null_seg:
    """
    summary: >
        This is the simplest path segment, it
        doesn't return any results and doesn't
        depend upon its context. It also happens to
        be the base class which all segments derive.
    """
    def __iter__(self):
        return self
    def next_null(self):
        # Terminal state shared by all segments: nothing (more) to yield.
        raise StopIteration
    def bind(self,cntx):
        """
        summary: >
            The bind function is called whenever
            the parent context has changed.
        """
        assert(cntx.__class__ is context)
        self.cntx = cntx
    def apply(self,target):
        # Convenience: bind to an arbitrary value (auto-wrapped) and iterate.
        self.bind(to_context(target))
        return iter(self)
    def exists(self,cntx):
        # True (1) when the segment yields at least one result for cntx.
        try:
            self.bind(cntx)
            self.next()
            return 1
        except StopIteration:
            return 0
    # Subclasses swap `next` between their producing state and next_null.
    next = next_null
class self_seg(null_seg):
    """
    summary: >
        This path segment returns the context
        node exactly once.
    """
    def __str__(self): return '.'
    def next_self(self):
        # Yield the bound context once, then fall back to the empty state.
        self.next = self.next_null
        return self.cntx
    def bind(self,cntx):
        null_seg.bind(self,cntx)
        self.next = self.next_self
class root_seg(self_seg):
    # '/': like self_seg, but always rebinds to the top of the graph.
    def __str__(self): return '/'
    def bind(self,cntx):
        self_seg.bind(self,cntx.root)
class parent_seg(self_seg):
    # '..': binds to the parent context; at the root it stays put.
    def __str__(self): return '..'
    def bind(self,cntx):
        if cntx.parent: cntx = cntx.parent
        self_seg.bind(self,cntx)
class wild_seg(null_seg):
    """
    summary: >
        The wild segment simply loops through
        all of the sub-contexts for a given object.
        If there aren't any children, this isn't an
        error it just doesn't return anything.
    """
    def __str__(self): return '*'
    def next_wild(self):
        # Python 2 iterator protocol: keys.next() raises StopIteration
        # when the children are exhausted, ending this segment too.
        key = self.keys.next()
        return context(self.cntx,key,self.values[key])
    def bind(self,cntx):
        null_seg.bind(self,cntx)
        # Only lists and dicts have children; any other value yields nothing.
        typ = type(cntx.value)
        if typ is ListType:
            self.keys = iter(xrange(0,len(cntx.value)))
            self.values = cntx.value
            self.next = self.next_wild
            return
        if typ is DictType:
            self.keys = iter(cntx.value)
            self.values = cntx.value
            self.next = self.next_wild
            return
        self.next = self.next_null
class trav_seg(null_seg):
    """
    summary: >
        This is a recursive traversal of the range, preorder.
        It is a recursive combination of self and wild.
    """
    def __str__(self): return '/'
    def next(self):
        # Iterative preorder walk.  Each stack entry is (context, child-iter);
        # a context is yielded the first time it is seen (child-iter None),
        # then its children are expanded via a bound wild_seg.
        while 1:
            (cntx,seg) = self.stk[-1]
            if not seg:
                seg = wild_seg()
                seg.bind(cntx)
                self.stk[-1] = (cntx,seg)
                return cntx
            try:
                cntx = seg.next()
                self.stk.append((cntx,None))
            except StopIteration:
                self.stk.pop()
                if not(self.stk):
                    self.next = self.next_null
                    raise StopIteration
    def bind(self,cntx):
        null_seg.bind(self,cntx)
        self.stk = [(cntx,None)]
class match_seg(self_seg):
    """
    summary: >
        Matches a particular key within the
        current context. Kinda boring.
    """
    def __str__(self): return str(self.key)
    def __init__(self,key):
        #TODO: Do better implicit typing
        # Keys that look like integers are coerced so they index lists.
        try:
            key = int(key)
        except: pass
        self.key = key
    def bind(self,cntx):
        # Bare except: any lookup failure (missing key, unindexable value)
        # leaves the segment empty instead of raising.
        try:
            mtch = cntx.value[self.key]
            cntx = context(cntx,self.key,mtch)
            self_seg.bind(self,cntx)
        except:
            null_seg.bind(self,cntx)
class conn_seg(null_seg):
    """
    summary: >
        When two segments are connected via a slash,
        this is a composite. For each context of the
        parent, it binds the child, and returns each
        context of the child.
    """
    def __str__(self):
        # A leading root segment already prints as '/', avoid '//'.
        if self.parent.__class__ == root_seg:
            return "/%s" % self.child
        return "%s/%s" % (self.parent, self.child)
    def __init__(self,parent,child):
        self.parent = parent
        self.child = child
    def next(self):
        # Exhaust the child for the current parent context, then advance
        # the parent and rebind the child; StopIteration propagates out
        # once the parent is exhausted as well.
        while 1:
            try:
                return self.child.next()
            except StopIteration:
                cntx = self.parent.next()
                self.child.bind(cntx)
    def bind(self,cntx):
        null_seg.bind(self,cntx)
        self.parent.bind(cntx)
        # Prime the child with the parent's first result, if there is one.
        try:
            cntx = self.parent.next()
        except StopIteration:
            return
        self.child.bind(cntx)
class pred_seg(null_seg):
    # parent[filter]: yields only those parent results for which the
    # predicate's exists() finds at least one match.
    def __str__(self): return "%s[%s]" % (self.parent, self.filter)
    def __init__(self,parent,filter):
        self.parent = parent
        self.filter = filter
    def next(self):
        while 1:
            ret = self.parent.next()
            if self.filter.exists(ret):
                return ret
    def bind(self,cntx):
        null_seg.bind(self,cntx)
        self.parent.bind(cntx)
class or_seg(null_seg):
    # lhs|rhs: every result from the left side, then right-side results
    # not already produced.  De-duplication keys on the contexts (hashed
    # by path); relies on contexts being truthy in unq.get().
    def __str__(self): return "%s|%s" % (self.lhs,self.rhs)
    def __init__(self,lhs,rhs):
        self.rhs = rhs
        self.lhs = lhs
        self.unq = {}
    def next(self):
        seg = self.lhs
        try:
            nxt = seg.next()
            self.unq[nxt] = nxt
            return nxt
        except StopIteration: pass
        seg = self.rhs
        while 1:
            nxt = seg.next()
            if self.unq.get(nxt,None):
                continue
            return nxt
    def bind(self,cntx):
        null_seg.bind(self,cntx)
        self.lhs.bind(cntx)
        self.rhs.bind(cntx)
class scalar:
    """Literal operand appearing in a ypath predicate (e.g. the `b` in [a=b])."""
    def __init__(self,val):
        self.val = val
    def value(self):
        """Return the wrapped literal unchanged."""
        return self.val
    def __str__(self):
        """Render the literal the way the predicate machinery compares it."""
        return str(self.val)
class equal_pred:
    # Equality predicate used inside [...] — specialised at construction
    # time by swapping in the cheapest possible `exists` implementation.
    def exists_true(self,cntx): return 1
    def exists_false(self,cntx): return 0
    def exists_scalar(self,cntx):
        # scalar vs segment: true if any value reached by the segment
        # stringifies to the scalar's string form.
        self.rhs.bind(cntx)
        try:
            while 1:
                cntx = self.rhs.next()
                if str(cntx.value) == self.lhs: #TODO: Remove type hack
                    return 1
        except StopIteration: pass
        return 0
    def exists_segment(self,cntx):
        # segment vs segment comparison is not implemented yet
        raise NotImplementedError()
    def __init__(self,lhs,rhs):
        if lhs.__class__ == scalar:
            if rhs.__class__ == scalar:
                # Both literal: the answer is a construction-time constant.
                if rhs.value() == lhs.value():
                    self.exists = self.exists_true
                else:
                    self.exists = self.exists_false
            else:
                self.exists = self.exists_scalar
        else:
            if rhs.__class__ == scalar:
                # Normalise so the scalar is always the left operand.
                (lhs,rhs) = (rhs,lhs)
                self.exists = self.exists_scalar
            else:
                self.exists = self.exists_segment
        self.lhs = str(lhs.value()) #TODO: Remove type hack
        self.rhs = rhs
# Token at the start of a path segment: word, /, ., *, or a quote.
matchSegment = re.compile(r"""^(\w+|/|\.|\*|\"|\')""")

def parse_segment(expr):
    """
    Segments occur between the slashes...
    """
    # Returns (segment-or-None, remaining-expression).
    mtch = matchSegment.search(expr)
    if not(mtch): return (None,expr)
    tok = mtch.group(); siz = len(tok)
    # A second '/' here means '//' (deep traversal); the slash is left in
    # `expr` so the caller's loop also treats it as the next separator.
    if '/' == tok: return (trav_seg(),expr)
    elif '.' == tok:
        if len(expr) > 1 and '.' == expr[1]:
            seg = parent_seg()
            siz = 2
        else:
            seg = self_seg()
    elif '*' == tok: seg = wild_seg()
    elif '"' == tok or "'" == tok:
        # unquote() is defined elsewhere in this module.
        (cur,siz) = unquote(expr)
        seg = match_seg(cur)
    else:
        seg = match_seg(tok)
    return (seg,expr[siz:])
# Token at the start of a predicate term: word, /, ., (, or a quote.
matchTerm = re.compile(r"""^(\w+|/|\.|\(|\"|\')""")

def parse_term(expr):
    """Parse one predicate term: a sub-path, a parenthesised predicate, or
    a (possibly quoted) literal.  Returns (term, remaining-expression)."""
    mtch = matchTerm.search(expr)
    if not(mtch): return (None,expr)
    tok = mtch.group(); siz = len(tok)
    if '/' == tok or '.' == tok:
        return parse(expr)
    if '(' == tok:
        # BUG FIX: the opening '(' must be consumed before recursing;
        # previously parse_predicate was handed the unchanged `expr`, so
        # parse_predicate -> parse_term recursed forever on the same input.
        # (Compare the '(' handling in parse(), which uses expr[1:].)
        (term,expr) = parse_predicate(expr[1:])
        assert ')' == expr[0]
        return (term,expr[1:])
    elif '"' == tok or "'" == tok:
        # unquote() is defined elsewhere in this module.
        (val,siz) = unquote(expr)
    else:
        val = tok; siz = len(tok)
    return (scalar(val),expr[siz:])
def parse_predicate(expr):
    """Parse the body of a [...] predicate: a term optionally followed by
    an operator.  Returns (predicate-or-segment, remaining-expression)."""
    (term,expr) = parse_term(expr)
    if not term: raise SyntaxError("term expected: '%s'" % expr)
    tok = expr[0]
    if '=' == tok:
        (rhs,expr) = parse_term(expr[1:])
        return (equal_pred(term,rhs),expr)
    if '(' == tok:
        # BUG FIX: raising a plain string is a TypeError on Python >= 2.6;
        # use a real exception type for the unimplemented call syntax.
        raise NotImplementedError("No functions allowed... yet!")
    if ']' == tok or ')' == tok:
        # A bare term used as a predicate is an existence test on that key.
        if term.__class__ is scalar:
            term = match_seg(str(term))
        return (term,expr)
    raise SyntaxError("ypath: expecting operator '%s'" % expr)
def parse_start(expr):
    """
    Initial checking on the expression, and
    determine if it is relative or absolute.
    """
    # StringType comes from the Python 2 `types` module.
    if type(expr) != StringType or len(expr) < 1:
        raise TypeError("string required: " + repr(expr))
    if '/' == expr[0]:
        ypth = root_seg()
    else:
        # Relative paths are anchored on a self segment and normalised to
        # start with '/' so the main parse loop only ever sees one shape.
        ypth = self_seg()
        expr = '/' + expr
    return (ypth,expr)
def parse(expr):
    """
    This the parser entry point, the top level node
    is always a root or self segment. The self isn't
    strictly necessary, but it keeps things simple.
    """
    (ypth,expr) = parse_start(expr)
    while expr:
        tok = expr[0]
        if '/' == tok:
            # Path step: connect the next segment onto the chain so far.
            (child, expr) = parse_segment(expr[1:])
            if child: ypth = conn_seg(ypth,child)
            continue
        if '[' == tok:
            # Filter: wrap the chain so far in a predicate segment.
            (filter, expr) = parse_predicate(expr[1:])
            assert ']' == expr[0]
            expr = expr[1:]
            ypth = pred_seg(ypth,filter)
            continue
        if '|' == tok:
            # Union: everything after the bar is parsed as a fresh path.
            (rhs, expr) = parse(expr[1:])
            ypth = or_seg(ypth,rhs)
            continue
        if '(' == tok:
            # Grouping: parse the parenthesised sub-path and connect it.
            (child,expr) = parse(expr[1:])
            assert ')' == expr[0]
            expr = expr[1:]
            ypth = conn_seg(ypth,child)
            continue
        break
    return (ypth,expr)
class convert_to_value(null_seg):
    # Adaptor: iterates an underlying segment but yields plain values
    # instead of context objects.
    def __init__(self,itr):
        self.itr = itr
    def next(self):
        return self.itr.next().value
    def bind(self,cntx):
        self.itr.bind(cntx)
def ypath(expr,target=noTarget,cntx=0):
    # Public entry point: compile `expr`; if `target` is given, apply the
    # compiled path to it immediately.  Results are plain values unless
    # cntx is truthy, in which case context objects are yielded.
    # (noTarget is a module-level sentinel defined elsewhere.)
    (ret,expr) = parse(expr)
    if expr: raise SyntaxError("ypath parse error `%s`" % expr)
    if not cntx: ret = convert_to_value(ret)
    if target is noTarget: return ret
    return ret.apply(target)
|
ystk/tools-yocto1-rpm
|
syck/ext/python/ypath.py
|
Python
|
lgpl-2.1
| 15,230
|
[
"VisIt"
] |
a93eba686113eb2d5c981ef7d3ef4b673b5efb86913aeb5affebf47eb8043b99
|
#!/usr/bin/python
"""
phylomizer - automated phylogenetic reconstruction pipeline - it resembles the
steps followed by a phylogenetist to build a gene family tree with error-
control of every step
Copyright (C) 2014-2016 - Salvador Capella-Gutierrez, Toni Gabaldon
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
## To guarantee compatibility with python3.4
from __future__ import print_function
desc = """
--
phylomizer - Copyright (C) 2014-2016 Salvador Capella-Gutierrez
[salcagu_at_gmail.com], Toni Gabaldon [tgabaldon_at_crg.es]
This program comes with ABSOLUTELY NO WARRANTY;
This is free software, and you are welcome to redistribute it
under certain conditions;
--
Phylogenetic tree reconstruction pipeline. It comprises three main steps:
1) Homology search using tools such as BLAST or HMMER.
2) Multiple Sequence Alignment (MSA) including the usage of different
aligners and the generation of alignments in different directions,
the generation of a meta-alignment and the trimming of these meta-
MSA using information from individual alignments.
3) Phylogenetic tree reconstruction using fast model selection over NJ
trees. It is possible to reconstruct trees using AA, NT or Codons.
Steps could be performed all together, individually or some combinations of
them e.g. homology + alignments, alignments + trees.
"""
import os
import sys
import argparse
import datetime
from module_homology import homology
from module_trees import phylogenetic_trees
from module_alignments import alignment, min_seqs_analysis
from module_utils import lookForFile, lookForDirectory, verbose_levels
from module_utils import readConfig, printConfig, default_verbose, format_time
from version import __version__, __revision__, __build__
__version = ("v%s rev:%s [BUILD:%s]") % (__version__, __revision__, __build__)
if __name__ == "__main__":

  ## Command-line driver: builds the CLI, validates input files/parameters,
  ## merges them with the config file, then runs the requested pipeline
  ## steps (homology -> alignments -> trees) while timing the whole run.
  usage = ("\n\npython %(prog)s -i seed_sequence/s -c config_file -o output_"
    + "directory -d sequences_db [other_options]\n")

  parser = argparse.ArgumentParser(description = desc, usage = usage,
    formatter_class = argparse.RawTextHelpFormatter)

  parser.add_argument("-i", "--in", dest = "inFile", type = str, default = None,
    help = "Input file containing the query sequence/s")

  parser.add_argument("--min_seqs", dest = "minSeqs", type = str, default = None,
    help = "Set the minimum sequences number to reconstruct an alignment/tree."
    + "\nThis parameter overwrites whatever is set on the config file.")

  parser.add_argument("--max_hits", dest = "maxHits", type = str, default = None,
    help = "Set the maximum accepted homology hits after filtering for e-value/"
    + "coverage.\nThis parameter overwrites whatever is set on the config file.")

  parser.add_argument("--steps", dest = "steps", type = str, default = ["all"],
    choices = ["all", "homology", "alignments", "trees"], nargs = "*", help = \
    "Set which step/s should be performed by the script")

  parser.add_argument("-d", "--db", dest = "dbFile", type = str, default = None,
    help = "Input file containing the target sequence database")

  parser.add_argument("--cds", dest = "cdsFile", type = str, default = None,
    help = "Input file containing CDS corresponding to input protein seqs")

  parser.add_argument("-c", "--config", dest = "configFile", default = None, \
    type = str, help = "Input configuration file")

  parser.add_argument("-o", "--out", dest = "outFolder", type = str, default = \
    ".", help = "Output folder where all generated files will be dumped")

  parser.add_argument("-p", "--prefix", dest = "prefix", type = str, default = \
    "", help = "Set the prefix for all output files generated by the pipeline")

  parser.add_argument("-r", "--replace", dest = "replace", default = False, \
    action = "store_true", help = "Over-write any previously generated file")

  parser.add_argument("--no_force_seed", dest = "forcedSeed", default = True, \
    action = "store_false", help = "Avoid forcing the inclusion of the sequence"
    + " used for the homology search\nThis parameter overwrites whatever is set"
    + "on the config file")

  parser.add_argument("--version", action = "version", version ='%(prog)s ' \
    + __version)

  parser.add_argument("-v", "--verbose", dest = "verbose", type = str, default \
    = None, choices = sorted(verbose_levels.keys()), help = "Set how informati"
    + "on should be dumped. It could be used levels or tags\nIt overwrites what"
    + "ever is set on the configuration file.")

  ## If no arguments are given, just show the help and finish
  if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)

  args = parser.parse_args()

  ## Check which steps should be performed. Some combinations are not allowed
  if len(args.steps) > 1 and "all" in args.steps:
    sys.exit(("ERROR: Check selected steps - 'all' cannot be combined with "
      + "others: [ '%s' ]") % ("', '".join(args.steps)))

  ## symmetric difference empty <=> exactly {homology, trees} was selected
  if set(args.steps) ^ set(["homology", "trees"]) == set():
    sys.exit(("ERROR: Check selected steps - 'homology' + 'trees' is not allow"
      + "ed: [ '%s' ]") % ("', '".join(args.steps)))

  if "all" in args.steps:
    args.steps = ["homology", "alignments", "trees"]

  ## Get current directory - we will use this for normalizing input files and
  ## directories to their absolute paths
  current_directory = os.getcwd()

  ## Assign input parameters directly to the dictionary which will contain all
  ## current run configuration.
  parameters = {}
  parameters.setdefault("replace", args.replace)

  ## Assign which step is being executed. It is useful to know whether the log
  ## file should be replaced or not - even when the flag "replace" is set
  parameters.setdefault("step", 0)

  ## Check parameters related to files / directories
  if not lookForFile(args.inFile):
    sys.exit(("ERROR: Check input SEQUENCE/s file '%s'") % (args.inFile))
  parameters.setdefault("in_file", os.path.abspath(args.inFile))

  if "homology" in args.steps:
    if not lookForFile(args.dbFile):
      sys.exit(("ERROR: Check input TARGET SEQUENCES file '%s' [Mode: HOMOLOGY "
        + "SEARCH]") % (args.dbFile))
    parameters.setdefault("db_file", os.path.abspath(args.dbFile))

  if args.cdsFile:
    if not lookForFile(args.cdsFile):
      sys.exit(("ERROR: Check input CDS file '%s'") % (args.cdsFile))
    parameters.setdefault("cds", os.path.abspath(args.cdsFile))

  if not lookForFile(args.configFile):
    sys.exit(("ERROR: Check input CONFIG file '%s'") % (args.configFile))
  parameters.setdefault("config_file", os.path.abspath(args.configFile))

  if not lookForDirectory(args.outFolder):
    sys.exit(("ERROR: Check output folder '%s'") % (args.outFolder))
  parameters.setdefault("out_directory", os.path.abspath(args.outFolder))

  ## Set output files prefix name depending on input user selection
  tag = os.path.split(args.inFile)[1].split(".")[0]
  parameters.setdefault("prefix", args.prefix if args.prefix else tag)

  ## Read the other parameters from the input config file
  parameters.update(readConfig(parameters["config_file"]))

  ## Check parameters specific to the homology search
  if "homology" in args.steps:

    ## Check specific values for input parameters.
    if not "coverage" in parameters or not (0.0 < float(parameters["coverage"]) \
      <= 1.0):
      sys.exit(("ERROR: Check your 'coverage' parameter"))

    ## Overwrite maximum homology hits when set any value by command-line
    if args.maxHits:
      parameters["hits"] = args.maxHits

    if not "hits" in parameters or (parameters["hits"].isdigit() and \
      int(parameters["hits"]) < 1) or (not parameters["hits"].isdigit() \
      and parameters["hits"] != "no_limit"):
      sys.exit(("ERROR: Check your 'homology accepted hits' upper limit value"))

    ## Include information about whether the sequence used to perform the
    ## homology search should be included - even if it is not present among the
    ## homology results - or not.
    if "force_seed_sequence" in parameters and not args.forcedSeed:
      parameters["force_seed_sequence"] = False

    if not "force_seed_sequence" in parameters:
      parameters["force_seed_sequence"] = args.forcedSeed

  ## Check parameters specific to the alignment and tree reconstruction steps
  if set(["alignments", "trees"]) & set(args.steps) != set():

    ## Set minimum sequences number for which any alignment/tree has to be
    ## reconstructed
    if not "min_seqs" in parameters and not args.minSeqs:
      parameters.setdefault("min_seqs", min_seqs_analysis)
    elif args.minSeqs:
      parameters["min_seqs"] = args.minSeqs

    if not parameters["min_seqs"].isdigit() or int(parameters["min_seqs"]) < 1:
      sys.exit(("ERROR: Check your 'minimum sequnces number' value"))

    ## Check whether alignment will be reconstructed in one or two directions,
    ## i.e. head and tails.
    if not "both_direction" in parameters:
      parameters["both_direction"] = True

  ## Configure level of verbosity
  if not "verbose" in parameters and not args.verbose:
    parameters["verbose"] = verbose_levels[default_verbose]
  elif "verbose" in parameters and not parameters["verbose"] in verbose_levels:
    sys.exit(("ERROR: Check your 'verbose' parameter. Available tags/levels [ '"
      + "%s' ]") % "', '".join(sorted(verbose_levels.keys())))
  else:
    key = args.verbose if args.verbose else parameters["verbose"]
    parameters["verbose"] = verbose_levels[key]

  ## Print all set-up parameters
  if parameters["verbose"] > 0:
    printConfig(parameters)

  ## If verbosity has to be redirected to no-where or to a specific log-file,
  ## open that file - depending on existence/replace flag - and dump the
  ## appropriate content there - In case of no verbosity it will be nothing.
  if parameters["verbose"] in [0, 1]:
    ## Get output folder/generic filename - and open log file
    oFile = os.path.join(parameters["out_directory"], parameters["prefix"])
    logFile = open(oFile + ".log", "w" if parameters["replace"] else "a+")
    if parameters["verbose"] == 1:
      ## We don't want to lose all configuration so we set the step to 1
      printConfig(parameters, logFile)
      parameters["step"] = 1
    logFile.close()

  ## We start counting the time for the whole process
  start = datetime.datetime.now()

  ## Launch the whole homology process - update some values in the parameters
  ## dictionary. It is needed to perform appropiately the next step
  if "homology" in args.steps:
    parameters.update(homology(parameters))
    ## Assign which step is being executed. It is useful to know whether the log
    ## file should be replaced or not - even when the flag "replace" is set
    parameters["step"] += 1

  ## Reconstruct the Multiple Sequence Alignment for the selected sequences
  if "alignments" in args.steps:
    parameters.update(alignment(parameters))
    ## Assign which step is being executed. It is useful to know whether the log
    ## file should be replaced or not - even when the flag "replace" is set
    parameters["step"] += 1

  ## Reconstruct the phylogenetic tree for the input alignment either generated
  ## on a previous step or given as input
  if "trees" in args.steps:
    phylogenetic_trees(parameters)

  ## Get final time
  final = datetime.datetime.now()

  ## We return a DELTA object comparing both timestamps
  steps = "', '".join(args.steps)
  total = format_time(final - start if start else 0)

  ## Dump into stderr - when requested all verbose info or just stderr
  if parameters["verbose"] > 0:
    print(("\n###\tTOTAL Time\t[ '%s' ]\t%s\n###") % (steps, total), file = \
      sys.stderr)

  ## Dump into logfile - when requested all verbose info or just logfile
  if parameters["verbose"] == 1:
    ## Get output folder/generic filename - Set output filename and log file
    oFile = os.path.join(parameters["out_directory"], parameters["prefix"])
    logFile = open(oFile + ".log", "a+")
    print(("\n###\tTOTAL Time\t[ '%s' ]\t%s\n###") % (steps, total), file = \
      logFile)
    logFile.close()
|
Gabaldonlab/phylomizer
|
source/phylomizer.py
|
Python
|
gpl-3.0
| 12,704
|
[
"BLAST"
] |
88c5c2437075af9f07ae3f3553380b73942aeb549d9c5853cf2c1d66f74e10ae
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime
import re
from HTMLParser import HTMLParser
from sqlalchemy import or_, orm
from sqlalchemy.dialects.postgresql import JSONB
from app import db
from app import logger
import page
from util import NoDoiException
from util import clean_doi
from util import is_doi_url
from util import normalize_title
DEBUG_BASE = False
def title_is_too_short(normalized_title):
    """True when the normalized title is missing or at most 21 characters —
    too short to be a trustworthy matching key."""
    return (not normalized_title) or len(normalized_title) <= 21
def title_is_too_common(normalized_title):
    """True when `normalized_title` exactly matches a known boilerplate
    title (editorial boards, indexes, instructions to authors, ...) that
    is useless as a matching key."""
    # these common titles were determined using this SQL,
    # which lists the titles of BASE hits that matched titles of more than 2 articles in a sample of 100k articles.
    # ugly sql, i know.  but better to include here as a comment than not, right?
    #     select norm_title, count(*) as c from (
    #     select id, response_jsonb->>'free_fulltext_url' as url, api->'_source'->>'title' as title, normalize_title_v2(api->'_source'->>'title') as norm_title
    #     from crossref where response_jsonb->>'free_fulltext_url' in
    #     ( select url from (
    #     select response_jsonb->>'free_fulltext_url' as url, count(*) as c
    #     from crossref
    #     where crossref.response_jsonb->>'free_fulltext_url' is not null
    #     and id in (select id from dois_random_articles_1mil_do_hybrid_100k limit 100000)
    #     group by url
    #     order by c desc) s where c > 1 ) limit 1000 ) ss group by norm_title order by c desc
    # and then have added more to it

    # NOTE: entries are compared after .strip(), so the string's internal
    # indentation is irrelevant.
    common_title_string = """
    informationreaders
    informationcontributors
    editorialboardpublicationinformation
    insidefrontcovereditorialboard
    graphicalcontentslist
    instructionsauthors
    reviewsandnoticesbooks
    editorialboardaimsandscope
    contributorsthisissue
    parliamentaryintelligence
    editorialadvisoryboard
    informationauthors
    instructionscontributors
    royalsocietymedicine
    guesteditorsintroduction
    cumulativesubjectindexvolumes
    acknowledgementreviewers
    medicalsocietylondon
    ouvragesrecuslaredaction
    royalmedicalandchirurgicalsociety
    moderntechniquetreatment
    reviewcurrentliterature
    answerscmeexamination
    publishersannouncement
    cumulativeauthorindex
    abstractsfromcurrentliterature
    booksreceivedreview
    royalacademymedicineireland
    editorialsoftwaresurveysection
    cumulativesubjectindex
    acknowledgementreferees
    specialcorrespondence
    atmosphericelectricity
    classifiedadvertising
    softwaresurveysection
    abstractscurrentliterature
    britishmedicaljournal
    veranstaltungskalender
    internationalconference
    processintensification
    titlepageeditorialboard
    americanpublichealthassociation
    deepbrainstimulationparkinsonsdisease
    mathematicalmorphologyanditsapplicationssignalandimageprocessing
    principalcomponentanalysis
    acuterespiratorydistresssyndrome
    chronicobstructivepulmonarydisease
    fullscaleevaluationsocaptureincreasesemidryfgdtechnology
    conferenceannouncements
    thconferencecorporateentitiesmarketandeuropeandimensions
    postersessionabstracts
    britishjournaldermatology
    poincareandthreebodyproblem
    systemiclupuserythematosus
    bayeractivitiesdailylivingscalebadl
    mineralogicalsocietyamerica
    stsegmentelevationmyocardialinfarction
    systematicobservationcoachleadershipbehavioursyouthsport
    proximityawaremultiplemeshesdecimationusingquadricerrormetric
    radiochemicalandchemicalconstituentswaterselectedwellsandspringssouthernboundaryidahonationalengineeringandenvironmentallaboratoryhagermanareaidaho
    entrepreneurialleadership
    dictionaryepidemiology
    chieldcausalhypothesesevolutionarylinguisticsdatabase
    socialinequalitieshealth
    cancerincidenceandmortalitychina
    creativecommonseducatorsandlibrarians
    learningandsizegovernmentspendingmultiplier
    """
    for common_title in common_title_string.split("\n"):
        if normalized_title == common_title.strip():
            return True
    return False
def oai_tag_match(tagname, record, return_list=False):
    """Look up `tagname` in an OAI-PMH record's metadata mapping.

    Returns the full value list when return_list is set (possibly empty),
    otherwise the first value — or None when the tag is absent or empty.
    """
    if tagname not in record.metadata:
        return None
    values = record.metadata[tagname]
    if return_list:
        return values  # empty list when the tag matched nothing
    try:
        return values[0]
    except IndexError:  # tag present but no values
        return None
class PmhRecord(db.Model):
id = db.Column(db.Text, primary_key=True)
repo_id = db.Column(db.Text) # delete once endpoint_ids are all populated
endpoint_id = db.Column(db.Text)
doi = db.Column(db.Text)
record_timestamp = db.Column(db.DateTime)
api_raw = db.Column(JSONB)
title = db.Column(db.Text)
license = db.Column(db.Text)
oa = db.Column(db.Text)
urls = db.Column(JSONB)
authors = db.Column(JSONB)
relations = db.Column(JSONB)
sources = db.Column(JSONB)
updated = db.Column(db.DateTime)
rand = db.Column(db.Numeric)
pmh_id = db.Column(db.Text)
pages = db.relationship(
# 'Page',
'PageNew',
lazy='select', # lazy load
cascade="all, delete-orphan",
# don't want a backref because don't want page to link to this
foreign_keys="PageNew.pmh_id"
)
@property
def bare_pmh_id(self):
return self.pmh_id or self.id
def __init__(self, **kwargs):
self.updated = datetime.datetime.utcnow().isoformat()
super(self.__class__, self).__init__(**kwargs)
def populate(self, endpoint_id, pmh_input_record):
self.updated = datetime.datetime.utcnow().isoformat()
self.id = u'{}:{}'.format(endpoint_id, pmh_input_record.header.identifier),
self.endpoint_id = endpoint_id
self.pmh_id = pmh_input_record.header.identifier
self.api_raw = pmh_input_record.raw
self.record_timestamp = pmh_input_record.header.datestamp
self.title = oai_tag_match("title", pmh_input_record)
self.authors = oai_tag_match("creator", pmh_input_record, return_list=True)
self.relations = oai_tag_match("relation", pmh_input_record, return_list=True)
self.oa = oai_tag_match("oa", pmh_input_record)
self.license = oai_tag_match("rights", pmh_input_record)
self.sources = oai_tag_match("collname", pmh_input_record, return_list=True)
identifier_matches = oai_tag_match("identifier", pmh_input_record, return_list=True)
self.urls = self.get_good_urls(identifier_matches)
if not self.urls:
self.urls = self.get_good_urls(self.relations)
possible_dois = []
if self.relations:
possible_dois += [s for s in self.relations if s and '/*ref*/' not in s and not s.startswith('reference')]
if identifier_matches:
possible_dois += [s for s in identifier_matches if s]
if possible_dois:
for possible_doi in possible_dois:
if (
is_doi_url(possible_doi)
or possible_doi.startswith(u"doi:")
or re.findall(ur"10\.\d", possible_doi)
):
try:
doi_candidate = clean_doi(possible_doi)
if not doi_candidate:
continue
skip_these_doi_snippets = [
u'10.17605/osf.io',
u'10.14279/depositonce',
u'/(issn)',
u'10.17169/refubium',
]
for doi_snippet in skip_these_doi_snippets:
if doi_snippet.lower() in doi_candidate.lower():
doi_candidate = None
break
if doi_candidate:
self.doi = doi_candidate
except NoDoiException:
pass
self.doi = self._doi_override_by_id().get(self.bare_pmh_id, self.doi)
self.title = self._title_override_by_id().get(self.bare_pmh_id, self.title)
@staticmethod
def _title_override_by_id():
return {
# wrong title
u'oai:RePEc:feb:natura:00655': u'Do Workers Value Flexible Jobs? A Field Experiment On Compensating Differentials',
# reviews of books with same title
u'oai:ir.uiowa.edu:annals-of-iowa-11115': u'(Book Notice) The Bull Moose Years: Theodore Roosevelt and the Progressive Party',
u'oai:ir.uiowa.edu:annals-of-iowa-9228': u'(Book Review) Land, Piety, Peoplehood: The Establishment of Mennonite Communities in America, 1683-1790',
}
@staticmethod
def _doi_override_by_id():
return {
# wrong DOI in identifier url
u'oai:dspace.flinders.edu.au:2328/36108': u'10.1002/eat.22455',
# picked up wrong DOI in relation
u'oai:oai.kemsu.elpub.ru:article/2590': u'10.21603/2078-8975-2018-4-223-231',
# junk in identifier
u'oai:scholarspace.manoa.hawaii.edu:10125/42031': u'10.18357/ijih122201717783',
# wrong DOI in relation
u'oai:oai.perinatology.elpub.ru:article/560': u'10.21508/1027-4065-2017-62-5-111-118',
u'oai:HAL:hal-00927061v2': u'10.1090/memo/1247',
u'oai:revistas.ucm.es:article/62495': u'10.5209/clac.62495',
u'oai:oro.open.ac.uk:57403': u'10.1090/hmath/011',
u'oai:eprints.soas.ac.uk:22576': u'10.4324/9781315762210-8',
u'oai:oai.mir.elpub.ru:article/838': u'10.18184/2079-4665.2018.9.3.338-350',
u'oai:arXiv.org:1605.06120': None,
u'oai:research-repository.griffith.edu.au:10072/80920': None,
u'oai:HAL:cea-01550620v1': '10.1103/physrevb.93.214414',
u'oai:ora.ox.ac.uk:uuid:f5740dd3-0b45-4e7b-8f2e-d4872a6c326c': '10.1016/j.jclinepi.2017.12.022',
}
def get_good_urls(self, candidate_urls):
valid_urls = []
# pmc can only add pmc urls. otherwise has junk about dois that aren't actually open.
if candidate_urls:
if "oai:pubmedcentral.nih.gov" in self.id:
for url in candidate_urls:
if "/pmc/" in url and url != "http://www.ncbi.nlm.nih.gov/pmc/articles/PMC":
pmcid_matches = re.findall(".*(PMC\d+).*", url)
if pmcid_matches:
pmcid = pmcid_matches[0]
url = u"https://www.ncbi.nlm.nih.gov/pmc/articles/{}".format(pmcid)
valid_urls.append(url)
else:
valid_urls += [url for url in candidate_urls if url and url.startswith(u"http")]
# filter out doi urls unless they are the only url
# might be a figshare url etc, but otherwise is usually to a publisher page which
# may or may not be open, and we are handling through hybrid path
if len(valid_urls) > 1:
valid_urls = [url for url in valid_urls if u"doi.org/" not in url]
valid_urls = [url for url in valid_urls if u"doi.org/10.1111/" not in url]
# filter out some urls that we know are closed or otherwise not useful
blacklist_url_snippets = [
u"/10.1093/analys/",
u"academic.oup.com/analysis",
u"analysis.oxfordjournals.org/",
u"ncbi.nlm.nih.gov/pubmed/",
u"gateway.webofknowledge.com/",
u"orcid.org/",
u"researchgate.net/",
u"academia.edu/",
u"europepmc.org/abstract/",
u"ftp://",
u"api.crossref",
u"api.elsevier",
u"api.osf",
u"eprints.soton.ac.uk/413275",
u"eprints.qut.edu.au/91459/3/91460.pdf",
u"hdl.handle.net/2117/168732",
]
backlist_url_patterns = map(re.escape, blacklist_url_snippets) + [
ur'springer.com/.*/journal/\d+$',
ur'springer.com/journal/\d+$',
ur'supinfo.pdf$',
ur'Appendix[^/]*\.pdf$',
ur'^https?://www\.icgip\.org/?$',
ur'^https?://(www\.)?agu.org/journals/',
ur'issue/current$',
ur'/809AB601-EF05-4DD1-9741-E33D7847F8E5\.pdf$',
]
for url_snippet in backlist_url_patterns:
valid_urls = [url for url in valid_urls if not re.search(url_snippet, url)]
# and then html unescape them, because some are html escaped
h = HTMLParser()
valid_urls = [h.unescape(url) for url in valid_urls]
# make sure they are actually urls
valid_urls = [url for url in valid_urls if url.startswith("http")]
if self.bare_pmh_id.startswith('oai:ora.ox.ac.uk:uuid:') and not valid_urls:
# https://ora.ox.ac.uk
# pmh records don't have page urls but we can guess them
# remove 'oai:ora.ox.ac.uk:' prefix and append to base URL
valid_urls.append(u'https://ora.ox.ac.uk/objects/{}'.format(self.bare_pmh_id[len('oai:ora.ox.ac.uk:'):]))
valid_urls = list(set(valid_urls))
return valid_urls
def mint_page_for_url(self, page_class, url):
from page import PageNew
# this is slow, but no slower then looking for titles before adding pages
existing_page = PageNew.query.filter(PageNew.normalized_title==self.calc_normalized_title(),
PageNew.match_type==page_class.__mapper_args__["polymorphic_identity"],
PageNew.url==url,
PageNew.endpoint_id==self.endpoint_id
).options(orm.noload('*')).first()
if existing_page:
my_page = existing_page
else:
my_page = page_class()
my_page.url = url
my_page.normalized_title = self.calc_normalized_title()
my_page.endpoint_id = self.endpoint_id
my_page.doi = self.doi
my_page.title = self.title
my_page.authors = self.authors
my_page.record_timestamp = self.record_timestamp
my_page.pmh_id = self.id
my_page.repo_id = self.repo_id # delete once endpoint_ids are all populated
return my_page
def calc_normalized_title(self):
if not self.title:
return None
if self.endpoint_id == '63d70f0f03831f36129':
# figshare. the record is for a figure but the title is from its parent article.
return None
working_title = self.title
# repo specific rules
# AMNH adds biblio to the end of titles, which ruins match. remove this.
# example http://digitallibrary.amnh.org/handle/2246/6816 oai:digitallibrary.amnh.org:2246/6816
if "amnh.org" in self.id:
# cut off the last part, after an openning paren
working_title = re.sub(u"(Bulletin of.+no.+\d+)", "", working_title, re.IGNORECASE | re.MULTILINE)
working_title = re.sub(u"(American Museum nov.+no.+\d+)", "", working_title, re.IGNORECASE | re.MULTILINE)
# for endpoint 0dde28a908329849966, adds this to end of all titles, so remove (eg http://hdl.handle.net/11858/00-203Z-0000-002E-72BD-3)
working_title = re.sub(u"vollständige digitalisierte Ausgabe", "", working_title, re.IGNORECASE | re.MULTILINE)
return normalize_title(working_title)
def delete_old_record(self):
    """Delete any legacy row for this record keyed by the bare record id.

    Old records used the bare record_id as pmh_record.id; delete the old
    record before merging, instead of conditionally updating or creating
    the new record.
    """
    db.session.query(PmhRecord).filter(
        PmhRecord.id == self.bare_pmh_id, PmhRecord.endpoint_id == self.endpoint_id
    ).delete()
def mint_pages(self):
    """(Re)build the page rows for this record and prune stale ones.

    Returns the list of minted pages (also stored on self.pages).
    """
    self.pages = []

    # this should have already been done when setting .urls, but do it again in case there were improvements
    # case in point: new url patterns added to the blacklist
    good_urls = self.get_good_urls(self.urls)

    for url in good_urls:
        if self.doi:
            my_page = self.mint_page_for_url(page.PageDoiMatch, url)
            self.pages.append(my_page)

        normalized_title = self.calc_normalized_title()
        if normalized_title:
            # cap title-match pages: very common titles produce noise
            num_pages_with_this_normalized_title = db.session.query(page.PageTitleMatch.id).filter(page.PageTitleMatch.normalized_title==normalized_title).count()
            if num_pages_with_this_normalized_title >= 20:
                logger.info(u"not minting page because too many with this title: {}".format(normalized_title))
            else:
                my_page = self.mint_page_for_url(page.PageTitleMatch, url)
                self.pages.append(my_page)
    # logger.info(u"minted pages: {}".format(self.pages))

    # delete pages with this pmh_id that aren't being updated
    db.session.query(page.PageNew).filter(
        page.PageNew.endpoint_id == self.endpoint_id,
        or_(page.PageNew.pmh_id == self.id, page.PageNew.pmh_id == self.pmh_id),
        page.PageNew.id.notin_([p.id for p in self.pages])
    ).delete(synchronize_session=False)

    return self.pages
def __repr__(self):
    """Debug representation; safe when title is None.

    BUG FIX: title is nullable (calc_normalized_title guards `not self.title`),
    and slicing None raised TypeError here.
    """
    title_snippet = (self.title or u"")[0:20]
    return u"<PmhRecord ({}) doi:{} '{}...'>".format(self.id, self.doi, title_snippet)
def to_dict(self):
    """Serialize this record into a plain dict for API responses."""
    timestamp = self.record_timestamp
    response = {
        "oaipmh_id": self.bare_pmh_id,
        # None when the record has no timestamp, otherwise ISO-8601 text
        "oaipmh_record_timestamp": timestamp and timestamp.isoformat(),
        "urls": self.urls,
        "title": self.title,
    }
    return response
|
Impactstory/oadoi
|
pmh_record.py
|
Python
|
mit
| 18,129
|
[
"MOOSE"
] |
7934363d112ac73f7475d1cb04627efb9bdfee482d1f16c2dec6e7f60e388506
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
**********************************************
**espresso.integrator.VelocityVerletOnRadius**
**********************************************
"""
from espresso.esutil import cxxinit
from espresso import pmi
from espresso.integrator.Extension import *
from _espresso import integrator_VelocityVerletOnRadius
class VelocityVerletOnRadiusLocal(ExtensionLocal, integrator_VelocityVerletOnRadius):
    'The (local) VelocityVerletOnRadius.'

    def __init__(self, system, dampingmass):
        # Initialize the C++ extension only on workers participating in the
        # active PMI group (or when no PMI communicator is active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, integrator_VelocityVerletOnRadius, system, dampingmass)
if pmi.isController :
    # Controller-side proxy: forwards calls/properties to the Local objects
    # on the workers via PMI.
    class VelocityVerletOnRadius(Extension):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.integrator.VelocityVerletOnRadiusLocal',
            pmiproperty = [ 'radialDampingMass' ]
            )
|
BackupTheBerlios/espressopp
|
src/integrator/VelocityVerletOnRadius.py
|
Python
|
gpl-3.0
| 1,817
|
[
"ESPResSo"
] |
e00e2e0a55d75906bfb7118c5ac0b6bc00ac89cc0ef7aa92aecd05df7716e96a
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from ibis.compat import lzip
import ibis.common as com
import ibis.expr.analysis as L
import ibis.expr.analytics as analytics
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.sql.transforms as transforms
import ibis.util as util
import ibis
# ---------------------------------------------------------------------
class QueryAST(object):
    """Container pairing a QueryContext with its list of compiled queries."""

    def __init__(self, context, queries):
        self.context = context
        self.queries = queries
class SelectBuilder(object):

    """
    Transforms expression IR to a query pipeline (potentially multiple
    queries). There will typically be a primary SELECT query, perhaps with some
    subqueries and other DDL to ingest and tear down intermediate data sources.

    Walks the expression tree and catalogues distinct query units, builds
    select statements (and other DDL types, where necessary), and records
    relevant query unit aliases to be used when actually generating SQL.
    """

    def __init__(self, expr, context):
        self.expr = expr

        # Adapt non-table expressions to a table expression plus a handler
        # that reshapes query results for the caller.
        self.query_expr, self.result_handler = _adapt_expr(self.expr)

        # memo shared by substitute_parents calls (see _sub)
        self.sub_memo = {}

        self.context = context
        self.queries = []

        # Components of the SELECT statement being assembled
        self.table_set = None
        self.select_set = None
        self.group_by = None
        self.having = None
        self.filters = []
        self.limit = None
        self.sort_by = []
        self.subqueries = []
        self.distinct = False

        # Prevents visiting the same operation node twice in _collect
        self.op_memo = util.IbisSet()

    def get_result(self):
        """Assemble setup/select/teardown queries; return the primary Select."""
        # make idempotent
        if len(self.queries) > 0:
            # NOTE(review): _wrap_result is not defined on this class; confirm
            # subclasses provide it, otherwise the repeat-call path raises.
            return self._wrap_result()

        # Generate other kinds of DDL statements that may be required to
        # execute the passed query. For example, loading
        setup_queries = self._generate_setup_queries()

        # Make DDL statements to be executed after the main primary select
        # statement(s)
        teardown_queries = self._generate_teardown_queries()

        select_query = self._build_result_query()

        self.queries.extend(setup_queries)
        self.queries.append(select_query)
        self.queries.extend(teardown_queries)

        return select_query

    def _generate_setup_queries(self):
        # Hook for subclasses needing pre-select DDL; none by default.
        return []

    def _generate_teardown_queries(self):
        # Hook for subclasses needing post-select DDL; none by default.
        return []

    def _build_result_query(self):
        # Orchestrates the analysis pipeline and assembles the Select object.
        self._collect_elements()

        self._analyze_select_exprs()
        self._analyze_filter_exprs()
        self._analyze_subqueries()

        self._populate_context()

        # _select_class is supplied by backend-specific subclasses
        klass = self._select_class

        return klass(self.table_set, self.select_set,
                     subqueries=self.subqueries,
                     where=self.filters,
                     group_by=self.group_by,
                     having=self.having,
                     limit=self.limit,
                     order_by=self.sort_by,
                     distinct=self.distinct,
                     result_handler=self.result_handler,
                     parent_expr=self.query_expr,
                     context=self.context)

    def _populate_context(self):
        # Populate aliases for the distinct relations used to output this
        # select statement.
        if self.table_set is not None:
            self._make_table_aliases(self.table_set)

        # XXX: This is a temporary solution to the table-aliasing / correlated
        # subquery problem. Will need to revisit and come up with a cleaner
        # design (also as one way to avoid pathological naming conflicts; for
        # example, we could define a table alias before we know that it
        # conflicts with the name of a table used in a subquery, join, or
        # another part of the query structure)

        # There may be correlated subqueries inside the filters, requiring that
        # we use an explicit alias when outputting as SQL. For now, we're just
        # going to see if any table nodes appearing in the where stack have
        # been marked previously by the above code.
        for expr in self.filters:
            needs_alias = _foreign_ref_check(self, expr)
            if needs_alias:
                self.context.set_always_alias()

    def _make_table_aliases(self, expr):
        # Recursively assign aliases to each leaf table of a join tree.
        ctx = self.context
        node = expr.op()
        if isinstance(node, ops.Join):
            for arg in node.args:
                if not isinstance(arg, ir.TableExpr):
                    continue
                self._make_table_aliases(arg)
        else:
            if not ctx.is_extracted(expr):
                ctx.make_alias(expr)

    # ---------------------------------------------------------------------
    # Expr analysis / rewrites

    def _analyze_select_exprs(self):
        # Rewrite each select expression that needs special handling.
        new_select_set = []

        for expr in self.select_set:
            new_expr = self._visit_select_expr(expr)
            new_select_set.append(new_expr)

        self.select_set = new_select_set

    def _visit_select_expr(self, expr):
        # Dispatch on op type name; otherwise recursively rebuild value nodes
        # whose arguments were rewritten.
        op = expr.op()

        method = '_visit_select_{0}'.format(type(op).__name__)
        if hasattr(self, method):
            f = getattr(self, method)
            return f(expr)

        unchanged = True
        if isinstance(op, ops.ValueNode):
            new_args = []
            for arg in op.args:
                if isinstance(arg, ir.Expr):
                    new_arg = self._visit_select_expr(arg)
                    if arg is not new_arg:
                        unchanged = False
                    new_args.append(new_arg)
                else:
                    new_args.append(arg)

            if not unchanged:
                return expr._factory(type(op)(*new_args))
            else:
                return expr
        else:
            return expr

    def _visit_select_Histogram(self, expr):
        # Rewrite a Histogram as floor((arg - base) / binwidth), computing
        # base/binwidth from the data's min/max when not given.
        op = expr.op()

        EPS = 1e-13

        if op.binwidth is None or op.base is None:
            aux_hash = op.aux_hash or util.guid()[:6]

            min_name = 'min_%s' % aux_hash
            max_name = 'max_%s' % aux_hash

            # one-row aggregate of the extrema, cross-joined into the table set
            minmax = self.table_set.aggregate([op.arg.min().name(min_name),
                                               op.arg.max().name(max_name)])
            self.table_set = self.table_set.cross_join(minmax)

            if op.base is None:
                base = minmax[min_name] - EPS
            else:
                base = op.base

            binwidth = (minmax[max_name] - base) / (op.nbins - 1)
        else:
            # Have both a bin width and a base
            binwidth = op.binwidth
            base = op.base

        bucket = (op.arg - base) / binwidth
        return bucket.floor().name(expr._name)

    def _analyze_filter_exprs(self):
        # What's semantically contained in the filter predicates may need to be
        # rewritten. Not sure if this is the right place to do this, but a
        # starting point

        # Various kinds of semantically valid WHERE clauses may need to be
        # rewritten into a form that we can actually translate into valid SQL.
        new_where = []
        for expr in self.filters:
            new_expr = self._visit_filter(expr)

            # Transformations may result in there being no outputted filter
            # predicate
            if new_expr is not None:
                new_where.append(new_expr)

        self.filters = new_where

    def _visit_filter(self, expr):
        # Dumping ground for analysis of WHERE expressions
        # - Subquery extraction
        # - Conversion to explicit semi/anti joins
        # - Rewrites to generate subqueries

        op = expr.op()

        method = '_visit_filter_{0}'.format(type(op).__name__)
        if hasattr(self, method):
            f = getattr(self, method)
            return f(expr)

        unchanged = True
        if isinstance(expr, ir.ScalarExpr):
            if ops.is_reduction(expr):
                return self._rewrite_reduction_filter(expr)

        if isinstance(op, ops.BinaryOp):
            left = self._visit_filter(op.left)
            right = self._visit_filter(op.right)
            unchanged = left is op.left and right is op.right
            if not unchanged:
                return type(expr)(type(op)(left, right))
            else:
                return expr
        elif isinstance(op, (ops.Any, ops.BooleanValueOp,
                             ops.TableColumn, ir.Literal)):
            # leaf-like predicates pass through unchanged
            return expr
        elif isinstance(op, ops.ValueNode):
            visited = [self._visit_filter(arg)
                       if isinstance(arg, ir.Expr) else arg
                       for arg in op.args]
            unchanged = True
            for new, old in zip(visited, op.args):
                if new is not old:
                    unchanged = False

            if not unchanged:
                return type(expr)(type(op)(*visited))
            else:
                return expr
        else:
            raise NotImplementedError(type(op))

    def _rewrite_reduction_filter(self, expr):
        # Find the table that this reduction references.

        # TODO: what about reductions that reference a join that isn't visible
        # at this level? Means we probably have the wrong design, but will have
        # to revisit when it becomes a problem.
        aggregation, _ = _reduction_to_aggregation(expr, default_name='tmp')
        return aggregation.to_array()

    def _visit_filter_Any(self, expr):
        # Rewrite semi/anti-join predicates in way that can hook into SQL
        # translation step
        transform = transforms.AnyToExistsTransform(self.context, expr,
                                                    self.table_set)
        return transform.get_result()
    _visit_filter_NotAny = _visit_filter_Any

    def _visit_filter_SummaryFilter(self, expr):
        # Top K is rewritten as an
        # - aggregation
        # - sort by
        # - limit
        # - left semi join with table set
        parent_op = expr.op()
        summary_expr = parent_op.args[0]
        op = summary_expr.op()

        rank_set = summary_expr.to_aggregation(
            backup_metric_name='__tmp__',
            parent_table=self.table_set)

        # GH #667; this may reference a filtered version of self.table_set
        arg = L.substitute_parents(op.arg)

        pred = (arg == getattr(rank_set, op.arg.get_name()))
        self.table_set = self.table_set.semi_join(rank_set, [pred])

        # predicate consumed by the semi join; emit no WHERE clause
        return None

    # ---------------------------------------------------------------------
    # Analysis of table set

    def _collect_elements(self):
        # If expr is a ValueExpr, we must seek out the TableExprs that it
        # references, build their ASTs, and mark them in our QueryContext

        # For now, we need to make the simplifying assumption that a value
        # expression that is being translated only depends on a single table
        # expression.

        source_expr = self.query_expr

        # hm, is this the best place for this?
        root_op = source_expr.op()
        if (isinstance(root_op, ops.Join) and
                not isinstance(root_op, ops.MaterializedJoin)):
            # Unmaterialized join
            source_expr = source_expr.materialize()

        if isinstance(root_op, ops.TableNode):
            self._collect(source_expr, toplevel=True)
            if self.table_set is None:
                raise com.InternalError('no table set')
        else:
            # Expressions not depending on any table
            if isinstance(root_op, ir.ExpressionList):
                self.select_set = source_expr.exprs()
            else:
                self.select_set = [source_expr]

    def _collect(self, expr, toplevel=False):
        # Dispatch on op type name with fallbacks for physical tables/joins.
        op = expr.op()
        method = '_collect_{0}'.format(type(op).__name__)

        # Do not visit nodes twice
        if op in self.op_memo:
            return

        if hasattr(self, method):
            f = getattr(self, method)
            f(expr, toplevel=toplevel)
        elif isinstance(op, (ops.PhysicalTable, ops.SQLQueryResult)):
            self._collect_PhysicalTable(expr, toplevel=toplevel)
        elif isinstance(op, ops.Join):
            self._collect_Join(expr, toplevel=toplevel)
        else:
            raise NotImplementedError(type(op))

        self.op_memo.add(op)

    def _collect_Distinct(self, expr, toplevel=False):
        if toplevel:
            self.distinct = True

        self._collect(expr.op().table, toplevel=toplevel)

    def _collect_Filter(self, expr, toplevel=False):
        op = expr.op()

        self.filters.extend(op.predicates)
        if toplevel:
            self.select_set = [op.table]
            self.table_set = op.table

        self._collect(op.table)

    def _collect_Limit(self, expr, toplevel=False):
        if not toplevel:
            return

        op = expr.op()

        # Ignore "inner" limits, because they've been overrided by an exterior
        # one
        if self.limit is None:
            self.limit = {
                'n': op.n,
                'offset': op.offset
            }

        self._collect(op.table, toplevel=toplevel)

    def _collect_Union(self, expr, toplevel=False):
        # Unions are handled at the QueryBuilder level, not inside a Select.
        if not toplevel:
            return
        else:
            raise NotImplementedError

    def _collect_SortBy(self, expr, toplevel=False):
        op = expr.op()

        self.sort_by = op.keys
        if toplevel:
            # HACK: yuck, need a better way to know if we should perform a
            # select * from a subquery here
            parent_op = op.table.op()
            if (isinstance(parent_op, ir.BlockingTableNode) and
                    not isinstance(parent_op, ops.Aggregation)):
                self.select_set = [op.table]
                self.table_set = op.table
                toplevel = False

        self._collect(op.table, toplevel=toplevel)

    def _collect_Aggregation(self, expr, toplevel=False):
        # The select set includes the grouping keys (if any), and these are
        # duplicated in the group_by set. SQL translator can decide how to
        # format these depending on the database. Most likely the
        # GROUP BY 1, 2, ... style
        if toplevel:
            subbed_expr = self._sub(expr)
            sub_op = subbed_expr.op()

            self.group_by = self._convert_group_by(sub_op.by)
            self.having = sub_op.having
            self.select_set = sub_op.by + sub_op.agg_exprs
            self.table_set = sub_op.table

            self._collect(expr.op().table)

    def _collect_Projection(self, expr, toplevel=False):
        op = expr.op()
        table = op.table

        if toplevel:
            subbed = self._sub(expr)
            sop = subbed.op()

            if isinstance(table.op(), ops.Join):
                can_sub = self._collect_Join(table)
            else:
                can_sub = True
                self._collect(table)

            selections = op.selections
            if can_sub:
                # safe to fuse with the parent-substituted projection
                selections = sop.selections
                table = sop.table

            self.select_set = selections
            self.table_set = table

    def _collect_MaterializedJoin(self, expr, toplevel=False):
        op = expr.op()
        join = op.join

        if toplevel:
            subbed = self._sub(join)
            self.table_set = subbed
            self.select_set = [subbed]

        self._collect_Join(join, toplevel=False)

    def _convert_group_by(self, exprs):
        # Represent grouping keys as ordinal positions (GROUP BY 1, 2, ...)
        return list(range(len(exprs)))

    def _collect_Join(self, expr, toplevel=False):
        if toplevel:
            subbed = self._sub(expr)
            self.table_set = subbed
            self.select_set = [subbed]

        subtables = _get_subtables(expr)

        # If any of the joined tables are non-blocking modified versions
        # (e.g. with Filter) of the same table, then it's not safe to continue
        # walking down the tree (see #667), and we should instead have inline
        # views rather than attempting to fuse things together into the same
        # SELECT query.
        can_substitute = _all_distinct_roots(subtables)
        if can_substitute:
            for table in subtables:
                self._collect(table, toplevel=False)

        return can_substitute

    def _collect_PhysicalTable(self, expr, toplevel=False):
        if toplevel:
            self.select_set = [expr]
            self.table_set = expr  # self._sub(expr)

    def _collect_SelfReference(self, expr, toplevel=False):
        op = expr.op()
        if toplevel:
            self._collect(op.table, toplevel=toplevel)

    def _sub(self, what):
        # substitute_parents with a shared memo, over an expr or list of exprs
        if isinstance(what, list):
            return [L.substitute_parents(x, self.sub_memo) for x in what]
        else:
            return L.substitute_parents(what, self.sub_memo)

    # --------------------------------------------------------------------
    # Subquery analysis / extraction

    def _analyze_subqueries(self):
        # Somewhat temporary place for this. A little bit tricky, because
        # subqueries can be found in many places
        # - With the table set
        # - Inside the where clause (these may be able to place directly, some
        #   cases not)
        # - As support queries inside certain expressions (possibly needing to
        #   be extracted and joined into the table set where they are
        #   used). More complex transformations should probably not occur here,
        #   though.
        #
        # Duplicate subqueries might appear in different parts of the query
        # structure, e.g. beneath two aggregates that are joined together, so
        # we have to walk the entire query structure.
        #
        # The default behavior is to only extract into a WITH clause when a
        # subquery appears multiple times (for DRY reasons). At some point we
        # can implement a more aggressive policy so that subqueries always
        # appear in the WITH part of the SELECT statement, if that's what you
        # want.

        # Find the subqueries, and record them in the passed query context.
        subqueries = _extract_subqueries(self)
        self.subqueries = []
        for expr in subqueries:
            # See #173. Might have been extracted already in a parent context.
            if not self.context.is_extracted(expr):
                self.subqueries.append(expr)
                self.context.set_extracted(expr)
def _get_subtables(expr):
    """Flatten a join tree into its leaf (non-join) table exprs, left to right."""
    leaves = []
    stack = [expr]
    while stack:
        current = stack.pop()
        node = current.op()
        if isinstance(node, ops.Join):
            # push right first so the left subtree is visited first
            stack.append(node.right)
            stack.append(node.left)
        else:
            leaves.append(current)
    return leaves
def _all_distinct_roots(subtables):
    """True iff no two subtables share the same blocking base table."""
    seen = []
    for table in subtables:
        root = _blocking_base(table)
        # compare with .equals (structural), not identity
        if any(root.equals(prev) for prev in seen):
            return False
        seen.append(root)
    return True
def _blocking_base(expr):
    """Descend to the nearest blocking table node or join under expr.

    Returns None implicitly if no table-valued argument is found.
    """
    op = expr.op()
    if isinstance(op, (ir.BlockingTableNode, ops.Join)):
        return expr
    # recurse into the first table-valued argument
    for arg in op.flat_args():
        if isinstance(arg, ir.TableExpr):
            return _blocking_base(arg)
def _extract_subqueries(select_stmt):
    """Return the subquery expressions worth extracting from a select."""
    return _ExtractSubqueries(select_stmt).get_result()
def _extract_noop(self, expr):
    # Visitor placeholder for node types that cannot contain subqueries.
    return
class _ExtractSubqueries(object):
    """Walk a query's table set and filters, counting repeated subexpressions.

    get_result() returns the expressions to extract into a WITH clause: by
    default those observed more than once; all of them when ``greedy``.
    """

    # Helper class to make things a little easier
    def __init__(self, query, greedy=False):
        self.query = query
        self.greedy = greedy

        # Ordered set that uses object .equals to find keys
        self.observed_exprs = util.IbisMap()

        # occurrence count per observed key
        # (idiom fix: defaultdict(int) instead of defaultdict(lambda: 0))
        self.expr_counts = defaultdict(int)

    def get_result(self):
        if self.query.table_set is not None:
            self.visit(self.query.table_set)

        for clause in self.query.filters:
            self.visit(clause)

        to_extract = []

        # Read them inside-out, to avoid nested dependency issues
        for expr, key in reversed(lzip(self.observed_exprs.keys,
                                       self.observed_exprs.values)):
            v = self.expr_counts[key]

            if self.greedy or v > 1:
                to_extract.append(expr)

        return to_extract

    def observe(self, expr):
        """Record one occurrence of expr, allocating a key on first sight."""
        if expr in self.observed_exprs:
            key = self.observed_exprs.get(expr)
        else:
            # this key only needs to be unique because of the IbisMap
            key = id(expr.op())
            self.observed_exprs.set(expr, key)

        self.expr_counts[key] += 1

    def _has_been_observed(self, expr):
        return expr in self.observed_exprs

    def visit(self, expr):
        # Dispatch on op type name, with fallbacks for joins, physical
        # tables, and generic value nodes.
        node = expr.op()
        method = '_visit_{0}'.format(type(node).__name__)

        if hasattr(self, method):
            f = getattr(self, method)
            f(expr)
        elif isinstance(node, ops.Join):
            self._visit_join(expr)
        elif isinstance(node, ops.PhysicalTable):
            self._visit_physical_table(expr)
        elif isinstance(node, ops.ValueNode):
            for arg in node.flat_args():
                if not isinstance(arg, ir.Expr):
                    continue
                self.visit(arg)
        else:
            raise NotImplementedError(type(node))

    def _visit_join(self, expr):
        node = expr.op()
        self.visit(node.left)
        self.visit(node.right)

    _visit_physical_table = _extract_noop

    def _visit_Exists(self, expr):
        node = expr.op()
        self.visit(node.foreign_table)
        for pred in node.predicates:
            self.visit(pred)

    _visit_ExistsSubquery = _visit_Exists
    _visit_NotExistsSubquery = _visit_Exists

    def _visit_Aggregation(self, expr):
        self.observe(expr)
        self.visit(expr.op().table)

    def _visit_Distinct(self, expr):
        self.observe(expr)

    def _visit_Filter(self, expr):
        # filters themselves are not extraction candidates; recurse only
        self.visit(expr.op().table)

    def _visit_Limit(self, expr):
        self.observe(expr)
        self.visit(expr.op().table)

    def _visit_Union(self, expr):
        self.observe(expr)

    def _visit_Projection(self, expr):
        self.observe(expr)
        self.visit(expr.op().table)

    def _visit_SQLQueryResult(self, expr):
        self.observe(expr)

    def _visit_TableColumn(self, expr):
        table = expr.op().table
        if not self._has_been_observed(table):
            self.visit(table)

    def _visit_SelfReference(self, expr):
        self.visit(expr.op().table)

    def _visit_SortBy(self, expr):
        self.observe(expr)
        self.visit(expr.op().table)
def _foreign_ref_check(query, expr):
    """Report whether expr needs explicit aliases (correlated references)."""
    return _CorrelatedRefCheck(query, expr).get_result()
class _CorrelatedRefCheck(object):
    """Detects correlated subquery references inside a filter expression.

    get_result() is True when the expression, inside a subquery, references
    both the query's own root tables and foreign tables — the situation
    requiring explicit table aliases in the emitted SQL.
    """

    def __init__(self, query, expr):
        self.query = query
        self.ctx = query.context
        self.expr = expr

        qroots = self.query.table_set._root_tables()

        self.query_roots = util.IbisSet.from_list(qroots)

        # aliasing required
        self.foreign_refs = []

        self.has_foreign_root = False
        self.has_query_root = False

    def get_result(self):
        self._visit(self.expr)
        return self.has_query_root and self.has_foreign_root

    def _visit(self, expr, in_subquery=False):
        node = expr.op()

        # NOTE(review): the incoming in_subquery flag is discarded and
        # recomputed from the current node only; confirm nesting state is not
        # meant to propagate (i.e. `in_subquery or self._is_subquery(node)`).
        in_subquery = self._is_subquery(node)

        for arg in node.flat_args():
            if isinstance(arg, ir.TableExpr):
                self._visit_table(arg, in_subquery=in_subquery)
            elif isinstance(arg, ir.Expr):
                self._visit(arg, in_subquery=in_subquery)
            else:
                continue

    def _is_subquery(self, node):
        # XXX
        if isinstance(node, ops.TableArrayView):
            return True

        if isinstance(node, ops.TableColumn):
            # a column of a non-root table implies a subquery context
            return not self._is_root(node.table)

        return False

    def _visit_table(self, expr, in_subquery=False):
        node = expr.op()

        if isinstance(node, (ops.PhysicalTable, ops.SelfReference)):
            self._ref_check(node, in_subquery=in_subquery)

        for arg in node.flat_args():
            if isinstance(arg, ir.Expr):
                self._visit(arg, in_subquery=in_subquery)

    def _ref_check(self, node, in_subquery=False):
        # Classify the reference (query-root vs foreign) and make sure an
        # alias exists where the generated SQL will need one.
        is_aliased = self.ctx.has_ref(node)

        if self._is_root(node):
            if in_subquery:
                self.has_query_root = True
        else:
            if in_subquery:
                self.has_foreign_root = True
                if (not is_aliased and
                        self.ctx.has_ref(node, parent_contexts=True)):
                    self.ctx.make_alias(node)
            elif not self.ctx.has_ref(node):
                self.ctx.make_alias(node)

    def _is_root(self, what):
        if isinstance(what, ir.Expr):
            what = what.op()
        return what in self.query_roots
def _adapt_expr(expr):
    """Adapt any expression to a (table_expr, result_handler) pair.

    The handler reshapes the executed table's results back to the arity the
    original expression implies (scalar, column, or table).
    """
    # Non-table expressions need to be adapted to some well-formed table
    # expression, along with a way to adapt the results to the desired
    # arity (whether array-like or scalar, for example)
    #
    # Canonical case is scalar values or arrays produced by some reductions
    # (simple reductions, or distinct, say)

    def as_is(x):
        return x

    if isinstance(expr, ir.TableExpr):
        return expr, as_is

    def _scalar_reduce(x):
        return isinstance(x, ir.ScalarExpr) and ops.is_reduction(x)

    def _get_scalar(field):
        # pull the single value out of the named result column
        def scalar_handler(results):
            return results[field][0]
        return scalar_handler

    if isinstance(expr, ir.ScalarExpr):

        if _scalar_reduce(expr):
            table_expr, name = _reduction_to_aggregation(
                expr, default_name='tmp')
            return table_expr, _get_scalar(name)
        else:
            base_table = ir.find_base_table(expr)
            if base_table is None:
                # expr with no table refs
                return expr.name('tmp'), _get_scalar('tmp')
            else:
                raise NotImplementedError(expr._repr())

    elif isinstance(expr, ir.AnalyticExpr):
        return expr.to_aggregation(), as_is

    elif isinstance(expr, ir.ExprList):
        exprs = expr.exprs()

        is_aggregation = True
        any_aggregation = False

        for x in exprs:
            if not _scalar_reduce(x):
                is_aggregation = False
            else:
                any_aggregation = True

        if is_aggregation:
            table = ir.find_base_table(exprs[0])
            return table.aggregate(exprs), as_is
        elif not any_aggregation:
            return expr, as_is
        else:
            # mixture of reductions and non-reductions is unsupported
            raise NotImplementedError(expr._repr())

    elif isinstance(expr, ir.ArrayExpr):
        op = expr.op()

        def _get_column(name):
            def column_handler(results):
                return results[name]
            return column_handler

        if isinstance(op, ops.TableColumn):
            table_expr = op.table
            result_handler = _get_column(op.name)
        else:
            # Something more complicated.
            base_table = L.find_source_table(expr)

            if isinstance(op, ops.DistinctArray):
                expr = op.arg
                try:
                    name = op.arg.get_name()
                except Exception:
                    name = 'tmp'

                table_expr = (base_table.projection([expr.name(name)])
                              .distinct())
                result_handler = _get_column(name)
            else:
                table_expr = base_table.projection([expr.name('tmp')])
                result_handler = _get_column('tmp')

        return table_expr, result_handler
    else:
        raise com.TranslationError('Do not know how to execute: {0}'
                                   .format(type(expr)))
def _reduction_to_aggregation(expr, default_name='tmp'):
    """Wrap a scalar reduction in an aggregation over its base table.

    Returns (aggregated_table_expr, result_column_name).
    """
    table = ir.find_base_table(expr)
    try:
        name = expr.get_name()
        named_expr = expr
    except Exception:
        # expr is unnamed; fall back to default_name.
        # (was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit)
        name = default_name
        named_expr = expr.name(default_name)

    return table.aggregate([named_expr]), name
class QueryBuilder(object):
    """Turns a top-level expression into a QueryAST (union or select)."""

    select_builder = SelectBuilder

    def __init__(self, expr, context=None):
        self.expr = expr

        if context is None:
            context = self._make_context()

        self.context = context

    @property
    def _make_context(self):
        # NOTE(review): declared as a property but invoked with parentheses
        # in __init__; accessing it on this base class raises
        # NotImplementedError, so subclasses must override it — confirm how
        # the concrete backends define it.
        raise NotImplementedError

    def get_result(self):
        op = self.expr.op()

        # TODO: any setup / teardown DDL statements will need to be done prior
        # to building the result set-generating statements.
        if isinstance(op, ops.Union):
            query = self._make_union()
        else:
            query = self._make_select()

        return QueryAST(self.context, [query])

    def _make_union(self):
        op = self.expr.op()
        # _union_class is supplied by backend-specific subclasses
        return self._union_class(op.left, op.right,
                                 distinct=op.distinct,
                                 context=self.context)

    def _make_select(self):
        builder = self.select_builder(self.expr, self.context)
        return builder.get_result()
class QueryContext(object):

    """
    Records bits of information used during ibis AST to SQL translation
    """

    def __init__(self, indent=2, parent=None):
        # table-key -> alias assignments for this (sub)context
        self._table_refs = {}
        self.extracted_subexprs = set()
        self.subquery_memo = {}
        self.indent = indent
        self.parent = parent
        self.always_alias = False
        self.query = None
        self._table_key_memo = {}

    def _compile_subquery(self, expr):
        sub_ctx = self.subcontext()
        return self._to_sql(expr, sub_ctx)

    def _to_sql(self, expr, ctx):
        # implemented by backend-specific subclasses
        raise NotImplementedError

    @property
    def top_context(self):
        # Root of the parent-context chain.
        if self.parent is None:
            return self
        else:
            return self.parent.top_context

    def set_always_alias(self):
        self.always_alias = True

    def get_compiled_expr(self, expr):
        """Compile expr to SQL, memoizing on the top-level context."""
        this = self.top_context

        key = self._get_table_key(expr)
        if key in this.subquery_memo:
            return this.subquery_memo[key]

        op = expr.op()
        if isinstance(op, ops.SQLQueryResult):
            # raw SQL node: its text is already the compiled form
            result = op.query
        else:
            result = self._compile_subquery(expr)

        this.subquery_memo[key] = result
        return result

    def make_alias(self, expr):
        i = len(self._table_refs)

        key = self._get_table_key(expr)

        # Get total number of aliases up and down the tree at this point; if we
        # find the table prior-aliased along the way, however, we reuse that
        # alias
        ctx = self
        while ctx.parent is not None:
            ctx = ctx.parent

            if key in ctx._table_refs:
                alias = ctx._table_refs[key]
                self.set_ref(expr, alias)
                return

            i += len(ctx._table_refs)

        alias = 't%d' % i
        self.set_ref(expr, alias)

    def need_aliases(self):
        return self.always_alias or len(self._table_refs) > 1

    def has_ref(self, expr, parent_contexts=False):
        key = self._get_table_key(expr)
        return self._key_in(key, '_table_refs',
                            parent_contexts=parent_contexts)

    def set_ref(self, expr, alias):
        key = self._get_table_key(expr)
        self._table_refs[key] = alias

    def get_ref(self, expr):
        """
        Get the alias being used throughout a query to refer to a particular
        table or inline view
        """
        return self._get_table_item('_table_refs', expr)

    def is_extracted(self, expr):
        key = self._get_table_key(expr)
        return key in self.top_context.extracted_subexprs

    def set_extracted(self, expr):
        key = self._get_table_key(expr)
        self.extracted_subexprs.add(key)
        self.make_alias(expr)

    def subcontext(self):
        # child context sharing formatting settings, linked back to self
        return type(self)(indent=self.indent, parent=self)

    # Maybe temporary hacks for correlated / uncorrelated subqueries

    def set_query(self, query):
        self.query = query

    def is_foreign_expr(self, expr):
        from ibis.expr.analysis import ExprValidator

        # The expression isn't foreign to us. For example, the parent table set
        # in a correlated WHERE subquery
        if self.has_ref(expr, parent_contexts=True):
            return False

        exprs = [self.query.table_set] + self.query.select_set
        validator = ExprValidator(exprs)
        return not validator.validate(expr)

    def _get_table_item(self, item, expr):
        key = self._get_table_key(expr)
        top = self.top_context

        # extracted subqueries live on the top context
        if self.is_extracted(expr):
            return getattr(top, item).get(key)

        return getattr(self, item).get(key)

    def _get_table_key(self, table):
        # Stable key for a table op: its repr string, memoized by object id.
        if isinstance(table, ir.TableExpr):
            table = table.op()

        k = id(table)
        if k in self._table_key_memo:
            return self._table_key_memo[k]
        else:
            val = table._repr()
            self._table_key_memo[k] = val
            return val

    def _key_in(self, key, memo_attr, parent_contexts=False):
        if key in getattr(self, memo_attr):
            return True

        ctx = self
        while parent_contexts and ctx.parent is not None:
            ctx = ctx.parent
            if key in getattr(ctx, memo_attr):
                return True

        return False
class ExprTranslator(object):
    """Translates a single ibis expression to a SQL string fragment.

    Subclasses supply a ``_registry`` of formatter functions and a
    ``_context_class``; ``_rewrites`` maps operation types to expression
    rewrite functions applied before translation.
    """

    _rewrites = {}

    def __init__(self, expr, context=None, named=False, permit_subquery=False):
        self.expr = expr
        self.permit_subquery = permit_subquery

        if context is None:
            context = self._context_class()
        self.context = context

        # For now, governing whether the result will have a name
        self.named = named

    @property
    def _context_class(self):
        # provided by backend-specific subclasses
        raise NotImplementedError

    def get_result(self):
        """
        Build compiled SQL expression from the bottom up and return as a string
        """
        translated = self.translate(self.expr)
        if self._needs_name(self.expr):
            # TODO: this could fail in various ways
            name = self.expr.get_name()
            translated = self.name(translated, name)
        return translated

    def _needs_name(self, expr):
        # Decide whether the translated SQL should carry an alias.
        if not self.named:
            return False

        op = expr.op()
        if isinstance(op, ops.TableColumn):
            # This column has been given an explicitly different name
            if expr.get_name() != op.name:
                return True
            return False

        if expr.get_name() is ir.unnamed:
            return False

        return True

    def translate(self, expr):
        # The operation node type the typed expression wraps
        op = expr.op()

        # Apply a registered rewrite unless a direct formatter rule exists.
        if type(op) in self._rewrites and type(op) not in self._registry:
            expr = self._rewrites[type(op)](expr)
            op = expr.op()

        # TODO: use op MRO for subclasses instead of this isinstance spaghetti
        if isinstance(op, ir.Parameter):
            return self._trans_param(expr)
        elif isinstance(op, ops.TableNode):
            # HACK/TODO: revisit for more complex cases
            return '*'
        elif type(op) in self._registry:
            formatter = self._registry[type(op)]
            return formatter(self, expr)
        else:
            raise com.TranslationError('No translator rule for {0}'
                                       .format(type(op)))

    def _trans_param(self, expr):
        raise NotImplementedError

    @classmethod
    def rewrites(cls, klass, f=None):
        """Register a rewrite function for operation type klass.

        Usable either as a decorator, ``@rewrites(Klass)``, or called
        directly as ``rewrites(Klass, f)``. Returns the registered function.
        """
        def decorator(f):
            cls._rewrites[klass] = f
            # BUG FIX: return the function so that names decorated with
            # @rewrites(...) are not rebound to None at module level.
            return f

        if f is None:
            return decorator
        else:
            return decorator(f)
# Module-level shorthand for registering the rewrite rules defined below.
rewrites = ExprTranslator.rewrites
@rewrites(analytics.Bucket)
def _bucket(expr):
    # Rewrite a Bucket analytic op into a searched CASE expression mapping
    # each value of op.arg to an integer bucket id.
    import operator

    op = expr.op()
    stmt = ibis.case()

    # which side of each interval is closed
    if op.closed == 'left':
        l_cmp = operator.le
        r_cmp = operator.lt
    else:
        l_cmp = operator.lt
        r_cmp = operator.le

    user_num_buckets = len(op.buckets) - 1

    bucket_id = 0
    if op.include_under:
        # extra bucket below the first edge
        if user_num_buckets > 0:
            cmp = operator.lt if op.close_extreme else r_cmp
        else:
            cmp = operator.le if op.closed == 'right' else operator.lt
        stmt = stmt.when(cmp(op.arg, op.buckets[0]), bucket_id)
        bucket_id += 1

    for j, (lower, upper) in enumerate(zip(op.buckets, op.buckets[1:])):
        if (op.close_extreme and
            ((op.closed == 'right' and j == 0) or
             (op.closed == 'left' and j == (user_num_buckets - 1)))):
            # close_extreme closes both ends of the extreme bin
            stmt = stmt.when((lower <= op.arg) & (op.arg <= upper),
                             bucket_id)
        else:
            stmt = stmt.when(l_cmp(lower, op.arg) & r_cmp(op.arg, upper),
                             bucket_id)
        bucket_id += 1

    if op.include_over:
        # extra bucket above the last edge
        if user_num_buckets > 0:
            cmp = operator.lt if op.close_extreme else l_cmp
        else:
            cmp = operator.lt if op.closed == 'right' else operator.le

        stmt = stmt.when(cmp(op.buckets[-1], op.arg), bucket_id)
        bucket_id += 1

    return stmt.end().name(expr._name)
@rewrites(analytics.CategoryLabel)
def _category_label(expr):
    """Rewrite CategoryLabel as a CASE mapping category index -> label,
    with an optional ELSE for null categories."""
    op = expr.op()

    case = op.args[0].case()
    for index, label in enumerate(op.labels):
        case = case.when(index, label)

    if op.nulls is not None:
        case = case.else_(op.nulls)

    return case.end().name(expr._name)
@rewrites(ops.Any)
def _any_expand(expr):
    """Rewrite Any: at least one truthy row <=> sum(arg) > 0."""
    return expr.op().args[0].sum() > 0
@rewrites(ops.NotAny)
def _notany_expand(expr):
    """Rewrite NotAny: no truthy rows <=> sum(arg) == 0."""
    return expr.op().args[0].sum() == 0
@rewrites(ops.All)
def _all_expand(expr):
    """Rewrite All: every row truthy <=> sum(arg) equals the row count of
    the base table."""
    arg = expr.op().args[0]
    base = ir.find_base_table(arg)
    return arg.sum() == base.count()
@rewrites(ops.NotAll)
def _notall_expand(expr):
    """Rewrite NotAll: at least one falsy row <=> sum(arg) is below the
    row count of the base table."""
    arg = expr.op().args[0]
    base = ir.find_base_table(arg)
    return arg.sum() < base.count()
class DDL(object):
    """Marker base class for compiled SQL statement objects (e.g. Select,
    Union)."""
    pass
class Select(DDL):

    """
    A SELECT statement which, after execution, might yield back to the user a
    table, array/list, or scalar value, depending on the expression that
    generated it
    """

    def __init__(self, table_set, select_set,
                 subqueries=None, where=None, group_by=None, having=None,
                 order_by=None, limit=None,
                 distinct=False, indent=2,
                 result_handler=None, parent_expr=None,
                 context=None):
        self.context = context

        self.select_set = select_set
        self.table_set = table_set
        self.distinct = distinct
        self.parent_expr = parent_expr

        self.where = where or []

        # Group keys and post-predicates for aggregations
        self.group_by = group_by or []
        self.having = having or []
        self.order_by = order_by or []

        self.limit = limit
        self.subqueries = subqueries or []

        self.indent = indent

        self.result_handler = result_handler

    # Concrete subclasses supply their ExprTranslator subclass here
    translator = None

    def _translate(self, expr, context=None, named=False,
                   permit_subquery=False):
        """Translate one expression using this statement's translator class,
        defaulting to this statement's context."""
        if context is None:
            context = self.context
        translator = self.translator(expr, context=context,
                                     named=named,
                                     permit_subquery=permit_subquery)
        return translator.get_result()

    def equals(self, other):
        """Structural equality with another Select: same limit and
        pairwise-equal component expressions.

        BUG FIX: the component lists were previously compared with a bare
        ``zip``, which stops at the shorter list -- a Select with extra
        trailing expressions could compare equal to a shorter one.  Lists
        of different lengths are now unequal.
        """
        if not isinstance(other, Select):
            return False

        this_exprs = self._all_exprs()
        other_exprs = other._all_exprs()

        if self.limit != other.limit:
            return False

        if len(this_exprs) != len(other_exprs):
            return False

        for x, y in zip(this_exprs, other_exprs):
            if not x.equals(y):
                return False

        return True

    def _all_exprs(self):
        # Flatten all expression-bearing attributes into one list, in a
        # fixed order, for use by equals()
        # Gnarly, maybe we can improve this somehow
        expr_attrs = ['select_set', 'table_set', 'where', 'group_by', 'having',
                      'order_by', 'subqueries']
        exprs = []
        for attr in expr_attrs:
            val = getattr(self, attr)
            if isinstance(val, list):
                exprs.extend(val)
            else:
                exprs.append(val)
        return exprs
class TableSetFormatter(object):
    """Accumulates the pieces of a Select's FROM clause -- formatted tables,
    join types, and join predicates -- by walking a join expression tree.

    NOTE(review): ``_get_join_type`` and ``_format_table`` are referenced
    below but defined elsewhere (presumably a subclass or later in this
    file) -- confirm.
    """

    def __init__(self, parent, expr, indent=2):
        # parent: the owning Select statement (supplies translation context)
        self.parent = parent
        self.context = parent.context
        self.expr = expr
        self.indent = indent

        self.join_tables = []
        self.join_types = []
        self.join_predicates = []

    def _translate(self, expr):
        # Delegate to the parent Select so the same context is used
        return self.parent._translate(expr, context=self.context)

    def _walk_join_tree(self, op):
        """Depth-first, left-to-right walk of a join tree, appending to
        join_tables / join_types / join_predicates."""
        left = op.left.op()
        right = op.right.op()

        if util.all_of([left, right], ops.Join):
            raise NotImplementedError('Do not support joins between '
                                      'joins yet')

        self._validate_join_predicates(op.predicates)

        jname = self._get_join_type(op)

        # Read off tables and join predicates left-to-right in
        # depth-first order
        if isinstance(left, ops.Join):
            self._walk_join_tree(left)
            self.join_tables.append(self._format_table(op.right))
            self.join_types.append(jname)
            self.join_predicates.append(op.predicates)
        elif isinstance(right, ops.Join):
            # When rewrites are possible at the expression IR stage, we should
            # do them. Otherwise subqueries might be necessary in some cases
            # here
            raise NotImplementedError('not allowing joins on right '
                                      'side yet')
        else:
            # Both tables
            self.join_tables.append(self._format_table(op.left))
            self.join_tables.append(self._format_table(op.right))
            self.join_types.append(jname)
            self.join_predicates.append(op.predicates)

    # Placeholder; revisit when supporting other databases
    _non_equijoin_supported = True

    def _validate_join_predicates(self, predicates):
        # Reject non-equijoin predicates when the backend can't handle them
        for pred in predicates:
            op = pred.op()

            if (not isinstance(op, ops.Equals) and
                    not self._non_equijoin_supported):
                raise com.TranslationError('Non-equality join predicates, '
                                           'i.e. non-equijoins, are not '
                                           'supported')
class Union(DDL):

    """A compiled set-union of two table expressions.

    NOTE(review): ``distinct`` presumably selects UNION (deduplicating)
    versus UNION ALL semantics -- confirm against the code that formats
    this statement.
    """

    def __init__(self, left_table, right_table, distinct=False,
                 context=None):
        self.left = left_table
        self.right = right_table
        self.distinct = distinct
        self.context = context
|
glenioborges/ibis
|
ibis/sql/compiler.py
|
Python
|
apache-2.0
| 43,537
|
[
"VisIt"
] |
ea415545cff6a2a34e66bfbc5e8bfe513223e3ca6d7199ccea7d6a3aeb1545ad
|
from optparse import OptionParser

import numpy as np
import scipy as sp
from scipy import linalg
from scipy import stats
import statsmodels.api as sm
# Long-form usage text; main() appends this to the OptionParser usage string.
docstr = """
Demonstrates l1 regularization for likelihood models.
Use different models by setting mode = mnlogit, logit, or probit.
Examples
-------
$ python demo.py --get_l1_slsqp_results logit
>>> import demo
>>> demo.run_demo('logit')
The Story
---------
The maximum likelihood (ML) solution works well when the number of data
points is large and the noise is small. When the ML solution starts
"breaking", the regularized solution should do better.
The l1 Solvers
--------------
The solvers are slower than standard Newton, and sometimes have
convergence issues Nonetheless, the final solution makes sense and
is often better than the ML solution.
The standard l1 solver is fmin_slsqp and is included with scipy. It
sometimes has trouble verifying convergence when the data size is
large.
The l1_cvxopt_cp solver is part of CVXOPT and this package needs to be
installed separately. It works well even for larger data sizes.
"""
def main():
    """
    Provides a CLI for the demo: parse options, then dispatch to run_demo.
    """
    usage = "usage: %prog [options] mode"
    usage += '\n'+docstr
    parser = OptionParser(usage=usage)
    # base_alpha
    parser.add_option("-a", "--base_alpha",
            help="Size of regularization param (the param actually used will "\
                    "automatically scale with data size in this demo) "\
                    "[default: %default]",
            dest='base_alpha', action='store', type='float', default=0.01)
    # num_samples
    parser.add_option("-N", "--num_samples",
            help="Number of data points to generate for fit "\
                    "[default: %default]",
            dest='N', action='store', type='int', default=500)
    # get_l1_slsqp_results
    parser.add_option("--get_l1_slsqp_results",
            help="Do an l1 fit using slsqp. [default: %default]", \
            action="store_true",dest='get_l1_slsqp_results', default=False)
    # get_l1_cvxopt_results
    parser.add_option("--get_l1_cvxopt_results",
            help="Do an l1 fit using cvxopt. [default: %default]", \
            action="store_true",dest='get_l1_cvxopt_results', default=False)
    # num_nonconst_covariates
    parser.add_option("--num_nonconst_covariates",
            help="Number of covariates that are not constant "\
                    "(a constant will be prepended) [default: %default]",
            dest='num_nonconst_covariates', action='store',
            type='int', default=10)
    # noise_level
    parser.add_option("--noise_level",
            help="Level of the noise relative to signal [default: %default]",
            dest='noise_level', action='store', type='float',
            default=0.2)
    # cor_length
    parser.add_option("--cor_length",
            help="Correlation length of the (Gaussian) independent variables"\
                    "[default: %default]",
            dest='cor_length', action='store', type='float',
            default=2)
    # num_zero_params
    parser.add_option("--num_zero_params",
            help="Number of parameters equal to zero for every target in "\
                    "logistic regression examples. [default: %default]",
            dest='num_zero_params', action='store', type='int',
            default=8)
    # num_targets
    parser.add_option("-J", "--num_targets",
            help="Number of choices for the endogenous response in "\
                    "multinomial logit example [default: %default]",
            dest='num_targets', action='store', type='int', default=3)
    # print_summaries
    parser.add_option("-s", "--print_summaries",
            help="Print the full fit summary. [default: %default]", \
            action="store_true",dest='print_summaries', default=False)
    # save_arrays
    parser.add_option("--save_arrays",
            help="Save exog/endog/true_params to disk for future use. "\
                    "[default: %default]",
            action="store_true",dest='save_arrays', default=False)
    # load_old_arrays
    parser.add_option("--load_old_arrays",
            help="Load exog/endog/true_params arrays from disk. "\
                    "[default: %default]",
            action="store_true",dest='load_old_arrays', default=False)

    (options, args) = parser.parse_args()

    # Validate positional arguments through the parser rather than `assert`:
    # asserts are stripped under `python -O` and give an unhelpful traceback.
    if len(args) != 1:
        parser.error("exactly one mode argument is required "
                     "(logit, mnlogit, or probit)")
    mode = args[0].lower()

    run_demo(mode, **options.__dict__)
def run_demo(mode, base_alpha=0.01, N=500, get_l1_slsqp_results=False,
             get_l1_cvxopt_results=False, num_nonconst_covariates=10,
             noise_level=0.2, cor_length=2, num_zero_params=8, num_targets=3,
             print_summaries=False, save_arrays=False, load_old_arrays=False):
    """
    Run the demo and print results.

    Parameters
    ----------
    mode : str
        either 'logit', 'mnlogit', or 'probit'
    base_alpha : Float
        Size of regularization param (the param actually used will
        automatically scale with data size in this demo)
    N : int
        Number of data points to generate for fit
    get_l1_slsqp_results : bool,
        Do an l1 fit using slsqp.
    get_l1_cvxopt_results : bool
        Do an l1 fit using cvxopt
    num_nonconst_covariates : int
        Number of covariates that are not constant
        (a constant will be prepended)
    noise_level : float (non-negative)
        Level of the noise relative to signal
    cor_length : float (non-negative)
        Correlation length of the (Gaussian) independent variables
    num_zero_params : int
        Number of parameters equal to zero for every target in logistic
        regression examples.
    num_targets : int
        Number of choices for the endogenous response in multinomial logit
        example
    print_summaries : bool
        print the full fit summary.
    save_arrays : bool
        Save exog/endog/true_params to disk for future use.
    load_old_arrays
        Load exog/endog/true_params arrays from disk.
    """
    if mode != 'mnlogit':
        print("Setting num_targets to 2 since mode != 'mnlogit'")
        num_targets = 2
    models = {
        'logit': sm.Logit, 'mnlogit': sm.MNLogit, 'probit': sm.Probit}
    endog_funcs = {
        'logit': get_logit_endog, 'mnlogit': get_logit_endog,
        'probit': get_probit_endog}
    # The regularization parameter
    # Here we scale it with N for simplicity.  In practice, you should
    # use cross validation to pick alpha
    # NOTE: use numpy directly here -- the scipy top-level aliases of numpy
    # functions (sp.ones, sp.rand, ...) were deprecated and then removed
    # from scipy, so the old sp.* spellings fail on modern scipy.
    alpha = base_alpha * N * np.ones((num_nonconst_covariates+1, num_targets-1))
    alpha[0,:] = 0  # Do not regularize the intercept

    #### Make the data and model
    exog = get_exog(N, num_nonconst_covariates, cor_length)
    exog = sm.add_constant(exog)
    true_params = np.random.rand(num_nonconst_covariates+1, num_targets-1)
    if num_zero_params:
        true_params[-num_zero_params:, :] = 0
    endog = endog_funcs[mode](true_params, exog, noise_level)

    endog, exog, true_params = save_andor_load_arrays(
        endog, exog, true_params, save_arrays, load_old_arrays)
    model = models[mode](endog, exog)

    #### Get the results and print
    results = run_solvers(model, true_params, alpha,
        get_l1_slsqp_results, get_l1_cvxopt_results, print_summaries)

    summary_str = get_summary_str(results, true_params, get_l1_slsqp_results,
        get_l1_cvxopt_results, print_summaries)

    print(summary_str)
def run_solvers(model, true_params, alpha, get_l1_slsqp_results,
        get_l1_cvxopt_results, print_summaries):
    """
    Fit *model* with maximum likelihood and, optionally, with the requested
    l1-regularized solvers; return all fits keyed by solver name.

    Works the same for any l1 penalized likelihood model.
    """
    #### Train the models
    # Maximum-likelihood fit is always computed; it also warm-starts the
    # regularized fits below.
    ml_fit = model.fit(method='newton')
    fits = {'results_ML': ml_fit}

    warm_start = ml_fit.params.ravel(order='F')
    if get_l1_slsqp_results:
        fits['results_l1_slsqp'] = model.fit_regularized(
            method='l1', alpha=alpha, maxiter=1000,
            start_params=warm_start, retall=True)
    if get_l1_cvxopt_results:
        fits['results_l1_cvxopt_cp'] = model.fit_regularized(
            method='l1_cvxopt_cp', alpha=alpha, maxiter=50,
            start_params=warm_start, retall=True, feastol=1e-5)

    return fits
def get_summary_str(results, true_params, get_l1_slsqp_results,
        get_l1_cvxopt_results, print_summaries):
    """
    Gets a string summarizing the results.

    Builds: a short RMS-error section for every fit present, the true
    parameters, then either the full model summaries (print_summaries=True)
    or just the fitted parameter arrays.
    """
    #### Extract specific results
    results_ML = results['results_ML']
    RMSE_ML = get_RMSE(results_ML, true_params)
    if get_l1_slsqp_results:
        results_l1_slsqp = results['results_l1_slsqp']
    if get_l1_cvxopt_results:
        results_l1_cvxopt_cp = results['results_l1_cvxopt_cp']
    #### Format summaries
    # Short summary
    print_str = '\n\n=========== Short Error Summary ============'
    print_str += '\n\n The maximum likelihood fit RMS error = %.4f' % RMSE_ML
    if get_l1_slsqp_results:
        RMSE_l1_slsqp = get_RMSE(results_l1_slsqp, true_params)
        print_str += '\n The l1_slsqp fit RMS error = %.4f' % RMSE_l1_slsqp
    if get_l1_cvxopt_results:
        RMSE_l1_cvxopt_cp = get_RMSE(results_l1_cvxopt_cp, true_params)
        print_str += '\n The l1_cvxopt_cp fit RMS error = %.4f' % RMSE_l1_cvxopt_cp
    # Parameters
    print_str += '\n\n\n============== Parameters ================='
    print_str += "\n\nTrue parameters: \n%s" % true_params
    # Full summary
    if print_summaries:
        print_str += '\n' + results_ML.summary().as_text()
        if get_l1_slsqp_results:
            print_str += '\n' + results_l1_slsqp.summary().as_text()
        if get_l1_cvxopt_results:
            print_str += '\n' + results_l1_cvxopt_cp.summary().as_text()
    else:
        print_str += '\n\nThe maximum likelihood params are \n%s' % results_ML.params
        if get_l1_slsqp_results:
            print_str += '\n\nThe l1_slsqp params are \n%s' % results_l1_slsqp.params
        if get_l1_cvxopt_results:
            print_str += '\n\nThe l1_cvxopt_cp params are \n%s' % \
                    results_l1_cvxopt_cp.params
    # Return
    return print_str
def save_andor_load_arrays(
        endog, exog, true_params, save_arrays, load_old_arrays):
    """Optionally persist and/or restore the demo arrays.

    If save_arrays, write endog/exog/true_params to .npy files in the
    current directory; if load_old_arrays, replace them with the previously
    saved versions.  Returns (endog, exog, true_params).

    NOTE: uses numpy's save/load directly -- ``sp.save``/``sp.load`` were
    deprecated scipy aliases of the numpy functions and no longer exist in
    modern scipy.
    """
    if save_arrays:
        np.save('endog.npy', endog)
        np.save('exog.npy', exog)
        np.save('true_params.npy', true_params)
    if load_old_arrays:
        endog = np.load('endog.npy')
        exog = np.load('exog.npy')
        true_params = np.load('true_params.npy')
    return endog, exog, true_params
def get_RMSE(results, true_params):
    """
    Gets the root mean square error of the fitted parameters, normalized by
    the Euclidean norm of the true parameters.

    NOTE: uses ``np.sqrt`` -- ``sp.sqrt`` was a deprecated scipy alias of
    the numpy function and no longer exists in modern scipy.
    """
    diff = results.params.reshape(true_params.shape) - true_params
    raw_RMSE = np.sqrt(((diff)**2).sum())
    param_norm = np.sqrt((true_params**2).sum())
    return raw_RMSE / param_norm
def get_logit_endog(true_params, exog, noise_level):
    """
    Gets an endogenous response that is consistent with the true_params,
    perturbed by noise at noise_level.

    BUG FIX: the noise term was previously computed but never applied, so
    ``noise_level`` had no effect; it is now added to the linear predictor
    before the class probabilities are formed.  (scipy's deprecated numpy
    aliases sp.dot/sp.randn/... are replaced by numpy calls.)
    """
    N = exog.shape[0]
    ### Create the probability of entering the different classes,
    ### given exog and true_params
    Xdotparams = np.dot(exog, true_params)
    Xdotparams = Xdotparams + noise_level * np.random.randn(*Xdotparams.shape)
    # Class 0 is the baseline with linear index 0 (hence the column of ones)
    eXB = np.column_stack((np.ones(len(Xdotparams)), np.exp(Xdotparams)))
    class_probabilities = eXB / eXB.sum(1)[:, None]

    ### Create the endog: sample each observation's class from its
    ### categorical distribution via inverse-CDF sampling
    cdf = class_probabilities.cumsum(axis=1)
    endog = np.zeros(N)
    for i in range(N):
        endog[i] = np.searchsorted(cdf[i, :], np.random.rand())

    return endog
def get_probit_endog(true_params, exog, noise_level):
    """
    Gets an endogenous response that is consistent with the true_params,
    perturbed by noise at noise_level.

    BUG FIXES: (1) the noise term was previously computed but never
    applied, so ``noise_level`` had no effect; (2) the private
    ``stats.norm._cdf`` is replaced by the public ``stats.norm.cdf``.
    """
    N = exog.shape[0]
    ### Create the probability of entering the different classes,
    ### given exog and true_params
    Xdotparams = np.dot(exog, true_params)
    Xdotparams = Xdotparams + noise_level * np.random.randn(*Xdotparams.shape)

    ### Create the endog.  P(class 0) = Phi(-X.b), so drawing
    ### r ~ U[0,1) and searchsorting gives P(endog=1) = Phi(X.b).
    cdf = stats.norm.cdf(-Xdotparams)
    endog = np.zeros(N)
    for i in range(N):
        endog[i] = np.searchsorted(cdf[i, :], np.random.rand())

    return endog
def get_exog(N, num_nonconst_covariates, cor_length):
    """
    Returns an exog array with correlations determined by cor_length.

    The covariance matrix of exog will have (asymptotically, as
    :math:'N\\to\\inf')

    .. math:: Cov[i,j] = \\exp(-|i-j| / cor_length)

    Higher cor_length makes the problem more ill-posed, and easier to screw
    up with noise.

    BEWARE: With very long correlation lengths, you often get a singular KKT
    matrix (during the l1_cvxopt_cp fit)

    NOTE: the deprecated scipy aliases (sp.randn, sp.zeros, ...) are
    replaced by their numpy originals.
    """
    ## Create the noiseless exog
    uncorrelated_exog = np.random.randn(N, num_nonconst_covariates)
    if cor_length == 0:
        exog = uncorrelated_exog
    else:
        cov_matrix = np.zeros((num_nonconst_covariates, num_nonconst_covariates))
        j = np.arange(num_nonconst_covariates)
        for i in range(num_nonconst_covariates):
            cov_matrix[i, :] = np.exp(-np.fabs(i - j) / cor_length)
        # Upper-triangular Cholesky factor: cov_matrix = np.dot(chol.T, chol)
        chol = linalg.cholesky(cov_matrix)
        exog = np.dot(uncorrelated_exog, chol)
    ## Return
    return exog
# Script entry point: parse CLI options and run the requested demo.
if __name__ == '__main__':
    main()
|
statsmodels/statsmodels
|
statsmodels/examples/l1_demo/demo.py
|
Python
|
bsd-3-clause
| 13,470
|
[
"Gaussian"
] |
9a6e36b1400cfcb7bd24a8bb09bf57e506a4efb12b9ed25e0b178523b5ec4963
|
"""
Cubes
=====
Tools to deal with spectroscopic data cubes.
Some features in Cubes require additional packages:
* smoothing - requires agpy_\'s smooth and parallel_map routines
* `pyregion <git://github.com/astropy/pyregion.git>`_
The 'grunt work' is performed by the :py:mod:`cubes` module
"""
from __future__ import print_function
import time
import sys
import traceback
import numpy as np
import types
import copy
import itertools
from ..specwarnings import warn,PyspeckitWarning
import astropy
from astropy.io import fits
from astropy import log
from astropy import wcs
from astropy import units
from astropy.utils.console import ProgressBar
from six import iteritems, string_types
from functools import wraps
# import parent package
from .. import spectrum
from ..spectrum import smooth
from ..spectrum.units import (generate_xarr, SpectroscopicAxis,
SpectroscopicAxes)
from ..parallel_map import parallel_map
from ..spectrum import history
# import local things
from . import mapplot
from . import cubes
def not_for_cubes(func):
    """Decorator warning that *func* acts only on the currently selected
    spectrum (chosen with `set_spectrum`/`set_apspec`), not on the whole
    cube.

    BUG FIXES: the ``{0}`` placeholder was never formatted (the warning was
    emitted with a literal ``{0}``); it is now filled with the wrapped
    function's name.  The wrapper also silently dropped keyword arguments;
    it now forwards ``**kwargs``.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        warn("This operation ({0}) operates on the spectrum selected "
             "from the cube, e.g. with `set_spectrum` or `set_apspec`"
             ", it does not operate on the whole cube.".format(func.__name__),
             PyspeckitWarning)
        return func(*args, **kwargs)
    return wrapper
class Cube(spectrum.Spectrum):
    def __init__(self, filename=None, cube=None, xarr=None, xunit=None,
                 errorcube=None, header=None, x0=0, y0=0,
                 maskmap=None,
                 **kwargs):
        """
        A pyspeckit Cube object.  Can be created from a FITS file on disk or
        from an array or a `spectral_cube.SpectralCube` object.  If an array
        is used to insantiate the cube, the `xarr` keyword must be given,
        specifying the X-axis units

        Parameters
        ----------
        filename : str, optional
            The name of a FITS file to open and read from.  Must be 3D
        cube : `np.ndarray`, `spectral_cube.SpectralCube`, or \
`astropy.units.Quantity`
            The data from which to instantiate a Cube object.  If it is
            an array or an astropy Quantity (which is an array with attached
            units), the X-axis must be specified.  If this is given as a
            SpectralCube object, the X-axis and units should be handled
            automatically.
        xarr : `np.ndarray` or `astropy.units.Quantity`, optional
            The X-axis of the spectra from each cube.  This actually
            corresponds to axis 0, or what we normally refer to as the Z-axis
            of the cube, but it indicates the X-axis in a plot of intensity vs
            wavelength.  The units for this array are specified in the `xunit`
            keyword unless a `~astropy.units.Quantity` is given.
        xunit : str, optional
            The unit of the ``xarr`` array if ``xarr`` is given as a numpy
            array
        errorcube : `np.ndarray`, `spectral_cube.SpectralCube`,\
 or `~astropy.units.Quantity`, optional
            A cube with the same shape as the input cube providing the 1-sigma
            error for each voxel.  This can be specified more efficiently as an
            error map for most use cases, but that approach has not yet been
            implemented.  However, you can pass a 2D error map to `fiteach`.
        header : `fits.Header` or dict, optional
            The header associated with the data.  Only needed if the cube is
            given as an array or a quantity.
        x0, y0 : int
            The initial spectrum to use.  The `Cube` object can be treated as
            a `pyspeckit.Spectrum` object, with all the associated tools
            (plotter, fitter) using the `set_spectrum` method to select a pixel
            from the cube to plot and fit.  However, it is generally more sensible
            to extract individual spectra and treat them separately using the
            `get_spectrum` method, so these keywords MAY BE DEPRECATED in the
            future.
        maskmap : `np.ndarray`, optional
            A boolean mask map, where ``True`` implies that the data are good.
            This will be used for both plotting using `mapplot` and fitting
            using `fiteach`.
        """
        if filename is not None:
            # Everything (including this __init__) is redone by load_fits
            self.load_fits(filename)
            return
        else:
            if hasattr(cube, 'spectral_axis'):
                # Load from a SpectralCube instance
                self.cube = cube.hdu.data
                # Prefer the BUNIT metadata when the SpectralCube itself has
                # no (or dimensionless) unit
                if (cube.unit in ('undefined', units.dimensionless_unscaled)
                    and 'BUNIT' in cube._meta):
                    self.unit = cube._meta['BUNIT']
                else:
                    self.unit = cube.unit
                log.debug("Self.unit: {0}".format(self.unit))
                if xarr is None:
                    # Copy the spectral axis if it does not own its data, so
                    # the Cube's xarr is independent of the SpectralCube
                    if cube.spectral_axis.flags['OWNDATA']:
                        xarr = SpectroscopicAxis(cube.spectral_axis,
                                                 unit=cube.spectral_axis.unit,
                                                 refX=cube.wcs.wcs.restfrq,
                                                 refX_unit='Hz')
                    else:
                        xarr = SpectroscopicAxis(cube.spectral_axis.copy(),
                                                 unit=cube.spectral_axis.unit,
                                                 refX=cube.wcs.wcs.restfrq,
                                                 refX_unit='Hz')
                if header is None:
                    header = cube.header
            elif hasattr(cube, 'unit'):
                # An astropy Quantity: split into raw array + unit
                self.cube = cube.value
                self.unit = cube.unit
            else:
                self.cube = cube
            if hasattr(errorcube, 'spectral_axis'):
                # Load from a SpectralCube instance
                self.errorcube = errorcube.hdu.data
            elif hasattr(errorcube, 'unit'):
                self.errorcube = errorcube.value
            else:
                self.errorcube = errorcube
        if hasattr(xarr, 'flags'):
            log.debug("XARR flags: {0}".format(xarr.flags))
        self.xarr = generate_xarr(xarr, unit=xunit)
        if hasattr(xarr, 'flags'):
            log.debug("self.xarr flags: {0}".format(xarr.flags))
        self.header = header
        self.error = None
        if self.cube is not None:
            # Start pointed at the (x0, y0) spectrum
            self.data = self.cube[:,int(y0),int(x0)]
        if not hasattr(self, '_unit'):
            self.unit = units.dimensionless_unscaled
        log.debug("Self.unit before header: {0}".format(self.unit))
        if self.header is not None:
            self.parse_header(self.header)
        else:
            log.debug("self.header is None: {0}".format(self.header))
            self.unit = 'undefined'
            self.header = fits.Header()
        log.debug("Self.unit after header: {0}".format(self.unit))
        if maskmap is not None:
            if maskmap.ndim != 2:
                raise ValueError("Mask map must be two-dimensional.")
            self.maskmap = maskmap
        else:
            # Default: all spatial pixels are good
            self.maskmap = np.ones(self.cube.shape[1:],dtype='bool')
        if isinstance(filename,str):
            self.fileprefix = filename.rsplit('.', 1)[0]    # Everything prior to .fits or .txt
        else:
            self.fileprefix = "pyfitsHDU"
        # Wire up the standard pyspeckit machinery (plotter, fitters, ...)
        self.plotter = spectrum.plotters.Plotter(self)
        self._register_fitters()
        self.specfit = spectrum.fitters.Specfit(self,Registry=self.Registry)
        self.baseline = spectrum.baseline.Baseline(self)
        self.speclines = spectrum.speclines
        # Initialize writers
        self.writer = {}
        for writer in spectrum.writers.writers:
            self.writer[writer] = spectrum.writers.writers[writer](self)
        # Special.  This needs to be modified to be more flexible; for now I need it to work for nh3
        self.plot_special = None
        self.plot_special_kwargs = {}
        self._modelcube = None
        if self.header:
            self.wcs = wcs.WCS(self.header)
            self.wcs.wcs.fix()
            # 1-based axis numbers for the spectral and first celestial axes
            self._spectral_axis_number = self.wcs.wcs.spec+1
            self._first_cel_axis_num = np.where(self.wcs.wcs.axis_types // 1000 == 2)[0][0]+1
            # TODO: Improve this!!!
            self.system = ('galactic'
                           if ('CTYPE{0}'.format(self._first_cel_axis_num)
                               in self.header and 'GLON' in
                               self.header['CTYPE{0}'.format(self._first_cel_axis_num)])
                           else 'celestial')
        else:
            self._spectral_axis_number = 2
            self._first_cel_axis_num = 0
            self.system = 'PIXEL'
        self.mapplot = mapplot.MapPlotter(self)
def load_fits(self, fitsfile):
try:
from spectral_cube import SpectralCube
except ImportError:
raise ImportError("Could not import spectral_cube. As of pyspeckit"
" 0.17, spectral_cube is required for cube reading. "
"It can be pip installed or acquired from "
"spectral-cube.rtfd.org.")
mycube = SpectralCube.read(fitsfile)
return self.load_spectral_cube(mycube)
    def load_spectral_cube(self, cube):
        """
        Load the cube from a spectral_cube.SpectralCube object

        Re-runs ``__init__`` with the given cube, replacing this object's
        state in place.
        """
        self.__init__(cube=cube)
    def __repr__(self):
        # Summarize spectral range, flux range, units, shape, and identity
        return (r'<Cube object over spectral range %6.5g :'
                ' %6.5g %s and flux range = [%2.1f, %2.1f]'
                ' %s with shape %r at %s>' %
                (self.xarr.min().value, self.xarr.max().value, self.xarr.unit,
                 self.data.min(), self.data.max(), self.unit, self.cube.shape,
                 str(hex(self.__hash__()))))
    def copy(self,deep=True):
        """
        Create a copy of the spectral cube with its own plotter, fitter, etc.
        Useful for, e.g., comparing smoothed to unsmoothed data

        Parameters
        ----------
        deep : bool
            If True, also shallow-copy the data arrays (xarr, data, cube,
            maskmap, error, errorcube), the WCS, and the header, so the
            copy's arrays are independent objects.
        """
        newcube = copy.copy(self)
        newcube.header = copy.copy(self.header)
        deep_attr_lst = ['xarr', 'data', 'cube', 'maskmap',
                         'error', 'errorcube']
        if deep:
            for attr in deep_attr_lst:
                setattr(newcube, attr, copy.copy(getattr(self, attr)))
            if hasattr(self, 'wcs'):
                newcube.wcs = self.wcs.deepcopy()
            newcube.header = self.header.copy()
        # The plotter/fitter/baseline/mapplot helpers must be rebuilt so
        # they point at the copy, not the original
        newcube.plotter = self.plotter.copy(parent=newcube)
        newcube._register_fitters()
        newcube.specfit = self.specfit.copy(parent=newcube)
        newcube.specfit.Spectrum.plotter = newcube.plotter
        newcube.baseline = self.baseline.copy(parent=newcube)
        newcube.baseline.Spectrum.plotter = newcube.plotter
        newcube.mapplot = self.mapplot.copy(parent=newcube)
        newcube.mapplot.Cube = newcube
        return newcube
    def _update_header_from_xarr(self):
        """Uses SpectroscopicAxis' _make_header method to update Cube header,
        renaming the spectral WCS keywords (e.g. CRPIX1 -> CRPIX3) to the
        cube's spectral axis number."""
        self.header['NAXIS3'] = self.xarr.size
        self.xarr._make_header()
        sp_naxis = self._spectral_axis_number
        # change keywords in xarr._make_header from, e.g., CRPIX1 to CRPIX3
        # NOTE(review): str.replace swaps every '1' in the key, not just the
        # trailing one (e.g. a hypothetical 'CD1_1' -> 'CD3_3') -- confirm
        # that wcshead only contains keys where this is the desired result.
        newhead = {(key.replace('1', str(sp_naxis))
                    if key.endswith('1') else key): val
                   for key, val in iteritems(self.xarr.wcshead)}
        for key, val in iteritems(newhead):
            # FITS headers cannot store Quantity/Unit objects directly
            if isinstance(val, units.Quantity):
                newhead[key] = val.value
            elif (isinstance(val, units.CompositeUnit)
                  or isinstance(val, units.Unit)):
                newhead[key] = val.to_string()
            log.debug("Updating header: {}: {}".format(key, val))
        self.header.update(newhead)
    def slice(self, start=None, stop=None, unit='pixel', preserve_fits=False,
              copy=True, update_header=False):
        """
        Slice a cube along the spectral axis
        (equivalent to "spectral_slab" from the spectral_cube package)

        Parameters
        ----------
        start : numpy.float or int
            start of slice
        stop : numpy.float or int
            stop of slice
        unit : str
            allowed values are any supported physical unit, 'pixel'
        preserve_fits : bool
            If True, carry the existing fit parameters (specfit/baseline)
            over to the sliced cube.
        copy : bool
            Must be True; slicing in place is not implemented.
        update_header : bool
            modifies the header of the spectral cube according to the slice

        Returns
        -------
        A new, sliced Cube (the original is never modified).
        """
        x_in_units = self.xarr.as_unit(unit)
        start_ind = x_in_units.x_to_pix(start)
        stop_ind = x_in_units.x_to_pix(stop)
        if start_ind > stop_ind:
            start_ind, stop_ind = stop_ind, start_ind
        spectrum_slice = slice(start_ind,stop_ind)

        if not copy:
            raise NotImplementedError("Must copy when slicing a cube.")
        newcube = self.copy()
        # Slice every spectral-axis-aligned array consistently
        newcube.cube = newcube.cube[spectrum_slice,:,:]
        if hasattr(newcube,'errcube'):
            newcube.errcube = newcube.errcube[spectrum_slice,:,:]
        newcube.data = newcube.data[spectrum_slice]
        if newcube.error is not None:
            newcube.error = newcube.error[spectrum_slice]
        newcube.xarr = newcube.xarr[spectrum_slice]

        # create new specfit / baseline instances (otherwise they'll be the wrong length)
        newcube._register_fitters()
        newcube.baseline = spectrum.baseline.Baseline(newcube)
        newcube.specfit = spectrum.fitters.Specfit(newcube,Registry=newcube.Registry)
        if preserve_fits:
            newcube.specfit.modelpars = self.specfit.modelpars
            newcube.specfit.parinfo = self.specfit.parinfo
            newcube.baseline.baselinepars = self.baseline.baselinepars
            newcube.baseline.order = self.baseline.order

        # modify the header in the new cube
        if update_header:
            newcube._update_header_from_xarr()
            # create a new wcs instance from the updated header
            newcube.wcs = wcs.WCS(newcube.header)
            newcube.wcs.wcs.fix()
            newcube._spectral_axis_number = newcube.wcs.wcs.spec + 1
            newcube._first_cel_axis_num = np.where(newcube.wcs.wcs.axis_types
                                                   // 1000 == 2)[0][0] + 1

        return newcube
def __getitem__(self, indx):
"""
If [] is used on a cube, slice on the cube and use
the first dimension to slice on the xarr and the data
"""
return Cube(xarr=self.xarr.__getitem__(indx[0]),
cube=self.cube[indx],
errorcube=self.errorcube[indx] if self.errorcube else None,
maskmap=self.maskmap[indx[1:]] if self.maskmap is not None else None,
header=self.header
)
def set_spectrum(self, x, y):
self.data = self.cube[:,int(y),int(x)]
if self.errorcube is not None:
self.error = self.errorcube[:,int(y),int(x)]
    def plot_spectrum(self, x, y, plot_fit=False, **kwargs):
        """
        Fill the .data array with a real spectrum and plot it

        When ``plot_special`` is set, the pixel's spectrum is extracted and
        plotted through that callable instead, and this cube's plotter and
        specfit plot lists are redirected at the resulting spectra.
        """
        self.set_spectrum(x,y)
        if self.plot_special is None:
            self.plotter(**kwargs)
            if plot_fit:
                self.plot_fit(x,y)
            self.plotted_spectrum = self
        else:
            sp = self.get_spectrum(x,y)
            # Bind the special plotting callable to the extracted spectrum
            sp.plot_special = types.MethodType(self.plot_special, sp)
            combined_kwargs = dict(kwargs.items())
            combined_kwargs.update(self.plot_special_kwargs)
            self._spdict = sp.plot_special(**combined_kwargs)
            self.plotted_spectrum = sp
            self.plotter = sp.plotter
            # Refresh/plot lists must aggregate over every sub-spectrum
            self.plotter.refresh = lambda: [spi.plotter.refresh()
                                            for spi in self._spdict.values()]
            self.specfit.modelplot = [comp
                                      for spi in self._spdict.values()
                                      for comp in spi.specfit.modelplot]
            self.specfit._plotted_components = [comp
                                                for spi in self._spdict.values()
                                                for comp in spi.specfit._plotted_components]
    def plot_fit(self, x, y, silent=False, **kwargs):
        """
        If fiteach has been run, plot the best fit at the specified location

        Parameters
        ----------
        x : int
        y : int
            The x, y coordinates of the pixel (indices 2 and 1 respectively
            in numpy notation)
        silent : bool
            If True, suppress the "must run fiteach first" log message.
        """
        if not hasattr(self,'parcube'):
            if not silent:
                log.info("Must run fiteach before plotting a fit.  "
                         "If you want to fit a single spectrum, "
                         "use plot_spectrum() and specfit() directly.")
            return

        if self.plot_special is not None:
            # don't try to overplot a fit on a "special" plot
            # this is already handled in plot_spectrum
            return

        if not self.has_fit[int(y), int(x)]:
            # no fit to plot
            return

        # Rehydrate the specfit state from the per-pixel fit cubes
        self.specfit.modelpars = self.parcube[:,int(y),int(x)]
        if np.any(np.isnan(self.specfit.modelpars)):
            log.exception("Attempted to plot a model with NaN parameters.")
            return
        self.specfit.npeaks = self.specfit.fitter.npeaks
        self.specfit.model = self.specfit.fitter.n_modelfunc(self.specfit.modelpars,
                                                             **self.specfit.fitter.modelfunc_kwargs)(self.xarr)

        # set the parinfo values correctly for annotations
        self.specfit.parinfo.values = self.parcube[:,int(y),int(x)]
        self.specfit.parinfo.errors = self.errcube[:,int(y),int(x)]
        self.specfit.fitter.parinfo.values = self.parcube[:,int(y),int(x)]
        self.specfit.fitter.parinfo.errors = self.errcube[:,int(y),int(x)]
        #for pi,p,e in zip(self.specfit.parinfo,
        #                  self.specfit.modelpars,
        #                  self.errcube[:,int(y),int(x)]):
        #    try:
        #        pi['value'] = p
        #        pi['error'] = e
        #    except ValueError:
        #        # likely to happen for failed fits
        #        pass

        self.specfit.plot_fit(**kwargs)
    def plot_apspec(self, aperture, coordsys=None, reset_ylimits=True,
                    wunit='arcsec',
                    method='mean', **kwargs):
        """
        Extract an aperture using cubes.extract_aperture
        (defaults to Cube coordinates)

        Parameters
        ----------
        aperture : list
            A list of aperture parameters, e.g.

            * For a circular aperture, len(ap)=3:
                + ``ap = [xcen,ycen,radius]``
            * For an elliptical aperture, len(ap)=5:
                + ``ap = [xcen,ycen,height,width,PA]``
        coordsys : None or str
            The coordinate system of the aperture (e.g., galactic, fk5, None
            for pixel)
        method : 'mean' or 'sum'
            Either average over parellel spectra or sum them.
        """
        if self.plot_special is None:
            # Standard path: load the aperture spectrum into .data and plot
            self.set_apspec(aperture, coordsys=coordsys, method=method)
            self.plotter(reset_ylimits=reset_ylimits, **kwargs)
        else:
            #self.plot_special(reset_ylimits=reset_ylimits, **dict(kwargs.items()+self.plot_special_kwargs.items()))

            # Special path: extract a standalone spectrum and plot it with
            # the user-supplied callable
            sp = self.get_apspec(aperture, coordsys=coordsys, wunit=wunit, method=method)
            sp.plot_special = types.MethodType(self.plot_special, sp)
            combined_kwargs = dict(kwargs.items())
            combined_kwargs.update(self.plot_special_kwargs)
            sp.plot_special(reset_ylimits=reset_ylimits, **combined_kwargs)
    def get_spectrum(self, x, y):
        """
        Very simple: get the spectrum at coordinates x,y
        (inherits fitter from self)

        Returns a Spectrum instance (the original docstring said
        SpectroscopicAxis, but a ``spectrum.Spectrum`` is constructed and
        returned below).
        """
        ct = 'CTYPE{0}'.format(self._first_cel_axis_num)
        # Build a spectrum-centered header from the cube header.  HISTORY
        # cards are dropped (they can be numerous); if the celestial CTYPE
        # card is missing, fall back to a 'CAR' projection.
        header = cubes.speccen_header(fits.Header(cards=[(k,v) for k,v in
                                                         iteritems(self.header)
                                                         if k != 'HISTORY']),
                                      lon=x, lat=y, system=self.system,
                                      proj=(self.header[ct][-3:]
                                            if ct in self.header else
                                            'CAR'))
        # Extract the (y, x) spectrum and its per-channel error, if an
        # error cube is available.
        sp = spectrum.Spectrum(xarr=self.xarr.copy(), data=self.cube[:,int(y),int(x)],
                               header=header, error=(self.errorcube[:,int(y),int(x)] if
                                                     self.errorcube is not None
                                                     else None),
                               unit=self.unit,
                               model_registry=self.Registry,
                               )
        # Inherit the cube's fit configuration so the extracted spectrum can
        # be fit/plotted consistently.
        sp.specfit = self.specfit.copy(parent=sp, registry=sp.Registry)
        # explicitly re-do this (test)
        sp.specfit.includemask = self.specfit.includemask.copy()
        sp.specfit.Spectrum = sp
        if hasattr(self, 'parcube'):
            if self.has_fit[int(y),int(x)]:
                # only set parameters if they're valid
                sp.specfit.modelpars = self.parcube[:,int(y),int(x)]
                if hasattr(self.specfit,'parinfo') and self.specfit.parinfo is not None:
                    # set the parinfo values correctly for annotations
                    for pi,p,e in zip(sp.specfit.parinfo, sp.specfit.modelpars, self.errcube[:,int(y),int(x)]):
                        try:
                            pi['value'] = p
                            pi['error'] = e
                        except ValueError:
                            # likely to happen for failed fits (out-of-limit
                            # values); skip those parameters
                            pass
            if hasattr(self.specfit,'fitter') and self.specfit.fitter is not None:
                sp.specfit.fitter.mpp = sp.specfit.modelpars # also for annotations (differs depending on which function... sigh... need to unify)
                sp.specfit.npeaks = self.specfit.fitter.npeaks
                # NOTE(review): true division here yields a float npeaks on
                # Python 3 — confirm whether integer division was intended
                sp.specfit.fitter.npeaks = len(sp.specfit.modelpars) / sp.specfit.fitter.npars
                sp.specfit.fitter.parinfo = sp.specfit.parinfo
                try:
                    # regenerate the model spectrum from the stored parameters
                    sp.specfit.model = sp.specfit.fitter.n_modelfunc(sp.specfit.modelpars,
                                                                     **sp.specfit.fitter.modelfunc_kwargs)(sp.xarr)
                except ValueError:
                    # possibly invalid model parameters, just skip
                    sp.specfit.model = np.zeros_like(sp.data)
        return sp
    def get_apspec(self, aperture, coordsys=None, method='mean', **kwargs):
        """
        Extract an aperture using cubes.extract_aperture
        (defaults to Cube pixel coordinates) and return it as a new
        ``spectrum.Spectrum`` with aperture metadata recorded in its header.

        *aperture* [tuple or list] (x, y, radius)
            The aperture to use when extracting the data
        *coordsys* [ 'celestial' | 'galactic' | None]
            the coordinate system the aperture is specified in
            None indicates pixel coordinates (default)
        *wunit* [str]
            arcsec, arcmin, or degree (passed through **kwargs)
        """
        # WCS is only needed when the aperture is in world coordinates
        if coordsys is not None:
            wcs = self.mapplot.wcs
        else:
            wcs = None
        data = cubes.extract_aperture(self.cube, aperture,
                                      coordsys=coordsys,
                                      wcs=wcs,
                                      method=method,
                                      **kwargs)
        if self.errorcube is not None:
            # NOTE(review): the error extraction always passes mapplot.wcs,
            # unlike the data path which passes None for pixel coordinates —
            # confirm this asymmetry is intentional
            error = cubes.extract_aperture(self.errorcube, aperture,
                                           coordsys=coordsys,
                                           wcs=self.mapplot.wcs,
                                           method='error', **kwargs)
        else:
            error = None
        ct = 'CTYPE{0}'.format(self._first_cel_axis_num)
        # Build a spectrum-centered header at the aperture center; HISTORY
        # cards are dropped.  NOTE(review): unlike get_spectrum, there is no
        # 'CAR' fallback here, so a missing CTYPE card raises KeyError.
        header = cubes.speccen_header(fits.Header(cards=[(k,v) for k,v in
                                                         iteritems(self.header)
                                                         if k != 'HISTORY']),
                                      lon=aperture[0],
                                      lat=aperture[1],
                                      system=self.system,
                                      proj=self.header[ct][-3:])
        # record the aperture geometry: circular (3 params) or elliptical (5)
        if len(aperture) == 3:
            header['APRADIUS'] = aperture[2]
        if len(aperture) == 5:
            header['APMAJ'] = aperture[2]
            header['APMIN'] = aperture[3]
            # effective radius = geometric mean of the two axes
            header['APREFF'] = (aperture[2]*aperture[3])**0.5
            header['APPA'] = aperture[4]
        sp = spectrum.Spectrum(xarr=self.xarr.copy(),
                               data=data,
                               error=error,
                               header=header,
                               model_registry=self.Registry,
                               )
        # inherit the cube's fit configuration
        sp.specfit = self.specfit.copy(parent=sp, registry=sp.Registry)
        return sp
def set_apspec(self, aperture, coordsys=None, method='mean'):
"""
Extract an aperture using cubes.extract_aperture
(defaults to Cube coordinates)
"""
if coordsys is not None:
self.data = cubes.extract_aperture(self.cube, aperture,
coordsys=coordsys,
wcs=self.mapplot.wcs,
method=method)
else:
self.data = cubes.extract_aperture(self.cube, aperture,
coordsys=None, method=method)
    def get_modelcube(self, update=False, multicore=1):
        """
        Return or generate a "model cube", which will have the same shape as
        the ``.cube`` but will have spectra generated from the fitted model.

        If the model cube does not yet exist, one will be generated

        Parameters
        ----------
        update : bool
            If the cube has already been computed, set this to ``True`` to
            recompute the model.
        multicore: int
            if >1, try to use multiprocessing via parallel_map to run on multiple cores
        """
        if self._modelcube is None or update:
            yy,xx = np.indices(self.parcube.shape[1:])
            # a pixel is "valid" if any fitted parameter is nonzero and no
            # parameter is NaN/inf (nonzero-ness is the validity heuristic
            # here rather than self.has_fit)
            nanvals = np.any(~np.isfinite(self.parcube),axis=0)
            isvalid = np.any(self.parcube, axis=0) & ~nanvals
            valid_pixels = zip(xx[isvalid], yy[isvalid])
            # non-fit pixels stay NaN
            self._modelcube = np.full_like(self.cube, np.nan)
            def model_a_pixel(xy):
                # evaluate the fitted model for a single (x, y) position
                x,y = int(xy[0]), int(xy[1])
                self._modelcube[:,y,x] = self.specfit.get_full_model(pars=self.parcube[:,y,x])
                return ((x,y), self._modelcube[:,y,x])
            if multicore > 1:
                sequence = [(x,y) for x,y in valid_pixels]
                result = parallel_map(model_a_pixel, sequence, numcores=multicore)
                merged_result = [core_result for core_result in result
                                 if core_result is not None]
                # writes made inside worker processes do not propagate back,
                # so merge the returned per-pixel models into the parent cube
                for mr in merged_result:
                    ((x,y), model) = mr
                    x = int(x)
                    y = int(y)
                    self._modelcube[:,y,x] = model
            else:
                # progressbar doesn't work with zip; I'm therefore giving up on
                # "efficiency" in memory by making a list here.
                for xy in ProgressBar(list(valid_pixels)):
                    model_a_pixel(xy)
        return self._modelcube
    def fiteach(self, errspec=None, errmap=None, guesses=(), verbose=True,
                verbose_level=1, quiet=True, signal_cut=3, usemomentcube=None,
                blank_value=0, integral=False, direct_integral=False,
                absorption=False, use_nearest_as_guess=False,
                use_neighbor_as_guess=False, start_from_point=(0,0),
                multicore=1, position_order=None, continuum_map=None,
                prevalidate_guesses=False, maskmap=None,
                skip_failed_fits=False,
                **fitkwargs):
        """
        Fit a spectrum to each valid pixel in the cube

        For guesses, priority is *use_nearest_as_guess*, *usemomentcube*,
        *guesses*, None

        Once you have successfully run this function, the results will be
        stored in the ``.parcube`` and ``.errcube`` attributes, which are each
        cubes of shape ``[npars, ny, nx]``, where npars is the number of fitted
        parameters and ``nx``, ``ny`` are the shape of the map.  ``errcube``
        contains the errors on the fitted parameters (1-sigma, as returned from
        the Levenberg-Marquardt fit's covariance matrix).  You can use the
        attribute ``has_fit``, which is a map of shape ``[ny,nx]`` to find
        which pixels have been successfully fit.

        Parameters
        ----------
        use_nearest_as_guess: bool
            Unless the fitted point is the first, it will find the nearest
            other point with a successful fit and use its best-fit parameters
            as the guess
        use_neighbor_as_guess: bool
            Set this keyword to use the average best-fit parameters from
            neighboring positions with successful fits as the guess
        start_from_point: tuple(int,int)
            Either start from the center or from a point defined by a tuple.
            Work outward from that starting point.
        position_order: ndarray[naxis=2]
            2D map of region with pixel values indicating the order in which
            to carry out the fitting.  Any type with increasing pixel values.
        guesses: tuple or ndarray[naxis=3]
            Either a tuple/list of guesses with len(guesses) = npars or a cube
            of guesses with shape [npars, ny, nx].
            NOT TRUE, but a good idea in principle:
            You can also use a dictionary of the form {(y,x): [list of length
            npars]}, where (y,x) specifies a pixel location. If the dictionary
            method is used, npars must be specified and it sets the length of
            the first parameter axis
        signal_cut: float
            Minimum signal-to-noise ratio to "cut" on (i.e., if peak in a given
            spectrum has s/n less than this value, ignore it)
        blank_value: float
            Value to replace non-fitted locations with.
        errmap: ndarray[naxis=2] or ndarray[naxis=3]
            A map of errors used for the individual pixels of the spectral
            cube. 2D errmap results in an equal weighting of each given
            spectrum, while a 3D array sets individual weights of each channel
        verbose: bool
        verbose_level: int
            Controls how much is output.
            0,1 - only changes frequency of updates in loop
            2 - print out messages when skipping pixels
            3 - print out messages when fitting pixels
            4 - specfit will be verbose
        multicore: int
            if >1, try to use multiprocessing via parallel_map to run on
            multiple cores
        continuum_map: np.ndarray
            Same shape as error map.  Subtract this from data before estimating
            noise.
        prevalidate_guesses: bool
            An extra check before fitting is run to make sure the guesses are
            all within the specified limits.  May be slow, so it is off by
            default.  It also should not be necessary, since careful checking
            is performed before each fit.
        maskmap : `np.ndarray`, optional
            A boolean mask map, where ``True`` implies that the data are good.
            This will be used for both plotting using `mapplot` and fitting
            using `fiteach`.  If ``None``, will use ``self.maskmap``.
        integral : bool
            If set, the integral of each spectral fit will be computed and
            stored in the attribute ``.integralmap``
        direct_integral : bool
            Return the integral of the *spectrum* (as opposed to the fitted
            model) over a range defined by the `integration_limits` if specified or
            `threshold` otherwise
        skip_failed_fits : bool
            Flag to forcibly skip failed fits that fail with "unknown error".
            Generally, you do not want this on, but this is the
            'finger-over-the-engine-light' approach that will allow these
            incomprehensible failures to go by and just ignore them.  Keep
            an eye on how many of these you get: if it's just one or two
            out of hundreds, then maybe those are just pathological cases
            that can be ignored.  If it's a significant fraction, you probably
            want to take a different approach.
        """
        if 'multifit' in fitkwargs:
            warn("The multifit keyword is no longer required. All fits "
                 "allow for multiple components.", DeprecationWarning)
        if not hasattr(self.mapplot,'plane'):
            self.mapplot.makeplane()
        if maskmap is None:
            maskmap = self.maskmap
        # --- phase 1: determine which pixels are fittable ---
        yy,xx = np.indices(self.mapplot.plane.shape)
        if isinstance(self.mapplot.plane, np.ma.core.MaskedArray):
            OK = ((~self.mapplot.plane.mask) &
                  maskmap.astype('bool')).astype('bool')
        else:
            OK = (np.isfinite(self.mapplot.plane) &
                  maskmap.astype('bool')).astype('bool')
        # NAN guesses rule out the model too
        if hasattr(guesses,'shape') and guesses.shape[1:] == self.cube.shape[1:]:
            bad = np.isnan(guesses).sum(axis=0).astype('bool')
            OK &= (~bad)
        log.info("Fitting up to {0} spectra".format(OK.sum()))
        # --- phase 2: establish the fit ordering (spiral out from the start
        # point unless an explicit position_order map is given) ---
        if start_from_point == 'center':
            start_from_point = (xx.max()/2., yy.max()/2.)
        if hasattr(position_order,'shape') and position_order.shape == self.cube.shape[1:]:
            sort_distance = np.argsort(position_order.flat)
        else:
            d_from_start = ((xx-start_from_point[1])**2 + (yy-start_from_point[0])**2)**0.5
            sort_distance = np.argsort(d_from_start.flat)
        if use_neighbor_as_guess or use_nearest_as_guess:
            # distance-from-origin map, later np.roll'ed to each pixel
            distance = ((xx)**2 + (yy)**2)**0.5
        valid_pixels = list(zip(xx.flat[sort_distance][OK.flat[sort_distance]],
                                yy.flat[sort_distance][OK.flat[sort_distance]]))
        if len(valid_pixels) != len(set(valid_pixels)):
            raise ValueError("There are non-unique pixels in the 'valid pixel' list. "
                             "This should not be possible and indicates a major error.")
        elif len(valid_pixels) == 0:
            raise ValueError("No valid pixels selected.")
        if start_from_point not in valid_pixels:
            raise ValueError("The starting fit position is not among the valid"
                             " pixels. Check your selection criteria to make "
                             "sure you have not unintentionally excluded "
                             "this first fit pixel.")
        if verbose_level > 0:
            log.debug("Number of valid pixels: %i" % len(valid_pixels))
        # --- phase 3: determine npars and allocate the output cubes ---
        guesses_are_moments = (isinstance(guesses, string_types) and
                               guesses in ('moment','moments'))
        if guesses_are_moments or (usemomentcube and len(guesses)):
            if not hasattr(self, 'momentcube') and guesses_are_moments:
                self.momenteach()
            npars = self.momentcube.shape[0]
        else:
            npars = len(guesses)
            if npars == 0:
                raise ValueError("Parameter guesses are required.")
        self.parcube = np.zeros((npars,)+self.mapplot.plane.shape)
        self.errcube = np.zeros((npars,)+self.mapplot.plane.shape)
        if integral:
            self.integralmap = np.zeros((2,)+self.mapplot.plane.shape)
        # newly needed as of March 27, 2012. Don't know why.
        if 'fittype' in fitkwargs:
            self.specfit.fittype = fitkwargs['fittype']
        self.specfit.fitter = self.specfit.Registry.multifitters[self.specfit.fittype]
        # TODO: VALIDATE THAT ALL GUESSES ARE WITHIN RANGE GIVEN THE
        # FITKWARG LIMITS
        # array to store whether pixels have fits
        self.has_fit = np.zeros(self.mapplot.plane.shape, dtype='bool')
        self._counter = 0
        self._tracebacks = {}
        t0 = time.time()
        def fit_a_pixel(iixy):
            # Fit one (x, y) position; returns ((x,y), modelpars, modelerrs[,
            # integral]) or None when the pixel is skipped (e.g. low s/n).
            ii,x,y = iixy
            sp = self.get_spectrum(x,y)
            # very annoying - cannot use min/max without checking type
            # maybe can use np.asarray here?
            # cannot use sp.data.mask because it can be a scalar boolean,
            # which does unpredictable things.
            if hasattr(sp.data, 'mask') and not isinstance(sp.data.mask, (bool,
                                                                          np.bool_)):
                sp.data[sp.data.mask] = np.nan
                sp.error[sp.data.mask] = np.nan
                sp.data = np.array(sp.data)
                sp.error = np.array(sp.error)
            # --- error assignment priority: errspec > errmap > errorcube >
            # data std() fallback ---
            if errspec is not None:
                sp.error = errspec
            elif errmap is not None:
                if self.errorcube is not None:
                    raise ValueError("Either the 'errmap' argument or"
                                     " self.errorcube attribute should be"
                                     " specified, but not both.")
                if errmap.shape == self.cube.shape[1:]:
                    sp.error = np.ones(sp.data.shape) * errmap[int(y),int(x)]
                elif errmap.shape == self.cube.shape:
                    sp.error = errmap[:, int(y), int(x)]
            elif self.errorcube is not None:
                sp.error = self.errorcube[:, int(y), int(x)]
            else:
                if ii==0:
                    # issue the warning only once (ii==0), but always issue
                    warn("Using data std() as error. "
                         "If signal_cut is set, this can result in "
                         "some pixels not being fit.",
                         PyspeckitWarning)
                # sp.data==sp.data filters out NaNs before taking the std
                sp.error[:] = sp.data[sp.data==sp.data].std()
            if sp.error is None:
                raise TypeError("The Spectrum's error is unset. This should "
                                "not be possible. Please raise an Issue.")
            # --- signal-to-noise cut: skip weak pixels ---
            if signal_cut > 0 and not all(sp.error == 0):
                if continuum_map is not None:
                    with np.errstate(divide='raise'):
                        snr = (sp.data-continuum_map[int(y),int(x)]) / sp.error
                else:
                    with np.errstate(divide='raise'):
                        snr = sp.data / sp.error
                if absorption:
                    max_sn = np.nanmax(-1*snr)
                else:
                    max_sn = np.nanmax(snr)
                if max_sn < signal_cut:
                    if verbose_level > 1:
                        log.info("Skipped %4i,%4i (s/n=%0.2g)" % (x,y,max_sn))
                    return
                elif np.isnan(max_sn):
                    if verbose_level > 1:
                        log.info("Skipped %4i,%4i (s/n is nan; max(data)=%0.2g, min(error)=%0.2g)" %
                                 (x,y,np.nanmax(sp.data),np.nanmin(sp.error)))
                    return
                if verbose_level > 2:
                    log.info("Fitting %4i,%4i (s/n=%0.2g)" % (x,y,max_sn))
            else:
                max_sn = None
            sp.specfit.Registry = self.Registry # copy over fitter registry
            # Do some homework for local fits
            # Exclude out of bounds points
            xpatch, ypatch = get_neighbors(x,y,self.has_fit.shape)
            local_fits = self.has_fit[ypatch+y,xpatch+x]
            # --- select the guess (priority: nearest fit > neighbor average
            # > moments > guess cube > guess dict > flat guess list) ---
            if use_nearest_as_guess and self.has_fit.sum() > 0:
                if verbose_level > 1 and ii == 0 or verbose_level > 4:
                    log.info("Using nearest fit as guess")
                rolled_distance = np.roll(np.roll(distance, x, 0), y, 1)
                # If there's no fit, set its distance to be unreasonably large
                # so it will be ignored by argmin
                nearest_ind = np.argmin(rolled_distance+1e10*(~self.has_fit))
                nearest_x, nearest_y = xx.flat[nearest_ind],yy.flat[nearest_ind]
                if np.all(np.isfinite(self.parcube[:,nearest_y,nearest_x])):
                    gg = self.parcube[:,nearest_y,nearest_x]
                else:
                    log.exception("Pixel {0},{1} had a fit including a NaN: {2}"
                                  " so it will not be used as a guess for {3},{4}"
                                  .format(nearest_x, nearest_y, self.parcube[:, nearest_y, nearest_x],
                                          x, y))
                    gg = guesses
            elif use_neighbor_as_guess and np.any(local_fits):
                # Array is N_guess X Nvalid_nbrs so averaging over
                # Axis=1 is the axis of all valid neighbors
                gg = np.mean(self.parcube[:,
                                          (ypatch+y)[local_fits],
                                          (xpatch+x)[local_fits]], axis=1)
                if np.any(~np.isfinite(gg)):
                    log.exception("Pixel {0},{1} neighbors had non-finite guess: {2}"
                                  .format(x, y, gg))
                    gg = guesses
            elif guesses_are_moments and usemomentcube is False:
                raise ValueError("usemomentcube must be set to True")
            elif guesses_are_moments or (usemomentcube and len(guesses)):
                if not guesses_are_moments and ii == 0:
                    log.warn("guesses will be ignored because usemomentcube "
                             "was set to True.", PyspeckitWarning)
                if verbose_level > 1 and ii == 0:
                    log.info("Using moment cube")
                gg = self.momentcube[:,int(y),int(x)]
            elif hasattr(guesses,'shape') and guesses.shape[1:] == self.cube.shape[1:]:
                if verbose_level > 1 and ii == 0:
                    log.info("Using input guess cube")
                gg = guesses[:,int(y),int(x)]
            elif isinstance(guesses, dict):
                if verbose_level > 1 and ii == 0:
                    log.info("Using input guess dict")
                gg = guesses[(int(y),int(x))]
            else:
                if verbose_level > 1 and ii == 0:
                    log.info("Using input guess")
                gg = guesses
            # --- run the fit and record results ---
            if np.all(np.isfinite(gg)):
                try:
                    with np.errstate(divide='raise'):
                        sp.specfit(guesses=gg, quiet=verbose_level<=3,
                                   verbose=verbose_level>3, **fitkwargs)
                    self.parcube[:,int(y),int(x)] = sp.specfit.modelpars
                    self.errcube[:,int(y),int(x)] = sp.specfit.modelerrs
                    if np.any(~np.isfinite(sp.specfit.modelpars)):
                        log.exception("Fit result included nan for pixel {0},{1}: "
                                      "{2}".format(x, y, sp.specfit.modelpars))
                        success = False
                        # this is basically a debug statement to try to get the
                        # code to crash here
                        raise KeyboardInterrupt
                    else:
                        success = True
                except Exception as ex:
                    exc_traceback = sys.exc_info()[2]
                    self._tracebacks[(ii,x,y)] = exc_traceback
                    log.exception("Fit number %i at %i,%i failed on error %s" % (ii,x,y, str(ex)))
                    log.exception("Failure was in file {0} at line {1}".format(
                        exc_traceback.tb_frame.f_code.co_filename,
                        exc_traceback.tb_lineno,))
                    traceback.print_tb(exc_traceback)
                    log.exception("Guesses were: {0}".format(str(gg)))
                    log.exception("Fitkwargs were: {0}".format(str(fitkwargs)))
                    success = False
                    if isinstance(ex, KeyboardInterrupt):
                        raise ex
                # keep this out of the 'try' statement
                if integral and success:
                    self.integralmap[:,int(y),int(x)] = sp.specfit.integral(direct=direct_integral,
                                                                            return_error=True)
                self.has_fit[int(y),int(x)] = success
            else:
                log.exception("Fit number {0} at {1},{2} had non-finite guesses {3}"
                              .format(ii, x, y, guesses))
                self.has_fit[int(y),int(x)] = False
                self.parcube[:,int(y),int(x)] = blank_value
                self.errcube[:,int(y),int(x)] = blank_value
                if integral:
                    self.integralmap[:,int(y),int(x)] = blank_value
            self._counter += 1
            if verbose:
                # NOTE(review): min(10**(3-verbose_level), 1) is always <= 1,
                # so this modulus is 1 and the message prints every fit —
                # confirm whether max() was intended (cf. momenteach)
                if ii % (min(10**(3-verbose_level),1)) == 0:
                    snmsg = " s/n=%5.1f" % (max_sn) if max_sn is not None else ""
                    npix = len(valid_pixels)
                    pct = 100 * (ii+1.0)/float(npix)
                    log.info("Finished fit %6i of %6i at (%4i,%4i)%s. Elapsed time is %0.1f seconds. %%%01.f" %
                             (ii+1, npix, x, y, snmsg, time.time()-t0, pct))
            if sp.specfit.modelerrs is None:
                # modelerrs never being set implies the fitter never finished
                log.exception("Fit number %i at %i,%i failed with no specific error." % (ii,x,y))
                if hasattr(sp.specfit, 'mpfit_status'):
                    log.exception("mpfit status is {0}".format(sp.specfit.mpfit_status))
                log.exception("The problem is that the model errors were never set, "
                              "which implies that the fit simply failed to finish.")
                log.exception("The string representation of `sp.specfit.parinfo` is: {0}"
                              .format(sp.specfit.parinfo))
                log.exception("The string representation of `sp.specfit.fitter.parinfo` is: {0}"
                              .format(sp.specfit.fitter.parinfo))
                log.exception("modelpars is: {0}".format(str(sp.specfit.modelpars)))
                log.exception("cube modelpars are: {0}".format(str(self.parcube[:,int(y),int(x)])))
                log.exception("cube modelerrs are: {0}".format(str(self.errcube[:,int(y),int(x)])))
                log.exception("Guesses were: {0}".format(str(gg)))
                log.exception("Fitkwargs were: {0}".format(str(fitkwargs)))
                if skip_failed_fits:
                    # turn the flag into a count
                    log.exception("The fit never completed; something has gone wrong. Failed fits = {0}".format(int(skip_failed_fits)))
                else:
                    raise TypeError("The fit never completed; something has gone wrong.")
            # blank out the errors (and possibly the values) wherever they are zero = assumed bad
            # this is done after the above exception to make sure we can inspect these values
            if blank_value != 0:
                self.parcube[self.parcube == 0] = blank_value
                self.errcube[self.parcube == 0] = blank_value
            if integral:
                return ((x,y), sp.specfit.modelpars, sp.specfit.modelerrs,
                        self.integralmap[:,int(y),int(x)])
            else:
                return ((x,y), sp.specfit.modelpars, sp.specfit.modelerrs)
        #### BEGIN TEST BLOCK ####
        # This test block is to make sure you don't run a 30 hour fitting
        # session that's just going to crash at the end.
        # try a first fit for exception-catching
        if len(start_from_point) == 2:
            try0 = fit_a_pixel((0,start_from_point[0], start_from_point[1]))
        else:
            try0 = fit_a_pixel((0,valid_pixels[0][0],valid_pixels[0][1]))
        try:
            len_guesses = len(self.momentcube) if (usemomentcube or
                                                   guesses_are_moments) else len(guesses)
            assert len(try0[1]) == len_guesses == len(self.parcube) == len(self.errcube)
            assert len(try0[2]) == len_guesses == len(self.parcube) == len(self.errcube)
        except TypeError as ex:
            if try0 is None:
                raise AssertionError("The first fitted pixel did not yield a "
                                     "fit. Please try starting from a "
                                     "different pixel.")
            else:
                raise ex
        except AssertionError:
            raise AssertionError("The first pixel had the wrong fit "
                                 "parameter shape. This is probably "
                                 "a bug; please report it.")
        # This is a secondary test... I'm not sure it's necessary, but it
        # replicates what's inside the fit_a_pixel code and so should be a
        # useful sanity check
        x,y = valid_pixels[0]
        sp = self.get_spectrum(x,y)
        sp.specfit.Registry = self.Registry # copy over fitter registry
        # this reproduced code is needed because the functional wrapping
        # required for the multicore case prevents gg from being set earlier
        if usemomentcube or guesses_are_moments:
            gg = self.momentcube[:,int(y),int(x)]
        elif hasattr(guesses,'shape') and guesses.shape[1:] == self.cube.shape[1:]:
            gg = guesses[:,int(y),int(x)]
        else:
            gg = guesses
        # This is NOT in a try/except block because we want to raise the
        # exception here if an exception is going to happen
        sp.specfit(guesses=gg, **fitkwargs)
        if prevalidate_guesses:
            # optionally pre-check every guess against the parameter limits
            if guesses.ndim == 3:
                for ii,(x,y) in ProgressBar(tuple(enumerate(valid_pixels))):
                    pinf, _ = sp.specfit.fitter._make_parinfo(parvalues=guesses[:,int(y),int(x)], **fitkwargs)
                    sp.specfit._validate_parinfo(pinf, 'raise')
            else:
                pinf, _ = sp.specfit.fitter._make_parinfo(parvalues=guesses, **fitkwargs)
                sp.specfit._validate_parinfo(pinf, 'raise')
        #### END TEST BLOCK ####
        # --- phase 4: run the per-pixel fits, parallel or serial ---
        if multicore > 1:
            sequence = [(ii,x,y) for ii,(x,y) in tuple(enumerate(valid_pixels))]
            with np.errstate(divide='raise'):
                result = parallel_map(fit_a_pixel, sequence, numcores=multicore)
            self._result = result # backup - don't want to lose data in the case of a failure
            # a lot of ugly hacking to deal with the way parallel_map returns
            # its results needs TWO levels of None-filtering, because any
            # individual result can be None (I guess?) but apparently (and this
            # part I don't believe) any individual *fit* result can be None as
            # well (apparently the x,y pairs can also be None?)
            merged_result = [core_result for core_result in result if
                             core_result is not None]
            # for some reason, every other time I run this code, merged_result
            # ends up with a different intrinsic shape. This is an attempt to
            # force it to maintain a sensible shape.
            try:
                if integral:
                    ((x,y), m1, m2, intgl) = merged_result[0]
                else:
                    ((x,y), m1, m2) = merged_result[0]
            except ValueError:
                # NOTE(review): `verbose` is documented as a bool, so
                # `verbose > 1` is only True for int-valued input — confirm
                if verbose > 1:
                    log.exception("ERROR: merged_result[0] is {0} which has the"
                                  " wrong shape".format(merged_result[0]))
                merged_result = itertools.chain.from_iterable(merged_result)
            # subprocess writes do not propagate: merge returned results into
            # the parent's parameter/error cubes
            for TEMP in merged_result:
                if TEMP is None:
                    # this shouldn't be possible, but it appears to happen
                    # anyway. parallel_map is great, up to a limit that was
                    # reached long before this level of complexity
                    log.debug("Skipped a None entry: {0}".format(str(TEMP)))
                    continue
                try:
                    if integral:
                        ((x,y), modelpars, modelerrs, intgl) = TEMP
                    else:
                        ((x,y), modelpars, modelerrs) = TEMP
                except TypeError:
                    # implies that TEMP does not have the shape ((a,b),c,d)
                    # as above, shouldn't be possible, but it happens...
                    log.debug("Skipped a misshapen entry: {0}".format(str(TEMP)))
                    continue
                if ((len(modelpars) != len(modelerrs)) or
                    (len(modelpars) != len(self.parcube))):
                    raise ValueError("There was a serious problem; modelpar and"
                                     " error shape don't match that of the "
                                     "parameter cubes")
                if ((any([x is None for x in modelpars]) or
                     np.any(np.isnan(modelpars)) or
                     any([x is None for x in modelerrs]) or
                     np.any(np.isnan(modelerrs)))):
                    self.parcube[:,int(y),int(x)] = np.nan
                    self.errcube[:,int(y),int(x)] = np.nan
                    self.has_fit[int(y),int(x)] = False
                else:
                    self.parcube[:,int(y),int(x)] = modelpars
                    self.errcube[:,int(y),int(x)] = modelerrs
                    self.has_fit[int(y),int(x)] = max(modelpars) > 0
                if integral:
                    self.integralmap[:,int(y),int(x)] = intgl
        else:
            for ii,(x,y) in enumerate(valid_pixels):
                fit_a_pixel((ii,x,y))
        # March 27, 2014: This is EXTREMELY confusing. This isn't in a loop...
        # make sure the fitter / fittype are set for the cube
        # this has to be done within the loop because skipped-over spectra
        # don't ever get their fittypes set
        self.specfit.fitter = sp.specfit.fitter
        self.specfit.fittype = sp.specfit.fittype
        self.specfit.parinfo = sp.specfit.parinfo
        if verbose:
            log.info("Finished final fit %i. "
                     "Elapsed time was %0.1f seconds" % (len(valid_pixels), time.time()-t0))
        pars_are_finite = np.all(np.isfinite(self.parcube), axis=0)
        # if you see one of these exceptions, please try to produce a minimum
        # working example and report it as a bug.
        # all non-finite fit parameters should be has_fit=False
        assert np.all(~self.has_fit[~pars_are_finite]), "Non-finite parameters found in fits"
def momenteach(self, verbose=True, verbose_level=1, multicore=1, **kwargs):
"""
Return a cube of the moments of each pixel
Parameters
----------
multicore: int
if >1, try to use multiprocessing via parallel_map to run on multiple cores
"""
if not hasattr(self.mapplot,'plane'):
self.mapplot.makeplane()
if 'vheight' not in kwargs:
kwargs['vheight'] = False
yy,xx = np.indices(self.mapplot.plane.shape)
if isinstance(self.mapplot.plane, np.ma.core.MaskedArray):
OK = (~self.mapplot.plane.mask) * self.maskmap
else:
OK = np.isfinite(self.mapplot.plane) * self.maskmap
valid_pixels = zip(xx[OK],yy[OK])
# run the moment process to find out how many elements are in a moment
_temp_moment = self.get_spectrum(yy[OK][0],xx[OK][0]).moments(**kwargs)
self.momentcube = np.zeros((len(_temp_moment),)+self.mapplot.plane.shape)
t0 = time.time()
def moment_a_pixel(iixy):
ii,x,y = iixy
sp = self.get_spectrum(x,y)
self.momentcube[:,int(y),int(x)] = sp.moments(**kwargs)
if verbose:
if ii % 10**(3-verbose_level) == 0:
log.info("Finished moment %i. "
"Elapsed time is %0.1f seconds" % (ii, time.time()-t0))
return ((x,y), self.momentcube[:,int(y),int(x)])
if multicore > 1:
sequence = [(ii,x,y) for ii,(x,y) in tuple(enumerate(valid_pixels))]
result = parallel_map(moment_a_pixel, sequence, numcores=multicore)
merged_result = [core_result.tolist()
for core_result in result
if core_result is not None]
for TEMP in merged_result:
((x,y), moments) = TEMP
self.momentcube[:,int(y),int(x)] = moments
else:
for ii,(x,y) in enumerate(valid_pixels):
moment_a_pixel((ii,x,y))
if verbose:
log.info("Finished final moment %i. "
"Elapsed time was %0.1f seconds" % (OK.sum(), time.time()-t0))
def show_moment(self, momentnumber, **kwargs):
"""
If moments have been computed, display them in the mapplot window
"""
if not hasattr(self,'momentcube'):
raise ValueError("Compute moments first")
self.mapplot.plane = self.momentcube[momentnumber,:,:].squeeze()
self.mapplot(estimator=None, **kwargs)
def show_fit_param(self, parnumber, **kwargs):
"""
If pars have been computed, display them in the mapplot window
Parameters
----------
parnumber : int
The index of the parameter in the parameter cube
"""
if not hasattr(self,'parcube'):
raise ValueError("Compute fit parameters first")
self.mapplot.plane = self.parcube[parnumber,:,:].squeeze()
self.mapplot(estimator=None, **kwargs)
    def load_model_fit(self, fitsfilename, npars, npeaks=1, fittype=None,
                       _temp_fit_loc=(0,0)):
        """
        Load a parameter + error cube into the ``.parcube`` and ``.errcube``
        attributes.  The models can then be examined and plotted using
        ``.mapplot`` as if you had run ``.fiteach``.

        Parameters
        ----------
        fitsfilename : str
            The filename containing the parameter cube written with `write_fit`
        npars : int
            The number of parameters in the model fit for a single spectrum
        npeaks : int
            The number of independent peaks fit toward each spectrum
        fittype : str, optional
            The name of the fittype, e.g. 'gaussian' or 'voigt', from the
            pyspeckit fitter registry.  This is optional; it should have
            been written to the FITS header and will be read from there if
            it is not specified
        _temp_fit_loc : tuple (int,int)
            The initial spectrum to use to generate components of the class.
            This should not need to be changed.
        """
        try:
            import astropy.io.fits as pyfits
        except ImportError:
            import pyfits
        cubefile = pyfits.open(fitsfilename,ignore_missing_end=True)
        cube = cubefile[0].data
        # write_fit stores params followed by their errors, hence the *2
        if cube.shape[0] != npars * npeaks * 2:
            raise ValueError("The cube shape is not correct. The cube has "
                             "first dimension = {0}, but it should be {1}. "
                             "The keyword npars = number of parameters per "
                             "model component, and npeaks = number of "
                             "independent peaks. You gave npars={2} and "
                             "npeaks={3}".format(cube.shape[0], npars*npeaks*2,
                                                 npars, npeaks))
        # grab a spectrum and fit it however badly you want
        # this is just to __init__ the relevant data structures
        if fittype is None:
            if cubefile[0].header.get('FITTYPE'):
                fittype = cubefile[0].header.get('FITTYPE')
            else:
                raise KeyError("Must specify FITTYPE or include it in cube header.")
        # split the stacked cube back into parameters and errors
        self.parcube = cube[:npars*npeaks,:,:]
        self.errcube = cube[npars*npeaks:npars*npeaks*2,:,:]
        if np.any(np.all(self.parcube == 0, axis=(1,2))):
            # there are some slices where all parameters are zero, we should
            # ignore this when establishing whether there's a fit (some
            # parameters, like fortho, can be locked to zero)
            self.has_fit = np.all((np.isfinite(self.parcube)), axis=0)
        else:
            self.has_fit = np.all((self.parcube != 0) &
                                  (np.isfinite(self.parcube)), axis=0)
        nanvals = ~np.isfinite(self.parcube)
        nanvals_flat = np.any(nanvals, axis=0)
        if np.any(nanvals):
            warn("NaN or infinite values encountered in parameter cube.",
                 PyspeckitWarning)
        # make sure params are within limits
        fitter = self.specfit.Registry.multifitters[fittype]
        guesses,throwaway = fitter._make_parinfo(npeaks=npeaks)
        # Run one throwaway fit to initialize the specfit data structures;
        # fall back to the first nonzero, finite pixel if _temp_fit_loc fails.
        try:
            x,y = _temp_fit_loc
            sp = self.get_spectrum(x,y)
            guesses.values = self.parcube[:,int(y),int(x)]
            sp.specfit(fittype=fittype, guesses=guesses.values)
        except Exception as ex1:
            try:
                OKmask = np.any(self.parcube, axis=0) & ~nanvals_flat
                whereOK = np.where(OKmask)
                x,y = whereOK[1][0],whereOK[0][0]
                sp = self.get_spectrum(x,y)
                guesses.values = self.parcube[:,int(y),int(x)]
                sp.specfit(fittype=fittype, guesses=guesses.values)
            except Exception as ex2:
                log.error("Fitting the pixel at location {0} failed with error: {1}. "
                          "Re-trying at location {2} failed with error {3}. "
                          "Try setting _temp_fit_loc to a valid pixel".format(_temp_fit_loc, ex1,
                                                                              (x,y), ex2))
        # NOTE(review): if both trial fits above fail before get_spectrum
        # succeeds, `sp` is unbound here and the next line raises NameError —
        # confirm whether the error path should return instead
        self.specfit.fitter = sp.specfit.fitter
        self.specfit.fittype = sp.specfit.fittype
        self.specfit.parinfo = sp.specfit.parinfo
    def smooth(self,factor,**kwargs):
        """
        Smooth the spectrum by factor `factor` along the spectral axis,
        downsampling the x-axis correspondingly.  The error cube (if any)
        and the extracted ``.data`` spectrum (if any) are smoothed the same
        way, and the FITS header spectral keywords are updated.

        Documentation from the :mod:`cubes.spectral_smooth` module:
        """
        factor = round(factor)
        self.cube = cubes.spectral_smooth(self.cube,factor,**kwargs)
        # downsample the spectral axis to match the smoothed cube
        self.xarr = self.xarr[::factor]
        if hasattr(self,'data'):
            self.data = smooth.smooth(self.data,factor,**kwargs)
        if len(self.xarr) != self.cube.shape[0]:
            raise ValueError("Convolution resulted in different X and Y array lengths. Convmode should be 'same'.")
        if self.errorcube is not None:
            self.errorcube = cubes.spectral_smooth(self.errorcube,factor,**kwargs)
        # keep the FITS header CDELT3/CRPIX3 consistent with the new sampling
        self._smooth_header(factor)
__doc__ += "cubes.spectral_smooth doc: \n" + cubes.spectral_smooth.__doc__
def _smooth_header(self,factor):
"""
Internal - correct the FITS header parameters when smoothing
"""
if self.header.get('CDELT3') is not None and self.header.get('CRPIX3') is not None:
self.header['CDELT3'] = self.header.get('CDELT3') * float(factor)
self.header['CRPIX3'] = self.header.get('CRPIX3') / float(factor)
history.write_history(self.header,"SMOOTH: Smoothed and downsampled spectrum by factor %i" % (factor))
history.write_history(self.header,"SMOOTH: Changed CRPIX3 from %f to %f" % (self.header.get('CRPIX3')*float(factor),self.header.get('CRPIX3')))
history.write_history(self.header,"SMOOTH: Changed CDELT3 from %f to %f" % (self.header.get('CRPIX3')/float(factor),self.header.get('CRPIX3')))
def write_fit(self, fitcubefilename, overwrite=False):
    """
    Write out a fit cube containing the ``.parcube`` and ``.errcube`` using
    the information in the fit's parinfo to set the header keywords.  The
    ``PLANE#`` keywords will be used to indicate the content of each plane
    in the data cube written to the FITS file.  All of the fitted
    parameters will be written first, followed by all of the errors on
    those parameters.  So, for example, if you have fitted a single
    gaussian to each pixel, the dimensions of the saved cube will be ``[6,
    ny, nx]``, and they will be the amplitude, centroid, width, error on
    amplitude, error on centroid, and error on width, respectively.

    To load such a file back in for plotting purposes, see
    `SpectralCube.load_model_fit`.

    Parameters
    ----------
    fitcubefilename: string
        Filename to write to
    overwrite: bool
        Overwrite file if it exists?
    """
    try:
        import astropy.io.fits as pyfits
    except ImportError:
        # fall back to the standalone pyfits package on old installs
        import pyfits
    try:
        # Parameter planes come first, then the error planes.
        fitcubefile = pyfits.PrimaryHDU(data=np.concatenate([self.parcube,self.errcube]), header=self.header)
        fitcubefile.header['FITTYPE'] = self.specfit.fittype
        # Label each parameter plane; strip trailing digits so e.g.
        # "AMPLITUDE0" becomes "AMPLITUDE".
        for ii,par in enumerate(self.specfit.parinfo):
            kw = "PLANE%i" % ii
            parname = par['parname'].strip('0123456789')
            fitcubefile.header[kw] = parname
        # set error parameters
        # (ii is the index of the last parameter plane from the loop above)
        for jj,par in enumerate(self.specfit.parinfo):
            kw = "PLANE%i" % (ii+jj+1)
            parname = "e"+par['parname'].strip('0123456789')
            fitcubefile.header[kw] = parname

        # overwrite the WCS: the third axis now indexes fit parameters,
        # not spectral channels.
        fitcubefile.header['CDELT3'] = 1
        fitcubefile.header['CTYPE3'] = 'FITPAR'
        fitcubefile.header['CRVAL3'] = 0
        fitcubefile.header['CRPIX3'] = 1
    except AttributeError:
        log.exception("Make sure you run the cube fitter first.")
        return

    # astropy renamed writeto's 'clobber' argument to 'overwrite' in v1.3.
    if astropy.version.major >= 2 or (astropy.version.major==1 and astropy.version.minor>=3):
        fitcubefile.writeto(fitcubefilename, overwrite=overwrite)
    else:
        fitcubefile.writeto(fitcubefilename, clobber=overwrite)
def write_cube(self):
    # Writing the (possibly modified) data cube back to disk is not
    # implemented yet.
    raise NotImplementedError
class CubeStack(Cube):
    """
    The Cube equivalent of Spectra: for stitching multiple cubes with the same
    spatial grid but different frequencies together
    """

    def __init__(self, cubelist, xunit='GHz', x0=0, y0=0, maskmap=None, **kwargs):
        """
        Initialize the Cube.  Accepts FITS files.

        x0,y0 - initial spectrum to use (defaults to lower-left corner)
        """
        log.info("Creating Cube Stack")
        cubelist = list(cubelist)
        for ii,cube in enumerate(cubelist):
            # Accept filenames as well as Cube instances.
            if type(cube) is str:
                cube = Cube(cube)
                cubelist[ii] = cube
            if cube.xarr.unit != xunit:
                # convert all inputs to same (non-velocity) unit
                cube.xarr.convert_to_unit(xunit, **kwargs)
        self.cubelist = cubelist

        log.info("Concatenating data")
        self.xarr = SpectroscopicAxes([sp.xarr for sp in cubelist])
        self.cube = np.ma.concatenate([icube.cube for icube in cubelist])

        # Either every input cube must carry an error cube, or none may.
        if np.any([icube.errorcube is not None for icube in cubelist]):
            if all([icube.errorcube is not None for icube in cubelist]):
                self.errorcube = np.ma.concatenate([icube.errorcube for icube in cubelist])
            else:
                raise ValueError("Mismatched error cubes.")
        else:
            self.errorcube = None

        if hasattr(self.cube,'mask'):
            try:
                if self.cube.mask in (False,np.bool_(False)):
                    # mask causes major problems internally for numpy...
                    self.cube = np.array(self.cube)
            except ValueError:
                # this means that self.cube.mask is an array;
                # techically that's alright
                pass

        # Sort so the concatenated spectral axis is monotonic.
        self._sort()
        self.data = self.cube[:,int(y0),int(x0)]
        self.error = self.errorcube[:,int(y0),int(x0)] if self.errorcube is not None else None

        # Merge the headers; later cubes override earlier keywords except
        # HISTORY/COMMENT, which are skipped entirely.
        self.header = cubelist[0].header.copy()
        for cube in cubelist:
            for key,value in cube.header.items():
                if key in ['HISTORY', 'COMMENT']:
                    continue
                self.header[key] = value

        if self.header:
            self.wcs = wcs.WCS(self.header)
            self.wcs.wcs.fix()
            self._spectral_axis_number = self.wcs.wcs.spec+1
            # axis_types // 1000 == 2 selects celestial axes in wcslib's scheme
            self._first_cel_axis_num = np.where(self.wcs.wcs.axis_types // 1000 == 2)[0][0]+1

            # TODO: Improve this!!!
            self.system = ('galactic'
                           if ('CTYPE{0}'.format(self._first_cel_axis_num)
                               in self.header and 'GLON' in
                               self.header['CTYPE{0}'.format(self._first_cel_axis_num)])
                           else 'celestial')
        else:
            self._spectral_axis_number = 3
            self._first_cel_axis_num = 1
            self.system = 'PIXEL'

        # All input cubes must share the same flux unit.
        self.unit = cubelist[0].unit
        for cube in cubelist:
            if cube.unit != self.unit:
                raise ValueError("Mismatched units "
                                 "{0} and {1}".format(cube.unit, self.unit))

        self.fileprefix = cubelist[0].fileprefix # first is the best?

        if maskmap is not None:
            self.maskmap = maskmap
        else:
            self.maskmap = np.ones(self.cube.shape[1:],dtype='bool')

        self._register_fitters()
        self.plotter = spectrum.plotters.Plotter(self)
        self.specfit = spectrum.fitters.Specfit(self,Registry=self.Registry)
        self.baseline = spectrum.baseline.Baseline(self)
        self.speclines = spectrum.speclines
        # Initialize writers TO DO: DO WRITERS WORK FOR CUBES?
        self.writer = {}
        for writer in spectrum.writers.writers:
            self.writer[writer] = spectrum.writers.writers[writer](self)

        # Special. This needs to be modified to be more flexible; for now I need it to work for nh3
        self.plot_special = None
        self.plot_special_kwargs = {}
        self._modelcube = None

        self.mapplot = mapplot.MapPlotter(self)

    def _sort(self):
        """ Sort the data in order of increasing X (could be decreasing, but
        must be monotonic for plotting reasons) """
        indices = self.xarr.argsort()
        self.xarr = self.xarr[indices]
        self.cube = self.cube[indices,:,:]
        if self.errorcube is not None:
            self.errorcube = self.errorcube[indices,:,:]
def get_neighbors(x, y, shape):
    """
    Return the pixel *offsets* of the (up to 8) nearest neighbors of
    ``(x, y)``, excluding the pixel itself and any neighbor that would
    fall outside an array of the given shape.

    (The previous docstring claimed "9 nearest neighbors" and implied
    absolute coordinates; the function actually yields at most 8 offset
    pairs.)

    Parameters
    ----------
    x, y : int
        Pixel coordinates.
    shape : sequence of int
        ``(ny, nx)`` image shape, in numpy axis order.

    Returns
    -------
    xpatch, ypatch : ndarray of int
        Parallel arrays of offsets in ``{-1, 0, 1}`` such that
        ``(x + xpatch[i], y + ypatch[i])`` is a valid in-bounds neighbor.
    """
    ysh, xsh = shape
    offsets = [(dx, dy)
               for dx, dy in itertools.product((-1, 0, 1),
                                               (-1, 0, 1))
               if (0 <= dx + x < xsh)
               and (0 <= dy + y < ysh)
               and not (dx == 0 and dy == 0)]
    xpatch, ypatch = zip(*offsets)
    return np.array(xpatch, dtype='int'), np.array(ypatch, dtype='int')
def test_get_neighbors():
    """Spot-check the offset sets returned for corner, edge and interior
    pixels of a 10x10 image."""
    # (x, y, expected x-offset set, expected y-offset set)
    cases = [
        (0, 0, {0, 1}, {0, 1}),
        (0, 1, {0, 1}, {-1, 0, 1}),
        (5, 6, {-1, 0, 1}, {-1, 0, 1}),
        (9, 9, {0, -1}, {0, -1}),
        (9, 8, {-1, 0}, {-1, 0, 1}),
    ]
    for x, y, expected_x, expected_y in cases:
        xp, yp = get_neighbors(x, y, [10, 10])
        assert set(xp) == expected_x
        assert set(yp) == expected_y
|
low-sky/pyspeckit
|
pyspeckit/cubes/SpectralCube.py
|
Python
|
mit
| 72,624
|
[
"Gaussian"
] |
c5b0567beb978d0cfeb5b938183e713a62f4007c875c9565a98bcd37ac43cefc
|
#!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://setuptools.readthedocs.io/en/latest/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
import six
from six.moves import configparser, map
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.py27compat import rmtree_safe
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import (
PackageIndex, parse_requirement_arg, URL_SCHEME,
)
from setuptools.command import bdist_egg, egg_info
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
# Public API of this module; controls what `from ... import *` exposes.
__all__ = [
    'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
    'main', 'get_exe_prefixes',
]
def is_64bit():
    """Return True when running on a 64-bit Python build."""
    # A C pointer ("P") is 8 bytes on 64-bit interpreters, 4 on 32-bit.
    pointer_size = struct.calcsize("P")
    return pointer_size == 8
def samefile(p1, p2):
    """
    Determine if two paths reference the same file.

    Augments os.path.samefile to work on Windows and
    suppresses errors if the path doesn't exist.
    """
    # os.path.samefile raises if either path is missing (and is absent on
    # some platforms), so only trust it when both paths exist.
    can_compare_inodes = (
        hasattr(os.path, 'samefile')
        and os.path.exists(p1)
        and os.path.exists(p2)
    )
    if can_compare_inodes:
        return os.path.samefile(p1, p2)
    # Fall back to a purely lexical comparison of normalized paths.
    return (os.path.normpath(os.path.normcase(p1)) ==
            os.path.normpath(os.path.normcase(p2)))
if six.PY2:

    def _to_ascii(s):
        # Python 2 str is already a byte string; pass it through untouched.
        return s

    def isascii(s):
        """Return True if *s* decodes cleanly as ASCII."""
        try:
            six.text_type(s, 'ascii')
        except UnicodeError:
            return False
        return True
else:

    def _to_ascii(s):
        # Python 3 str must be explicitly encoded to bytes.
        return s.encode('ascii')

    def isascii(s):
        """Return True if *s* encodes cleanly as ASCII."""
        try:
            s.encode('ascii')
        except UnicodeError:
            return False
        return True
_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
class easy_install(Command):
    """Manage a download/build/install process"""

    description = "Find/get/install Python packages"
    command_consumes_arguments = True

    # (long name, short name, help) tuples consumed by distutils' option
    # parser; a trailing '=' means the option takes an argument.
    user_options = [
        ('prefix=', None, "installation prefix"),
        ("zip-ok", "z", "install package as a zipfile"),
        ("multi-version", "m", "make apps have to require() a version"),
        ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
        ("install-dir=", "d", "install package to DIR"),
        ("script-dir=", "s", "install scripts to DIR"),
        ("exclude-scripts", "x", "Don't install scripts"),
        ("always-copy", "a", "Copy all needed packages to install dir"),
        ("index-url=", "i", "base URL of Python Package Index"),
        ("find-links=", "f", "additional URL(s) to search for packages"),
        ("build-directory=", "b",
         "download/extract/build in DIR; keep the results"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('record=', None,
         "filename in which to record list of installed files"),
        ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
        ('site-dirs=', 'S', "list of directories where .pth files work"),
        ('editable', 'e', "Install specified packages in editable form"),
        ('no-deps', 'N', "don't install dependencies"),
        ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
        ('local-snapshots-ok', 'l',
         "allow building eggs from local checkouts"),
        ('version', None, "print version information and exit"),
        ('no-find-links', None,
         "Don't load find-links defined in packages being installed")
    ]
    boolean_options = [
        'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
        'editable',
        'no-deps', 'local-snapshots-ok', 'version'
    ]

    # The --user option is only meaningful when per-user site-packages are
    # enabled for this interpreter.
    if site.ENABLE_USER_SITE:
        help_msg = "install in user site-package '%s'" % site.USER_SITE
        user_options.append(('user', None, help_msg))
        boolean_options.append('user')

    # --always-unzip is the negation of --zip-ok
    negative_opt = {'always-unzip': 'zip-ok'}

    # Factory used to build the package index; overridable by subclasses.
    create_index = PackageIndex
def initialize_options(self):
    """Reset every option to its pre-parse default, then pre-load the
    "easy_install" section of the configuration files."""
    # the --user option seems to be an opt-in one,
    # so the default should be False.
    self.user = 0
    self.zip_ok = self.local_snapshots_ok = None
    self.install_dir = self.script_dir = self.exclude_scripts = None
    self.index_url = None
    self.find_links = None
    self.build_directory = None
    self.args = None
    self.optimize = self.record = None
    self.upgrade = self.always_copy = self.multi_version = None
    self.editable = self.no_deps = self.allow_hosts = None
    self.root = self.prefix = self.no_report = None
    self.version = None
    self.install_purelib = None  # for pure module distributions
    self.install_platlib = None  # non-pure (dists w/ extensions)
    self.install_headers = None  # for C/C++ headers
    self.install_lib = None  # set to either purelib or platlib
    self.install_scripts = None
    self.install_data = None
    self.install_base = None
    self.install_platbase = None
    if site.ENABLE_USER_SITE:
        self.install_userbase = site.USER_BASE
        self.install_usersite = site.USER_SITE
    else:
        self.install_userbase = None
        self.install_usersite = None
    self.no_find_links = None

    # Options not specifiable via command line
    self.package_index = None
    self.pth_file = self.always_copy_from = None
    self.site_dirs = None
    self.installed_projects = {}
    self.sitepy_installed = False
    # Always read easy_install options, even if we are subclassed, or have
    # an independent instance created. This ensures that defaults will
    # always come from the standard configuration file(s)' "easy_install"
    # section, even if this is a "develop" or "install" command, or some
    # other embedding.
    self._dry_run = None
    self.verbose = self.distribution.verbose
    self.distribution._set_command_options(
        self, self.distribution.get_option_dict('easy_install')
    )
def delete_blockers(self, blockers):
    """Remove every path in *blockers* that currently exists (as a file,
    a directory, or a dangling symlink)."""
    for path in blockers:
        if os.path.exists(path) or os.path.islink(path):
            self._delete_path(path)
def _delete_path(self, path):
    """Delete *path*, honoring --dry-run: rmtree for real directories,
    os.unlink for files and symlinks."""
    log.info("Deleting %s", path)
    if self.dry_run:
        return
    if os.path.isdir(path) and not os.path.islink(path):
        # A symlinked directory must be unlinked, not recursively removed.
        rmtree(path)
    else:
        os.unlink(path)
@staticmethod
def _render_version():
    """
    Render the Setuptools version and installation details, then exit.
    """
    # BUGFIX: sys.version[:3] truncates two-digit minor versions
    # ("3.10.1" -> "3.1"); build the string from sys.version_info instead.
    ver = '{}.{}'.format(*sys.version_info)
    dist = get_distribution('setuptools')
    tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
    print(tmpl.format(**locals()))
    raise SystemExit()
def finalize_options(self):
    """Resolve option defaults, perform config-variable substitution,
    validate site directories, and build the package index before
    ``run`` executes.

    Raises DistutilsOptionError / DistutilsArgError for inconsistent or
    missing options.
    """
    self.version and self._render_version()

    py_version = sys.version.split()[0]
    prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')

    self.config_vars = {
        'dist_name': self.distribution.get_name(),
        'dist_version': self.distribution.get_version(),
        'dist_fullname': self.distribution.get_fullname(),
        'py_version': py_version,
        # BUGFIX: slicing the version string breaks for Python >= 3.10
        # ("3.10.4"[0:3] == "3.1"); derive from sys.version_info instead.
        'py_version_short': '{}.{}'.format(*sys.version_info[:2]),
        'py_version_nodot': '{}{}'.format(*sys.version_info[:2]),
        'sys_prefix': prefix,
        'prefix': prefix,
        'sys_exec_prefix': exec_prefix,
        'exec_prefix': exec_prefix,
        # Only python 3.2+ has abiflags
        'abiflags': getattr(sys, 'abiflags', ''),
    }

    if site.ENABLE_USER_SITE:
        self.config_vars['userbase'] = self.install_userbase
        self.config_vars['usersite'] = self.install_usersite

    self._fix_install_dir_for_user_site()

    self.expand_basedirs()
    self.expand_dirs()

    self._expand(
        'install_dir', 'script_dir', 'build_directory',
        'site_dirs',
    )
    # If a non-default installation directory was specified, default the
    # script directory to match it.
    if self.script_dir is None:
        self.script_dir = self.install_dir

    if self.no_find_links is None:
        self.no_find_links = False

    # Let install_dir get set by install_lib command, which in turn
    # gets its info from the install command, and takes into account
    # --prefix and --home and all that other crud.
    self.set_undefined_options(
        'install_lib', ('install_dir', 'install_dir')
    )
    # Likewise, set default script_dir from 'install_scripts.install_dir'
    self.set_undefined_options(
        'install_scripts', ('install_dir', 'script_dir')
    )

    if self.user and self.install_purelib:
        self.install_dir = self.install_purelib
        self.script_dir = self.install_scripts
    # default --record from the install command
    self.set_undefined_options('install', ('record', 'record'))
    # BUGFIX: materialize as a list -- on Python 3 a bare map() object is
    # exhausted by the first `in` test, so validating more than one
    # --site-dirs entry silently misbehaved.
    normpath = [normalize_path(p) for p in sys.path]
    self.all_site_dirs = get_site_dirs()
    if self.site_dirs is not None:
        site_dirs = [
            os.path.expanduser(s.strip()) for s in
            self.site_dirs.split(',')
        ]
        for d in site_dirs:
            if not os.path.isdir(d):
                log.warn("%s (in --site-dirs) does not exist", d)
            elif normalize_path(d) not in normpath:
                raise DistutilsOptionError(
                    d + " (in --site-dirs) is not on sys.path"
                )
            else:
                self.all_site_dirs.append(normalize_path(d))
    if not self.editable:
        self.check_site_dir()
    self.index_url = self.index_url or "https://pypi.python.org/simple"
    self.shadow_path = self.all_site_dirs[:]
    for path_item in self.install_dir, normalize_path(self.script_dir):
        if path_item not in self.shadow_path:
            self.shadow_path.insert(0, path_item)

    if self.allow_hosts is not None:
        hosts = [s.strip() for s in self.allow_hosts.split(',')]
    else:
        hosts = ['*']
    if self.package_index is None:
        self.package_index = self.create_index(
            self.index_url, search_path=self.shadow_path, hosts=hosts,
        )
    self.local_index = Environment(self.shadow_path + sys.path)

    if self.find_links is not None:
        if isinstance(self.find_links, six.string_types):
            self.find_links = self.find_links.split()
    else:
        self.find_links = []
    if self.local_snapshots_ok:
        self.package_index.scan_egg_links(self.shadow_path + sys.path)
    if not self.no_find_links:
        self.package_index.add_find_links(self.find_links)
    self.set_undefined_options('install_lib', ('optimize', 'optimize'))
    if not isinstance(self.optimize, int):
        try:
            self.optimize = int(self.optimize)
            if not (0 <= self.optimize <= 2):
                raise ValueError
        except ValueError:
            raise DistutilsOptionError("--optimize must be 0, 1, or 2")

    if self.editable and not self.build_directory:
        raise DistutilsArgError(
            "Must specify a build directory (-b) when using --editable"
        )
    if not self.args:
        raise DistutilsArgError(
            "No urls, filenames, or requirements specified (see --help)")

    self.outputs = []
def _fix_install_dir_for_user_site(self):
    """
    Fix the install_dir if "--user" was used.
    """
    # No-op unless --user was requested and this interpreter supports
    # per-user site-packages at all.
    if not self.user or not site.ENABLE_USER_SITE:
        return

    self.create_home_path()
    if self.install_userbase is None:
        msg = "User base directory is not specified"
        raise DistutilsPlatformError(msg)
    self.install_base = self.install_platbase = self.install_userbase
    # Maps to distutils' 'unix_user' / 'nt_user' install schemes.
    scheme_name = os.name.replace('posix', 'unix') + '_user'
    self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
    """For each named attribute that is set, expand '~' and $-style
    config variables in its value."""
    for attr in attrs:
        val = getattr(self, attr)
        if val is None:
            continue
        # Tilde expansion only applies on posix/nt-style filesystems.
        if os.name == 'posix' or os.name == 'nt':
            val = os.path.expanduser(val)
        setattr(self, attr, subst_vars(val, self.config_vars))
def expand_basedirs(self):
    """Calls `os.path.expanduser` on install_base, install_platbase and
    root."""
    base_attrs = ['install_base', 'install_platbase', 'root']
    self._expand_attrs(base_attrs)
def expand_dirs(self):
    """Calls `os.path.expanduser` on install dirs."""
    suffixes = ('purelib', 'platlib', 'lib', 'headers', 'scripts', 'data')
    self._expand_attrs(['install_' + suffix for suffix in suffixes])
def run(self):
    """Install each requested spec; afterwards, optionally write the
    --record file listing every installed path."""
    if self.verbose != self.distribution.verbose:
        log.set_verbosity(self.verbose)
    try:
        for spec in self.args:
            self.easy_install(spec, not self.no_deps)
        if self.record:
            outputs = self.outputs
            if self.root:  # strip any package prefix
                root_len = len(self.root)
                for counter in range(len(outputs)):
                    outputs[counter] = outputs[counter][root_len:]
            from distutils import file_util

            self.execute(
                file_util.write_file, (self.record, outputs),
                "writing list of installed files to '%s'" %
                self.record
            )
        self.warn_deprecated_options()
    finally:
        # Restore the distribution-wide verbosity no matter what happened.
        log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
    """Return a pseudo-tempname base in the install directory.

    This code is intentionally naive; if a malicious party can write to
    the target directory you're already in deep doodoo.
    """
    try:
        suffix = os.getpid()
    except Exception:
        # Exotic platforms without getpid(): fall back to a random id.
        suffix = random.randint(0, sys.maxsize)
    return os.path.join(self.install_dir, "test-easy-install-%s" % suffix)
def warn_deprecated_options(self):
    # Hook for warning about deprecated command-line options; nothing is
    # currently deprecated, so this is intentionally a no-op.
    pass
def check_site_dir(self):
    """Verify that self.install_dir is .pth-capable dir, if needed"""
    instdir = normalize_path(self.install_dir)
    pth_file = os.path.join(instdir, 'easy-install.pth')

    # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
    is_site_dir = instdir in self.all_site_dirs

    if not is_site_dir and not self.multi_version:
        # No? Then directly test whether it does .pth file processing
        is_site_dir = self.check_pth_processing()
    else:
        # make sure we can write to target dir
        testfile = self.pseudo_tempname() + '.write-test'
        test_exists = os.path.exists(testfile)
        try:
            if test_exists:
                os.unlink(testfile)
            open(testfile, 'w').close()
            os.unlink(testfile)
        except (OSError, IOError):
            self.cant_write_to_target()

    if not is_site_dir and not self.multi_version:
        # Can't install non-multi to non-site dir
        raise DistutilsError(self.no_default_version_msg())

    if is_site_dir:
        # Lazily create the PthDistributions manager for easy-install.pth.
        if self.pth_file is None:
            self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
    else:
        self.pth_file = None

    PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
    if instdir not in map(normalize_path, filter(None, PYTHONPATH)):
        # only PYTHONPATH dirs need a site.py, so pretend it's there
        self.sitepy_installed = True
    elif self.multi_version and not os.path.exists(pth_file):
        self.sitepy_installed = True  # don't need site.py in this case
        self.pth_file = None  # and don't create a .pth file
    self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip()
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip()
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://setuptools.readthedocs.io/en/latest/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip()
def cant_write_to_target(self):
    """Raise a DistutilsError explaining why the install directory is
    unusable, tailored to whether it exists at all."""
    msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)

    if os.path.exists(self.install_dir):
        msg += '\n' + self.__access_msg
    else:
        msg += '\n' + self.__not_exists_id
    raise DistutilsError(msg)
def check_pth_processing(self):
    """Empirically verify whether .pth files are supported in inst. dir"""
    instdir = self.install_dir
    log.info("Checking .pth file support in %s", instdir)
    pth_file = self.pseudo_tempname() + ".pth"
    ok_file = pth_file + '.ok'
    ok_exists = os.path.exists(ok_file)
    # The probe .pth file makes a child interpreter create ok_file; if the
    # file appears, .pth processing demonstrably works in this directory.
    tmpl = _one_liner("""
        import os
        f = open({ok_file!r}, 'w')
        f.write('OK')
        f.close()
        """) + '\n'
    try:
        if ok_exists:
            os.unlink(ok_file)
        dirname = os.path.dirname(ok_file)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        f = open(pth_file, 'w')
    except (OSError, IOError):
        self.cant_write_to_target()
    else:
        try:
            f.write(tmpl.format(**locals()))
            f.close()
            f = None
            executable = sys.executable
            if os.name == 'nt':
                dirname, basename = os.path.split(executable)
                alt = os.path.join(dirname, 'pythonw.exe')
                use_alt = (
                    basename.lower() == 'python.exe' and
                    os.path.exists(alt)
                )
                if use_alt:
                    # use pythonw.exe to avoid opening a console window
                    executable = alt

            from distutils.spawn import spawn

            # -E ignores PYTHON* env vars so only site processing matters.
            spawn([executable, '-E', '-c', 'pass'], 0)

            if os.path.exists(ok_file):
                log.info(
                    "TEST PASSED: %s appears to support .pth files",
                    instdir
                )
                return True
        finally:
            # Always clean up the probe files, pass or fail.
            if f:
                f.close()
            if os.path.exists(ok_file):
                os.unlink(ok_file)
            if os.path.exists(pth_file):
                os.unlink(pth_file)
    if not self.multi_version:
        log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
    return False
def install_egg_scripts(self, dist):
    """Write all the scripts for `dist`, unless scripts are excluded"""
    if not self.exclude_scripts and dist.metadata_isdir('scripts'):
        for script_name in dist.metadata_listdir('scripts'):
            if dist.metadata_isdir('scripts/' + script_name):
                # e.g. a Python 3 __pycache__ directory -- not a script.
                continue
            script_text = dist.get_metadata('scripts/' + script_name)
            self.install_script(dist, script_name, script_text)
    # Entry-point wrapper scripts are always handled here;
    # install_wrapper_scripts checks --exclude-scripts itself.
    self.install_wrapper_scripts(dist)
def add_output(self, path):
    """Record *path* -- or, for a directory, every file beneath it -- as
    an installation output (for the --record file)."""
    if not os.path.isdir(path):
        self.outputs.append(path)
        return
    for base, dirs, files in os.walk(path):
        self.outputs.extend(os.path.join(base, name) for name in files)
def not_editable(self, spec):
    """Reject *spec* when --editable is active: filenames and URLs can't
    be checked out in editable form."""
    if not self.editable:
        return
    raise DistutilsArgError(
        "Invalid argument %r: you can't use filenames or URLs "
        "with --editable (except via the --find-links option)."
        % (spec,)
    )
def check_editable(self, spec):
    """For --editable installs, ensure a checkout for *spec* doesn't
    already occupy the build directory."""
    if not self.editable:
        return

    target = os.path.join(self.build_directory, spec.key)
    if os.path.exists(target):
        raise DistutilsArgError(
            "%r already exists in %s; can't do a checkout there" %
            (spec.key, self.build_directory)
        )
@contextlib.contextmanager
def _tmpdir(self):
    """Context manager yielding a fresh temporary directory that is
    removed (if it still exists) on exit."""
    tmpdir = tempfile.mkdtemp(prefix=six.u("easy_install-"))
    try:
        # cast to str as workaround for #709 and #710 and #712
        yield str(tmpdir)
    finally:
        os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir))
def easy_install(self, spec, deps=False):
    """Resolve *spec* -- a Requirement object, URL, or local path -- and
    install it, optionally with its dependencies; returns the installed
    Distribution (or None)."""
    if not self.editable:
        self.install_site_py()

    with self._tmpdir() as tmpdir:
        if not isinstance(spec, Requirement):
            if URL_SCHEME(spec):
                # It's a url, download it to tmpdir and process
                self.not_editable(spec)
                dl = self.package_index.download(spec, tmpdir)
                return self.install_item(None, dl, tmpdir, deps, True)

            elif os.path.exists(spec):
                # Existing file or directory, just process it directly
                self.not_editable(spec)
                return self.install_item(None, spec, tmpdir, deps, True)
            else:
                spec = parse_requirement_arg(spec)

        self.check_editable(spec)
        dist = self.package_index.fetch_distribution(
            spec, tmpdir, self.upgrade, self.editable,
            not self.always_copy, self.local_index
        )
        if dist is None:
            msg = "Could not find suitable distribution for %r" % spec
            if self.always_copy:
                msg += " (--always-copy skips system and development eggs)"
            raise DistutilsError(msg)
        elif dist.precedence == DEVELOP_DIST:
            # .egg-info dists don't need installing, just process deps
            self.process_distribution(spec, dist, deps, "Using")
            return dist
        else:
            return self.install_item(spec, dist.location, tmpdir, deps)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
    """Install the artifact at *download*, first building it unless it is
    already a ready-to-use egg known to the local index; returns the
    Distribution matching *spec*, if any."""
    # Installation is also needed if file in tmpdir or is not an egg
    install_needed = install_needed or self.always_copy
    install_needed = install_needed or os.path.dirname(download) == tmpdir
    install_needed = install_needed or not download.endswith('.egg')
    install_needed = install_needed or (
        self.always_copy_from is not None and
        os.path.dirname(normalize_path(download)) ==
        normalize_path(self.always_copy_from)
    )

    if spec and not install_needed:
        # at this point, we know it's a local .egg, we just don't know if
        # it's already installed.
        for dist in self.local_index[spec.project_name]:
            if dist.location == download:
                break
        else:
            install_needed = True  # it's not in the local index

    log.info("Processing %s", os.path.basename(download))

    if install_needed:
        dists = self.install_eggs(spec, download, tmpdir)
        for dist in dists:
            self.process_distribution(spec, dist, deps)
    else:
        dists = [self.egg_distribution(download)]
        self.process_distribution(spec, dists[0], deps, "Using")

    if spec is not None:
        for dist in dists:
            if dist in spec:
                return dist
def select_scheme(self, name):
    """Sets the install directories by applying the install schemes."""
    # it's the caller's problem if they supply a bad name!
    scheme = INSTALL_SCHEMES[name]
    for key in SCHEME_KEYS:
        attr = 'install_' + key
        # Only fill in directories the user hasn't set explicitly.
        if getattr(self, attr) is None:
            setattr(self, attr, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
    """Register *dist* in the indexes, install its scripts, then resolve
    and install its dependencies (unless *deps* is false)."""
    self.update_pth(dist)
    self.package_index.add(dist)
    # Replace any stale entry for the same project in the local index.
    if dist in self.local_index[dist.key]:
        self.local_index.remove(dist)
    self.local_index.add(dist)
    self.install_egg_scripts(dist)
    self.installed_projects[dist.key] = dist
    log.info(self.installation_report(requirement, dist, *info))
    if (dist.has_metadata('dependency_links.txt') and
            not self.no_find_links):
        self.package_index.add_find_links(
            dist.get_metadata_lines('dependency_links.txt')
        )
    if not deps and not self.always_copy:
        return
    elif requirement is not None and dist.key != requirement.key:
        log.warn("Skipping dependencies for %s", dist)
        return  # XXX this is not the distribution we were looking for
    elif requirement is None or dist not in requirement:
        # if we wound up with a different version, resolve what we've got
        distreq = dist.as_requirement()
        requirement = Requirement(str(distreq))
    log.info("Processing dependencies for %s", requirement)
    try:
        # self.easy_install acts as the installer callback for resolve().
        distros = WorkingSet([]).resolve(
            [requirement], self.local_index, self.easy_install
        )
    except DistributionNotFound as e:
        raise DistutilsError(str(e))
    except VersionConflict as e:
        raise DistutilsError(e.report())
    if self.always_copy or self.always_copy_from:
        # Force all the relevant distros to be copied or activated
        for dist in distros:
            if dist.key not in self.installed_projects:
                self.easy_install(dist.as_requirement())
    log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
    """Decide whether *dist* should be installed unzipped.

    An explicit --zip-ok / --always-unzip setting wins; otherwise unzip
    unless the egg is explicitly marked zip-safe.
    """
    if self.zip_ok is not None:
        return not self.zip_ok
    return (dist.has_metadata('not-zip-safe')
            or not dist.has_metadata('zip-safe'))
def maybe_move(self, spec, dist_filename, setup_base):
    """Move a downloaded/unpacked source tree into the --build-directory
    (unless a checkout already exists there); returns the directory the
    build should run from."""
    dst = os.path.join(self.build_directory, spec.key)
    if os.path.exists(dst):
        msg = (
            "%r already exists in %s; build directory %s will not be kept"
        )
        log.warn(msg, spec.key, self.build_directory, setup_base)
        return setup_base
    if os.path.isdir(dist_filename):
        setup_base = dist_filename
    else:
        if os.path.dirname(dist_filename) == setup_base:
            os.unlink(dist_filename)  # get it out of the tmp dir
        contents = os.listdir(setup_base)
        if len(contents) == 1:
            dist_filename = os.path.join(setup_base, contents[0])
            if os.path.isdir(dist_filename):
                # if the only thing there is a directory, move it instead
                setup_base = dist_filename
    ensure_directory(dst)
    shutil.move(setup_base, dst)
    return dst
def install_wrapper_scripts(self, dist):
    """Generate and install entry-point wrapper scripts for *dist*,
    unless --exclude-scripts is set."""
    if self.exclude_scripts:
        return
    writer = ScriptWriter.best()
    for args in writer.get_args(dist):
        self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
    """Generate a legacy script wrapper and install it"""
    # NOTE: the template is %-formatted with locals(), so local variable
    # names here ('spec', 'script_text', 'dev_path', ...) are part of the
    # template's contract -- do not rename them.
    spec = str(dist.as_requirement())
    is_script = is_python_script(script_text, script_name)

    if is_script:
        body = self._load_template(dev_path) % locals()
        script_text = ScriptWriter.get_header(script_text) + body
    # 'b' mode: write bytes so the shebang/encoding survive untouched.
    self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
    """
    There are a couple of template scripts in the package. This
    function loads one of them and prepares it for use.
    """
    # See https://github.com/pypa/setuptools/issues/134 for info
    # on script file naming and downstream issues with SVR4
    name = 'script (dev).tmpl' if dev_path else 'script.tmpl'
    return resource_string('setuptools', name).decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
    """Write an executable file to the scripts directory"""
    self.delete_blockers(  # clean up old .py/.pyw w/o a script
        [os.path.join(self.script_dir, x) for x in blockers]
    )
    log.info("Installing %s script to %s", script_name, self.script_dir)
    target = os.path.join(self.script_dir, script_name)
    self.add_output(target)

    # Respect the user's umask when marking the script executable below.
    mask = current_umask()
    if not self.dry_run:
        ensure_directory(target)
        if os.path.exists(target):
            os.unlink(target)
        with open(target, "w" + mode) as f:
            f.write(contents)
        chmod(target, 0o777 - mask)
    def install_eggs(self, spec, dist_filename, tmpdir):
        """Install `dist_filename` (egg, wininst .exe, sdist archive, or
        source directory) and return a list of installed Distributions."""
        # .egg dirs or files are already built, so just return them
        if dist_filename.lower().endswith('.egg'):
            return [self.install_egg(dist_filename, tmpdir)]
        elif dist_filename.lower().endswith('.exe'):
            return [self.install_exe(dist_filename, tmpdir)]

        # Anything else, try to extract and build
        setup_base = tmpdir
        if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
            unpack_archive(dist_filename, tmpdir, self.unpack_progress)
        elif os.path.isdir(dist_filename):
            setup_base = os.path.abspath(dist_filename)

        if (setup_base.startswith(tmpdir)  # something we downloaded
                and self.build_directory and spec is not None):
            setup_base = self.maybe_move(spec, dist_filename, setup_base)

        # Find the setup.py file
        setup_script = os.path.join(setup_base, 'setup.py')

        if not os.path.exists(setup_script):
            # Some archives unpack into a single wrapper directory;
            # accept exactly one nested setup.py, reject zero or many.
            setups = glob(os.path.join(setup_base, '*', 'setup.py'))
            if not setups:
                raise DistutilsError(
                    "Couldn't find a setup script in %s" %
                    os.path.abspath(dist_filename)
                )
            if len(setups) > 1:
                raise DistutilsError(
                    "Multiple setup scripts in %s" %
                    os.path.abspath(dist_filename)
                )
            setup_script = setups[0]

        # Now run it, and return the result
        if self.editable:
            log.info(self.report_editable(spec, setup_script))
            return []
        else:
            return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
    def install_egg(self, egg_path, tmpdir):
        """Install the egg at `egg_path` into the install dir (moving,
        copying, or extracting as appropriate) and return its Distribution."""
        destination = os.path.join(
            self.install_dir,
            os.path.basename(egg_path),
        )
        destination = os.path.abspath(destination)
        if not self.dry_run:
            ensure_directory(destination)

        dist = self.egg_distribution(egg_path)
        if not samefile(egg_path, destination):
            # Clear out whatever currently occupies the destination.
            if os.path.isdir(destination) and not os.path.islink(destination):
                dir_util.remove_tree(destination, dry_run=self.dry_run)
            elif os.path.exists(destination):
                self.execute(
                    os.unlink,
                    (destination,),
                    "Removing " + destination,
                )
            try:
                new_dist_is_zipped = False
                # Choose the transfer strategy: things inside our tmpdir can
                # be moved; anything else must be copied.
                if os.path.isdir(egg_path):
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copytree, "Copying"
                elif self.should_unzip(dist):
                    self.mkpath(destination)
                    f, m = self.unpack_and_compile, "Extracting"
                else:
                    new_dist_is_zipped = True
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copy2, "Copying"
                self.execute(
                    f,
                    (egg_path, destination),
                    (m + " %s to %s") % (
                        os.path.basename(egg_path),
                        os.path.dirname(destination)
                    ),
                )
                # Stale importer caches must be fixed whether we succeeded
                # (possibly replacing a zip) or failed part-way through.
                update_dist_caches(
                    destination,
                    fix_zipimporter_caches=new_dist_is_zipped,
                )
            except Exception:
                update_dist_caches(destination, fix_zipimporter_caches=False)
                raise

        self.add_output(destination)
        return self.egg_distribution(destination)
    def install_exe(self, dist_filename, tmpdir):
        """Convert a bdist_wininst .exe into an egg and install it.

        Raises DistutilsError if the .exe lacks wininst config data.
        """
        # See if it's valid, get data
        cfg = extract_wininst_cfg(dist_filename)
        if cfg is None:
            raise DistutilsError(
                "%s is not a valid distutils Windows .exe" % dist_filename
            )
        # Create a dummy distribution object until we build the real distro
        dist = Distribution(
            None,
            project_name=cfg.get('metadata', 'name'),
            version=cfg.get('metadata', 'version'), platform=get_platform(),
        )

        # Convert the .exe to an unpacked egg
        egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
        dist.location = egg_path
        egg_tmp = egg_path + '.tmp'
        _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
        pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
        ensure_directory(pkg_inf)  # make sure EGG-INFO dir exists
        dist._provider = PathMetadata(egg_tmp, _egg_info)  # XXX
        self.exe_to_egg(dist_filename, egg_tmp)

        # Write EGG-INFO/PKG-INFO (only if exe_to_egg didn't produce one)
        if not os.path.exists(pkg_inf):
            f = open(pkg_inf, 'w')
            f.write('Metadata-Version: 1.0\n')
            for k, v in cfg.items('metadata'):
                if k != 'target_version':
                    f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
            f.close()
        script_dir = os.path.join(_egg_info, 'scripts')
        # delete entry-point scripts to avoid duping
        self.delete_blockers([
            os.path.join(script_dir, args[0])
            for args in ScriptWriter.get_args(dist)
        ])
        # Build .egg file from tmpdir
        bdist_egg.make_zipfile(
            egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
        )
        # install the .egg
        return self.install_egg(egg_path, tmpdir)
    def exe_to_egg(self, dist_filename, egg_tmp):
        """Extract a bdist_wininst to the directories an egg would use"""
        # Check for .pth file and set up prefix translations
        prefixes = get_exe_prefixes(dist_filename)
        to_compile = []
        native_libs = []
        top_level = {}

        def process(src, dst):
            # Extraction filter: remap wininst archive paths onto the egg
            # layout, recording native extensions and .py files on the way.
            s = src.lower()
            for old, new in prefixes:
                if s.startswith(old):
                    src = new + src[len(old):]
                    parts = src.split('/')
                    dst = os.path.join(egg_tmp, *parts)
                    dl = dst.lower()
                    if dl.endswith('.pyd') or dl.endswith('.dll'):
                        parts[-1] = bdist_egg.strip_module(parts[-1])
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        native_libs.append(src)
                    elif dl.endswith('.py') and old != 'SCRIPTS/':
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        to_compile.append(dst)
                    return dst
            if not src.endswith('.pth'):
                log.warn("WARNING: can't process %s", src)
            # Returning None tells unpack_archive to skip this member.
            return None

        # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
        unpack_archive(dist_filename, egg_tmp, process)
        stubs = []
        for res in native_libs:
            if res.lower().endswith('.pyd'):  # create stubs for .pyd's
                parts = res.split('/')
                resource = parts[-1]
                parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
                pyfile = os.path.join(egg_tmp, *parts)
                to_compile.append(pyfile)
                stubs.append(pyfile)
                bdist_egg.write_stub(resource, pyfile)
        self.byte_compile(to_compile)  # compile .py's
        bdist_egg.write_safety_flag(
            os.path.join(egg_tmp, 'EGG-INFO'),
            bdist_egg.analyze_egg(egg_tmp, stubs))  # write zip-safety flag

        # NB: this loop deliberately reads the lists above via locals() by
        # name, writing top_level.txt / native_libs.txt only when non-empty.
        for name in 'top_level', 'native_libs':
            if locals()[name]:
                txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
                if not os.path.exists(txt):
                    f = open(txt, 'w')
                    f.write('\n'.join(locals()[name]) + '\n')
                    f.close()
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip()
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""")
    def installation_report(self, req, dist, what="Installed"):
        """Helpful installation message for display to package users"""
        msg = "\n%(what)s %(eggloc)s%(extras)s"
        if self.multi_version and not self.no_report:
            msg += '\n' + self.__mv_warning
            if self.install_dir not in map(normalize_path, sys.path):
                msg += '\n' + self.__id_warning

        # NB: these locals feed the %(...)s template via locals() below;
        # they are not dead assignments.
        eggloc = dist.location
        name = dist.project_name
        version = dist.version
        extras = ''  # TODO: self.report_extras(req, dist)
        return msg % locals()
__editable_msg = textwrap.dedent("""
Extracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""").lstrip()
    def report_editable(self, spec, setup_script):
        """Return the message describing an editable extraction.

        NB: the template is interpolated against locals(), so `dirname`
        and `python` must keep these exact names.
        """
        dirname = os.path.dirname(setup_script)
        python = sys.executable
        return '\n' + self.__editable_msg % locals()
    def run_setup(self, setup_script, setup_base, args):
        """Run a project's setup script with `args`, mapping our verbosity
        and dry-run flags onto distutils options."""
        # Make sure our versions of these commands are used by the script.
        sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
        sys.modules.setdefault('distutils.command.egg_info', egg_info)

        args = list(args)
        if self.verbose > 2:
            v = 'v' * (self.verbose - 1)
            args.insert(0, '-' + v)
        elif self.verbose < 2:
            args.insert(0, '-q')
        if self.dry_run:
            args.insert(0, '-n')
        log.info(
            "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
        )
        try:
            run_setup(setup_script, args)
        except SystemExit as v:
            raise DistutilsError("Setup script exited with %s" % (v.args[0],))
    def build_and_install(self, setup_script, setup_base):
        """Run bdist_egg on `setup_script` into a temp dist dir and install
        every egg it produces; return the installed Distributions."""
        args = ['bdist_egg', '--dist-dir']

        dist_dir = tempfile.mkdtemp(
            prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
        )
        try:
            self._set_fetcher_options(os.path.dirname(setup_script))
            args.append(dist_dir)

            self.run_setup(setup_script, setup_base, args)
            all_eggs = Environment([dist_dir])
            eggs = []
            for key in all_eggs:
                for dist in all_eggs[key]:
                    eggs.append(self.install_egg(dist.location, setup_base))
            if not eggs and not self.dry_run:
                log.warn("No eggs found in %s (setup script problem?)",
                         dist_dir)
            return eggs
        finally:
            rmtree(dist_dir)
            # run_setup may have changed log verbosity; restore ours.
            log.set_verbosity(self.verbose)  # restore our log verbosity
    def _set_fetcher_options(self, base):
        """
        When easy_install is about to run bdist_egg on a source dist, that
        source dist might have 'setup_requires' directives, requiring
        additional fetching. Ensure the fetcher options given to easy_install
        are available to that command as well.
        """
        # find the fetch options from easy_install and write them out
        # to the setup.cfg file.
        ei_opts = self.distribution.get_option_dict('easy_install').copy()
        fetch_directives = (
            'find_links', 'site_dirs', 'index_url', 'optimize',
            'site_dirs', 'allow_hosts',
        )
        fetch_options = {}
        for key, val in ei_opts.items():
            if key not in fetch_directives:
                continue
            # val is an (origin, value) pair; keep only the value, and use
            # dashed option names as setup.cfg expects.
            fetch_options[key.replace('_', '-')] = val[1]
        # create a settings dictionary suitable for `edit_config`
        settings = dict(easy_install=fetch_options)
        cfg_filename = os.path.join(base, 'setup.cfg')
        setopt.edit_config(cfg_filename, settings)
    def update_pth(self, dist):
        """Record `dist` in easy-install.pth, dropping stale entries and
        keeping the in-memory shadow path in sync."""
        if self.pth_file is None:
            return

        for d in self.pth_file[dist.key]:  # drop old entries
            if self.multi_version or d.location != dist.location:
                log.info("Removing %s from easy-install.pth file", d)
                self.pth_file.remove(d)
                if d.location in self.shadow_path:
                    self.shadow_path.remove(d.location)

        if not self.multi_version:
            if dist.location in self.pth_file.paths:
                log.info(
                    "%s is already the active version in easy-install.pth",
                    dist,
                )
            else:
                log.info("Adding %s to easy-install.pth file", dist)
                self.pth_file.add(dist)  # add new entry
                if dist.location not in self.shadow_path:
                    self.shadow_path.append(dist.location)

        if not self.dry_run:

            self.pth_file.save()

            if dist.key == 'setuptools':
                # Ensure that setuptools itself never becomes unavailable!
                # XXX should this check for latest version?
                filename = os.path.join(self.install_dir, 'setuptools.pth')
                if os.path.islink(filename):
                    os.unlink(filename)
                f = open(filename, 'wt')
                f.write(self.pth_file.make_relative(dist.location) + '\n')
                f.close()
    def unpack_progress(self, src, dst):
        """Archive-extraction filter that logs each member and keeps it."""
        # Progress filter for unpacking
        log.debug("Unpacking %s to %s", src, dst)
        return dst  # only unpack-and-compile skips files for dry run
    def unpack_and_compile(self, egg_path, destination):
        """Extract a zipped egg to `destination`, byte-compiling its .py
        files and making shared libraries executable."""
        to_compile = []
        to_chmod = []

        def pf(src, dst):
            # Collect work items while extracting; EGG-INFO sources are
            # intentionally not byte-compiled.
            if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
                to_compile.append(dst)
            elif dst.endswith('.dll') or dst.endswith('.so'):
                to_chmod.append(dst)
            self.unpack_progress(src, dst)
            # Returning None in dry-run mode skips actually writing files.
            return not self.dry_run and dst or None

        unpack_archive(egg_path, destination, pf)
        self.byte_compile(to_compile)
        if not self.dry_run:
            for f in to_chmod:
                mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
                chmod(f, mode)
    def byte_compile(self, to_compile):
        """Byte-compile the given .py files, honoring --optimize, dry-run,
        and the interpreter's dont_write_bytecode setting."""
        if sys.dont_write_bytecode:
            self.warn('byte-compiling is disabled, skipping.')
            return

        from distutils.util import byte_compile

        try:
            # try to make the byte compile messages quieter
            log.set_verbosity(self.verbose - 1)

            byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
            if self.optimize:
                byte_compile(
                    to_compile, optimize=self.optimize, force=1,
                    dry_run=self.dry_run,
                )
        finally:
            log.set_verbosity(self.verbose)  # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""").lstrip()
def no_default_version_msg(self):
template = self.__no_default_msg
return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
    def install_site_py(self):
        """Make sure there's a site.py in the target dir, if needed"""
        if self.sitepy_installed:
            return  # already did it, or don't need to

        sitepy = os.path.join(self.install_dir, "site.py")
        source = resource_string("setuptools", "site-patch.py")
        source = source.decode('utf-8')
        current = ""

        if os.path.exists(sitepy):
            log.debug("Checking existing site.py in %s", self.install_dir)
            with io.open(sitepy) as strm:
                current = strm.read()

            # Refuse to clobber a site.py we didn't generate ourselves.
            if not current.startswith('def __boot():'):
                raise DistutilsError(
                    "%s is not a setuptools-generated site.py; please"
                    " remove it." % sitepy
                )

        if current != source:
            log.info("Creating %s", sitepy)
            if not self.dry_run:
                ensure_directory(sitepy)
                with io.open(sitepy, 'w', encoding='utf-8') as strm:
                    strm.write(source)
            self.byte_compile([sitepy])

        self.sitepy_installed = True
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in six.iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
    def _expand(self, *attrs):
        """Expand $-config-vars (and '~' on posix) in the named attributes,
        applying the --prefix install scheme defaults first."""
        config_vars = self.get_finalized_command('install').config_vars

        if self.prefix:
            # Set default install_dir/scripts from --prefix
            config_vars = config_vars.copy()
            config_vars['base'] = self.prefix
            scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
            for attr, val in scheme.items():
                # Only fill in attributes the user didn't set explicitly.
                if getattr(self, attr, None) is None:
                    setattr(self, attr, val)

        from distutils.util import subst_vars

        for attr in attrs:
            val = getattr(self, attr)
            if val is not None:
                val = subst_vars(val, config_vars)
                if os.name == 'posix':
                    val = os.path.expanduser(val)
                setattr(self, attr, val)
def get_site_dirs():
    """Return a list of 'site' directories: PYTHONPATH entries, per-prefix
    site-packages, framework/user locations, and site.getsitepackages(),
    all normalized."""
    sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
                                            '').split(os.pathsep) if _f]
    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)
    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos'):
                sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
            elif os.sep == '/':
                # POSIX-style layout
                sitedirs.extend([
                    os.path.join(
                        prefix,
                        "lib",
                        "python" + sys.version[:3],
                        "site-packages",
                    ),
                    os.path.join(prefix, "lib", "site-python"),
                ])
            else:
                # Windows-style layout
                sitedirs.extend([
                    prefix,
                    os.path.join(prefix, "lib", "site-packages"),
                ])
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        home_sp = os.path.join(
                            home,
                            'Library',
                            'Python',
                            sys.version[:3],
                            'site-packages',
                        )
                        sitedirs.append(home_sp)
    lib_paths = get_path('purelib'), get_path('platlib')
    for site_lib in lib_paths:
        if site_lib not in sitedirs:
            sitedirs.append(site_lib)

    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)

    try:
        sitedirs.extend(site.getsitepackages())
    except AttributeError:
        # older interpreters / virtualenvs may lack getsitepackages()
        pass

    sitedirs = list(map(normalize_path, sitedirs))

    return sitedirs
def expand_paths(inputs):
    """Yield sys.path directories that might contain "old-style" packages"""
    seen = {}

    for dirname in inputs:
        dirname = normalize_path(dirname)
        if dirname in seen:
            continue

        seen[dirname] = 1
        if not os.path.isdir(dirname):
            continue

        files = os.listdir(dirname)
        yield dirname, files

        for name in files:
            if not name.endswith('.pth'):
                # We only care about the .pth files
                continue
            if name in ('easy-install.pth', 'setuptools.pth'):
                # Ignore .pth files that we control
                continue

            # Read the .pth file
            f = open(os.path.join(dirname, name))
            lines = list(yield_lines(f))
            f.close()

            # Yield existing non-dupe, non-import directory lines from it
            for line in lines:
                if not line.startswith("import"):
                    line = normalize_path(line.rstrip())
                    if line not in seen:
                        seen[line] = 1

                        if not os.path.isdir(line):
                            continue

                        yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
    """Extract configuration data from a bdist_wininst .exe

    Returns a configparser.RawConfigParser, or None
    """
    f = open(dist_filename, 'rb')
    try:
        endrec = zipfile._EndRecData(f)
        if endrec is None:
            return None

        # Size of the data prepended before the embedded zip archive.
        prepended = (endrec[9] - endrec[5]) - endrec[6]
        if prepended < 12:  # no wininst data here
            return None
        f.seek(prepended - 12)

        # 12-byte little-endian trailer: magic tag, config length, bitmap length.
        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
        if tag not in (0x1234567A, 0x1234567B):
            return None  # not a valid tag
        f.seek(prepended - (12 + cfglen))
        init = {'version': '', 'target_version': ''}
        cfg = configparser.RawConfigParser(init)
        try:
            part = f.read(cfglen)
            # Read up to the first null byte.
            config = part.split(b'\0', 1)[0]
            # Now the config is in bytes, but for RawConfigParser, it should
            # be text, so decode it.
            config = config.decode(sys.getfilesystemencoding())
            cfg.readfp(six.StringIO(config))
        except configparser.Error:
            return None
        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
            return None
        return cfg
    finally:
        f.close()
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file

    Returns a list of (lowercased old prefix, new prefix) pairs, longest
    prefixes first, derived from the archive layout and any .pth files.
    """
    prefixes = [
        ('PURELIB/', ''),
        ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    z = zipfile.ZipFile(exe_filename)
    try:
        for info in z.infolist():
            name = info.filename
            parts = name.split('/')
            if len(parts) == 3 and parts[2] == 'PKG-INFO':
                if parts[1].endswith('.egg-info'):
                    # Found the .egg-info dir; map it to EGG-INFO first.
                    prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
                    break
            if len(parts) != 2 or not name.endswith('.pth'):
                continue
            if name.endswith('-nspkg.pth'):
                continue
            if parts[0].upper() in ('PURELIB', 'PLATLIB'):
                contents = z.read(name)
                if six.PY3:
                    contents = contents.decode()
                for pth in yield_lines(contents):
                    pth = pth.strip().replace('\\', '/')
                    if not pth.startswith('import'):
                        prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
    finally:
        z.close()
    prefixes = [(x.lower(), y) for x, y in prefixes]
    # Longest (most specific) prefixes must be tried first.
    prefixes.sort()
    prefixes.reverse()
    return prefixes
class PthDistributions(Environment):
    """A .pth file with Distribution paths in it"""

    # True when in-memory paths differ from what's on disk.
    dirty = False

    def __init__(self, filename, sitedirs=()):
        """Load `filename` (a .pth file) and index the distributions found
        on its paths; `sitedirs` entries are excluded from the file."""
        self.filename = filename
        self.sitedirs = list(map(normalize_path, sitedirs))
        self.basedir = normalize_path(os.path.dirname(self.filename))
        self._load()
        Environment.__init__(self, [], None, None)
        for path in yield_lines(self.paths):
            list(map(self.add, find_distributions(path, True)))

    def _load(self):
        """Parse the .pth file into self.paths, pruning dead/duplicate
        entries (which marks the object dirty)."""
        self.paths = []
        saw_import = False
        seen = dict.fromkeys(self.sitedirs)
        if os.path.isfile(self.filename):
            f = open(self.filename, 'rt')
            for line in f:
                if line.startswith('import'):
                    saw_import = True
                    continue
                path = line.rstrip()
                self.paths.append(path)
                if not path.strip() or path.strip().startswith('#'):
                    continue
                # skip non-existent paths, in case somebody deleted a package
                # manually, and duplicate paths as well
                path = self.paths[-1] = normalize_path(
                    os.path.join(self.basedir, path)
                )
                if not os.path.exists(path) or path in seen:
                    self.paths.pop()  # skip it
                    self.dirty = True  # we cleaned up, so we're dirty now :)
                    continue
                seen[path] = 1
            f.close()

        if self.paths and not saw_import:
            self.dirty = True  # ensure anything we touch has import wrappers
        # Drop trailing blank lines.
        while self.paths and not self.paths[-1].strip():
            self.paths.pop()

    def save(self):
        """Write changed .pth file back to disk"""
        if not self.dirty:
            return

        rel_paths = list(map(self.make_relative, self.paths))
        if rel_paths:
            log.debug("Saving %s", self.filename)
            lines = self._wrap_lines(rel_paths)
            data = '\n'.join(lines) + '\n'

            if os.path.islink(self.filename):
                os.unlink(self.filename)
            with open(self.filename, 'wt') as f:
                f.write(data)

        elif os.path.exists(self.filename):
            # No paths left: remove the now-empty file entirely.
            log.debug("Deleting empty %s", self.filename)
            os.unlink(self.filename)

        self.dirty = False

    @staticmethod
    def _wrap_lines(lines):
        # Hook for subclasses to bracket the saved lines (see
        # RewritePthDistributions); base class writes them as-is.
        return lines

    def add(self, dist):
        """Add `dist` to the distribution map"""
        new_path = (
            dist.location not in self.paths and (
                dist.location not in self.sitedirs or
                # account for '.' being in PYTHONPATH
                dist.location == os.getcwd()
            )
        )
        if new_path:
            self.paths.append(dist.location)
            self.dirty = True
        Environment.add(self, dist)

    def remove(self, dist):
        """Remove `dist` from the distribution map"""
        while dist.location in self.paths:
            self.paths.remove(dist.location)
            self.dirty = True
        Environment.remove(self, dist)

    def make_relative(self, path):
        """Return `path` rewritten relative to the .pth file's directory,
        or unchanged if it is not under that directory."""
        npath, last = os.path.split(normalize_path(path))
        baselen = len(self.basedir)
        parts = [last]
        sep = os.altsep == '/' and '/' or os.sep
        while len(npath) >= baselen:
            if npath == self.basedir:
                parts.append(os.curdir)
                parts.reverse()
                return sep.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        else:
            return path
class RewritePthDistributions(PthDistributions):
    """PthDistributions variant that brackets saved entries with code to
    splice them to the front of sys.path at interpreter startup."""

    @classmethod
    def _wrap_lines(cls, lines):
        yield cls.prelude
        for line in lines:
            yield line
        yield cls.postlude

    # Executed by site.py before the path entries: remember sys.path length.
    prelude = _one_liner("""
        import sys
        sys.__plen = len(sys.path)
        """)
    # Executed after: move the newly appended entries to position __egginsert.
    postlude = _one_liner("""
        import sys
        new = sys.path[sys.__plen:]
        del sys.path[sys.__plen:]
        p = getattr(sys, '__egginsert', 0)
        sys.path[p:p] = new
        sys.__egginsert = p + len(new)
        """)
# Opt in to the sys.path-rewriting .pth technique via environment variable.
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
    PthDistributions = RewritePthDistributions
def _first_line_re():
    """
    Return a regular expression based on first_line_re suitable for matching
    strings.
    """
    pattern = first_line_re.pattern
    if isinstance(pattern, str):
        return first_line_re
    # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
    return re.compile(pattern.decode())
def auto_chmod(func, arg, exc):
    """rmtree onerror handler: on Windows, make the file writable and retry
    os.remove; otherwise re-raise with the failing func/arg appended."""
    if func is os.remove and os.name == 'nt':
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    et, ev, _ = sys.exc_info()
    # NOTE(review): ev[0]/ev[1] assume an old tuple-style exception value;
    # on Python 3 exception instances are not subscriptable, so this
    # re-raise path would itself raise TypeError -- confirm before relying
    # on the appended-message behavior.
    six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """
    Fix any globally cached `dist_path` related data

    `dist_path` should be a path of a newly installed egg distribution (zipped
    or unzipped).

    sys.path_importer_cache contains finder objects that have been cached when
    importing data from the original distribution. Any such finders need to be
    cleared since the replacement distribution might be packaged differently,
    e.g. a zipped egg distribution might get replaced with an unzipped egg
    folder or vice versa. Having the old finders cached may then cause Python
    to attempt loading modules from the replacement distribution using an
    incorrect loader.

    zipimport.zipimporter objects are Python loaders charged with importing
    data packaged inside zip archives. If stale loaders referencing the
    original distribution, are left behind, they can fail to load modules from
    the replacement distribution. E.g. if an old zipimport.zipimporter instance
    is used to load data from a new zipped egg archive, it may cause the
    operation to attempt to locate the requested data in the wrong location -
    one indicated by the original distribution's zip archive directory
    information. Such an operation may then fail outright, e.g. report having
    read a 'bad local file header', or even worse, it may fail silently &
    return invalid data.

    zipimport._zip_directory_cache contains cached zip archive directory
    information for all existing zipimport.zipimporter instances and all such
    instances connected to the same archive share the same cached directory
    information.

    If asked, and the underlying Python implementation allows it, we can fix
    all existing zipimport.zipimporter instances instead of having to track
    them down and remove them one by one, by updating their shared cached zip
    archive directory information. This, of course, assumes that the
    replacement distribution is packaged as a zipped egg.

    If not asked to fix existing zipimport.zipimporter instances, we still do
    our best to clear any remaining zipimport.zipimporter related cached data
    that might somehow later get used when attempting to load data from the new
    distribution and thus cause such load operations to fail. Note that when
    tracking down such remaining stale data, we can not catch every conceivable
    usage from here, and we clear only those that we know of and have found to
    cause problems if left alive. Any remaining caches should be updated by
    whomever is in charge of maintaining them, i.e. they should be ready to
    handle us replacing their zip archives with new distributions at runtime.

    """
    # There are several other known sources of stale zipimport.zipimporter
    # instances that we do not clear here, but might if ever given a reason to
    # do so:
    # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
    # set') may contain distributions which may in turn contain their
    #   zipimport.zipimporter loaders.
    # * Several zipimport.zipimporter loaders held by local variables further
    #   up the function call stack when running the setuptools installation.
    # * Already loaded modules may have their __loader__ attribute set to the
    #   exact loader instance used when importing them. Python 3.4 docs state
    #   that this information is intended mostly for introspection and so is
    #   not expected to cause us problems.
    normalized_path = normalize_path(dist_path)
    _uncache(normalized_path, sys.path_importer_cache)
    if fix_zipimporter_caches:
        _replace_zip_directory_cache_data(normalized_path)
    else:
        # Here, even though we do not want to fix existing and now stale
        # zipimporter cache information, we still want to remove it. Related to
        # Python's zip archive directory information cache, we clear each of
        # its stale entries in two phases:
        # 1. Clear the entry so attempting to access zip archive information
        #    via any existing stale zipimport.zipimporter instances fails.
        # 2. Remove the entry from the cache so any newly constructed
        #    zipimport.zipimporter instances do not end up using old stale
        #    zip archive directory information.
        # This whole stale data removal step does not seem strictly necessary,
        # but has been left in because it was done before we started replacing
        # the zip archive directory information cache content if possible, and
        # there are no relevant unit tests that we can depend on to tell us if
        # this is really needed.
        _remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
    """
    Return zipimporter cache entry keys related to a given normalized path.

    Alternative path spellings (e.g. those using different character case or
    those using alternative path separators) related to the same path are
    included. Any sub-path entries are included as well, i.e. those
    corresponding to zip archives embedded in other zip archives.
    """
    prefix_len = len(normalized_path)

    def is_related(key):
        normalized_key = normalize_path(key)
        if not normalized_key.startswith(normalized_path):
            return False
        # The key must either be the path itself or continue with a separator.
        return normalized_key[prefix_len:prefix_len + 1] in (os.sep, '')

    return [key for key in cache if is_related(key)]
def _update_zipimporter_cache(normalized_path, cache, updater=None):
    """
    Update zipimporter cache data for a given normalized path.

    Any sub-path entries are processed as well, i.e. those corresponding to zip
    archives embedded in other zip archives.

    Given updater is a callable taking a cache entry key and the original entry
    (after already removing the entry from the cache), and expected to update
    the entry and possibly return a new one to be inserted in its place.
    Returning None indicates that the entry should not be replaced with a new
    one. If no updater is given, the cache entries are simply removed without
    any additional processing, the same as if the updater simply returned None.

    """
    for p in _collect_zipimporter_cache_entries(normalized_path, cache):
        # N.B. pypy's custom zipimport._zip_directory_cache implementation does
        # not support the complete dict interface:
        # * Does not support item assignment, thus not allowing this function
        #    to be used only for removing existing cache entries.
        #  * Does not support the dict.pop() method, forcing us to use the
        #    get/del patterns instead. For more detailed information see the
        #    following links:
        # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
        # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
        old_entry = cache[p]
        del cache[p]
        new_entry = updater and updater(p, old_entry)
        if new_entry is not None:
            cache[p] = new_entry
def _uncache(normalized_path, cache):
    # With no updater given, all related entries are simply removed.
    _update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
    # Clear each stale directory-info entry in place (so live zipimporters
    # sharing it stop using it), then let the updater remove it from the cache.
    def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
        old_entry.clear()

    _update_zipimporter_cache(
        normalized_path, zipimport._zip_directory_cache,
        updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
    _replace_zip_directory_cache_data = \
        _remove_and_clear_zip_directory_cache_data
else:

    def _replace_zip_directory_cache_data(normalized_path):
        def replace_cached_zip_archive_directory_data(path, old_entry):
            # N.B. In theory, we could load the zip directory information just
            # once for all updated path spellings, and then copy it locally and
            # update its contained path strings to contain the correct
            # spelling, but that seems like a way too invasive move (this cache
            # structure is not officially documented anywhere and could in
            # theory change with new Python releases) for no significant
            # benefit.
            old_entry.clear()
            # Constructing a new zipimporter repopulates the shared cache
            # entry for `path` with fresh directory data, which we copy back
            # into the (shared) old entry so live importers see it too.
            zipimport.zipimporter(path)
            old_entry.update(zipimport._zip_directory_cache[path])
            return old_entry

        _update_zipimporter_cache(
            normalized_path, zipimport._zip_directory_cache,
            updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
    """Return True when `text` compiles as Python source, False otherwise.

    TypeError (e.g. a non-string argument) is treated the same as a syntax
    error.
    """
    try:
        compile(text, filename, 'exec')
        return True
    except (SyntaxError, TypeError):
        return False
def is_sh(executable):
    """Determine if the specified executable is a .sh (contains a #! line)

    NOTE(review): on read errors this returns the (truthy) path itself
    rather than a boolean; callers appear to rely only on truthiness.
    """
    shebang = '#!'
    try:
        with io.open(executable, encoding='latin-1') as stream:
            head = stream.read(len(shebang))
    except (OSError, IOError):
        return executable
    return head == shebang
def nt_quote_arg(arg):
    """Quote a command line argument according to Windows parsing rules"""
    # Delegate to the stdlib implementation of the MS C runtime quoting rules.
    return subprocess.list2cmdline([arg])
def is_python_script(script_text, filename):
    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
    """
    if filename.endswith(('.py', '.pyw')):
        # extension claims Python
        return True
    if is_python(script_text, filename):
        # syntactically valid Python
        return True
    if script_text.startswith('#!'):
        # Shebang present: accept when it mentions python somewhere.
        first_line = script_text.splitlines()[0]
        return 'python' in first_line.lower()
    return False  # Not any Python I can recognize
try:
    from os import chmod as _chmod
except ImportError:
    # Jython compatibility: no os.chmod there, so make it a harmless no-op.
    def _chmod(*args):
        pass
def chmod(path, mode):
    """Best-effort chmod: log the attempt and swallow OS errors."""
    log.debug("changing mode of %s to %o", path, mode)
    try:
        _chmod(path, mode)
    except os.error as exc:
        log.debug("chmod failed: %s", exc)
class CommandSpec(list):
    """
    A command spec for a #! header, specified as a list of arguments akin to
    those passed to Popen.
    """

    options = []
    split_args = dict()

    @classmethod
    def best(cls):
        """
        Choose the best CommandSpec class based on environmental conditions.
        """
        return cls

    @classmethod
    def _sys_executable(cls):
        # Honor the macOS venv launcher override when present.
        _default = os.path.normpath(sys.executable)
        return os.environ.get('__PYVENV_LAUNCHER__', _default)

    @classmethod
    def from_param(cls, param):
        """
        Construct a CommandSpec from a parameter to build_scripts, which may
        be None.
        """
        if isinstance(param, cls):
            return param
        if isinstance(param, list):
            return cls(param)
        if param is None:
            return cls.from_environment()
        # otherwise, assume it's a string.
        return cls.from_string(param)

    @classmethod
    def from_environment(cls):
        """Build a spec naming the current interpreter."""
        return cls([cls._sys_executable()])

    @classmethod
    def from_string(cls, string):
        """
        Construct a command spec from a simple string representing a command
        line parseable by shlex.split.
        """
        items = shlex.split(string, **cls.split_args)
        return cls(items)

    def install_options(self, script_text):
        # Carry over options from the script's existing #! line; prepend -x
        # when the rendered command line is not pure ASCII.
        self.options = shlex.split(self._extract_options(script_text))
        cmdline = subprocess.list2cmdline(self)
        if not isascii(cmdline):
            self.options[:0] = ['-x']

    @staticmethod
    def _extract_options(orig_script):
        """
        Extract any options from the first line of the script.
        """
        first = (orig_script + '\n').splitlines()[0]
        match = _first_line_re().match(first)
        options = match.group(1) or '' if match else ''
        return options.strip()

    def as_header(self):
        """Render this spec (plus its options) as a #! header line."""
        return self._render(self + list(self.options))

    @staticmethod
    def _strip_quotes(item):
        # Drop one matching pair of surrounding quotes, if any.
        _QUOTES = '"\''
        for q in _QUOTES:
            if item.startswith(q) and item.endswith(q):
                return item[1:-1]
        return item

    @staticmethod
    def _render(items):
        cmdline = subprocess.list2cmdline(
            CommandSpec._strip_quotes(item.strip()) for item in items)
        return '#!' + cmdline + '\n'
# For pbr compat; will be removed in a future version.
# (Module-level snapshot of the interpreter path used for #! headers.)
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
    """CommandSpec variant that splits args with non-POSIX (Windows) rules."""
    split_args = dict(posix=False)
class ScriptWriter(object):
    """
    Encapsulates behavior around writing entry point scripts for console and
    gui apps.
    """

    # Stub script body; %-interpolated with spec/group/name in get_args().
    template = textwrap.dedent("""
        # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
        __requires__ = %(spec)r
        import re
        import sys
        from pkg_resources import load_entry_point

        if __name__ == '__main__':
            sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
            sys.exit(
                load_entry_point(%(spec)r, %(group)r, %(name)r)()
            )
    """).lstrip()

    command_spec_class = CommandSpec

    @classmethod
    def get_script_args(cls, dist, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_args", DeprecationWarning)
        writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
        header = cls.get_script_header("", executable, wininst)
        return writer.get_args(dist, header)

    @classmethod
    def get_script_header(cls, script_text, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_header", DeprecationWarning)
        if wininst:
            executable = "python.exe"
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()

    @classmethod
    def get_args(cls, dist, header=None):
        """
        Yield write_script() argument tuples for a distribution's
        console_scripts and gui_scripts entry points.
        """
        if header is None:
            header = cls.get_header()
        spec = str(dist.as_requirement())
        for type_ in 'console', 'gui':
            group = type_ + '_scripts'
            for name, ep in dist.get_entry_map(group).items():
                cls._ensure_safe_name(name)
                script_text = cls.template % locals()
                args = cls._get_script_args(type_, name, header, script_text)
                for res in args:
                    yield res

    @staticmethod
    def _ensure_safe_name(name):
        """
        Prevent paths in *_scripts entry point names.
        """
        has_path_sep = re.search(r'[\\/]', name)
        if has_path_sep:
            raise ValueError("Path separators not allowed in script names")

    @classmethod
    def get_writer(cls, force_windows):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return WindowsScriptWriter.best() if force_windows else cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter for this environment.
        """
        if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
            return WindowsScriptWriter.best()
        else:
            return cls

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        # Simply write the stub with no extension.
        yield (name, header + script_text)

    @classmethod
    def get_header(cls, script_text="", executable=None):
        """Create a #! line, getting options (if any) from script_text"""
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
    """ScriptWriter for Windows: writes a .pya/.pyw stub per entry point."""
    command_spec_class = WindowsCommandSpec

    @classmethod
    def get_writer(cls):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter suitable for Windows
        """
        writer_lookup = dict(
            executable=WindowsExecutableLauncherWriter,
            natural=cls,
        )
        # for compatibility, use the executable launcher by default
        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
        return writer_lookup[launcher]

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        "For Windows, add a .py extension"
        ext = dict(console='.pya', gui='.pyw')[type_]
        if ext not in os.environ['PATHEXT'].lower().split(';'):
            msg = (
                "{ext} not listed in PATHEXT; scripts will not be "
                "recognized as executables."
            ).format(**locals())
            warnings.warn(msg, UserWarning)
        # Block older spellings of the same script so stale files don't
        # shadow the freshly-written one.
        old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
        old.remove(ext)
        header = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield name + ext, header + script_text, 't', blockers

    @classmethod
    def _adjust_header(cls, type_, orig_header):
        """
        Make sure 'pythonw' is used for gui and 'python' is used for
        console (regardless of what sys.executable is).
        """
        pattern = 'pythonw.exe'
        repl = 'python.exe'
        if type_ == 'gui':
            pattern, repl = repl, pattern
        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
        new_header = pattern_ob.sub(string=orig_header, repl=repl)
        return new_header if cls._use_header(new_header) else orig_header

    @staticmethod
    def _use_header(new_header):
        """
        Should _adjust_header use the replaced header?

        On non-windows systems, always use. On
        Windows systems, only use the replaced header if it resolves
        to an executable on the system.
        """
        clean_header = new_header[2:-1].strip('"')
        return sys.platform != 'win32' or find_executable(clean_header)
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
    """Writes a -script.py stub plus a native .exe launcher per entry point."""

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        """
        For Windows, add a .py extension and an .exe launcher
        """
        if type_ == 'gui':
            launcher_type = 'gui'
            ext = '-script.pyw'
            old = ['.pyw']
        else:
            launcher_type = 'cli'
            ext = '-script.py'
            old = ['.py', '.pyc', '.pyo']
        hdr = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield (name + ext, hdr + script_text, 't', blockers)
        yield (
            name + '.exe', get_win_launcher(launcher_type),
            'b'  # write in binary mode
        )
        if not is_64bit():
            # install a manifest for the launcher to prevent Windows
            # from detecting it as an installer (which it will for
            # launchers like easy_install.exe). Consider only
            # adding a manifest for launchers detected as installers.
            # See Distribute #143 for details.
            m_name = name + '.exe.manifest'
            yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
# Module-level aliases; both underlying methods emit DeprecationWarning.
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
    """
    Load the Windows launcher (executable) suitable for launching a script.

    `type` should be either 'cli' or 'gui'

    Returns the executable as a byte string.
    """
    arch_tag = "-64." if is_64bit() else "-32."
    launcher_fn = ('%s.exe' % type).replace(".", arch_tag)
    return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
    """Return the launcher manifest XML, %-interpolated with *name*."""
    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
    if six.PY2:
        return manifest % vars()
    return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
    """Recursively delete *path*, retrying via auto_chmod on errors."""
    return shutil.rmtree(path, ignore_errors, onerror)
def current_umask():
    """Return the process umask without permanently changing it."""
    previous = os.umask(0o022)
    os.umask(previous)  # restore immediately
    return previous
def bootstrap():
    # This function is called when setuptools*.egg is run using /bin/sh
    # It rewrites sys.argv so main() installs the egg that contains us.
    import setuptools

    argv0 = os.path.dirname(setuptools.__path__[0])
    sys.argv[0] = argv0
    sys.argv.append(argv0)
    main()
def main(argv=None, **kw):
    """Console entry point: run the easy_install command through a stripped
    Distribution (no common-usage help) with patched usage text."""
    from setuptools import setup
    from setuptools.dist import Distribution

    class DistributionWithoutHelpCommands(Distribution):
        common_usage = ""

        def _show_help(self, *args, **kw):
            with _patch_usage():
                Distribution._show_help(self, *args, **kw)

    if argv is None:
        argv = sys.argv[1:]

    with _patch_usage():
        setup(
            script_args=['-q', 'easy_install', '-v'] + argv,
            script_name=sys.argv[0] or 'easy_install',
            distclass=DistributionWithoutHelpCommands,
            **kw
        )
@contextlib.contextmanager
def _patch_usage():
    """Temporarily replace distutils' usage text with an easy_install one."""
    import distutils.core
    USAGE = textwrap.dedent("""
        usage: %(script)s [options] requirement_or_url ...
           or: %(script)s --help
        """).lstrip()

    def gen_usage(script_name):
        return USAGE % dict(
            script=os.path.basename(script_name),
        )

    saved = distutils.core.gen_usage
    distutils.core.gen_usage = gen_usage
    try:
        yield
    finally:
        # Always restore the original generator, even on error.
        distutils.core.gen_usage = saved
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/Python/lib/site-packages/setuptools/command/easy_install.py
|
Python
|
gpl-3.0
| 85,919
|
[
"VisIt"
] |
941c950219db3e4e678c234f8fefe54686eebd02baf4751ac0f66f565a179b04
|
from textwrap import dedent
import py, pytest
from _pytest.config import PytestPluginManager
@pytest.fixture(scope="module", params=["global", "inpackage"])
def basedir(request):
    # Build a small tree with nested conftests; parametrized to exercise
    # both plain directories and package (__init__.py) layouts.
    from _pytest.tmpdir import tmpdir
    tmpdir = tmpdir(request)
    tmpdir.ensure("adir/conftest.py").write("a=1 ; Directory = 3")
    tmpdir.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5")
    if request.param == "inpackage":
        tmpdir.ensure("adir/__init__.py")
        tmpdir.ensure("adir/b/__init__.py")
    return tmpdir
def ConftestWithSetinitial(path):
    """Return a PytestPluginManager seeded with *path* as initial conftest."""
    manager = PytestPluginManager()
    conftest_setinitial(manager, [path])
    return manager
def conftest_setinitial(conftest, args, confcutdir=None):
    """Feed a minimal option namespace into *conftest* via
    ``_set_initial_conftests``.

    Fix: the original did ``str(confcutdir)`` unconditionally, turning a
    ``None`` cutdir into the literal (truthy) string ``"None"``, which
    downstream code would treat as a real path.  Preserve ``None`` instead.
    """
    class Namespace:
        def __init__(self):
            self.file_or_dir = args
            # Only stringify real path objects; keep None as None.
            self.confcutdir = str(confcutdir) if confcutdir is not None else None
            self.noconftest = False
    conftest._set_initial_conftests(Namespace())
class TestConftestValueAccessGlobal:
    """Access to conftest-defined values through PytestPluginManager."""

    def test_basic_init(self, basedir):
        conftest = PytestPluginManager()
        p = basedir.join("adir")
        assert conftest._rget_with_confmod("a", p)[1] == 1

    def test_immediate_initialiation_and_incremental_are_the_same(self, basedir):
        # Incremental lookups must populate the module cache step by step.
        conftest = PytestPluginManager()
        len(conftest._path2confmods)
        conftest._getconftestmodules(basedir)
        snap1 = len(conftest._path2confmods)
        #assert len(conftest._path2confmods) == snap1 + 1
        conftest._getconftestmodules(basedir.join('adir'))
        assert len(conftest._path2confmods) == snap1 + 1
        conftest._getconftestmodules(basedir.join('b'))
        assert len(conftest._path2confmods) == snap1 + 2

    def test_value_access_not_existing(self, basedir):
        conftest = ConftestWithSetinitial(basedir)
        with pytest.raises(KeyError):
            conftest._rget_with_confmod('a', basedir)

    def test_value_access_by_path(self, basedir):
        # Deeper conftests shadow values from parent directories.
        conftest = ConftestWithSetinitial(basedir)
        adir = basedir.join("adir")
        assert conftest._rget_with_confmod("a", adir)[1] == 1
        assert conftest._rget_with_confmod("a", adir.join("b"))[1] == 1.5

    def test_value_access_with_confmod(self, basedir):
        startdir = basedir.join("adir", "b")
        startdir.ensure("xx", dir=True)
        conftest = ConftestWithSetinitial(startdir)
        mod, value = conftest._rget_with_confmod("a", startdir)
        assert value == 1.5
        path = py.path.local(mod.__file__)
        assert path.dirpath() == basedir.join("adir", "b")
        assert path.purebasename.startswith("conftest")
def test_conftest_in_nonpkg_with_init(tmpdir):
    # Conftests must load even when __init__.py files exist but the
    # directory name ("adir-1.0") is not importable as a package.
    tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3")
    tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5")
    tmpdir.ensure("adir-1.0/b/__init__.py")
    tmpdir.ensure("adir-1.0/__init__.py")
    ConftestWithSetinitial(tmpdir.join("adir-1.0", "b"))


def test_doubledash_considered(testdir):
    # A directory named like an option ("--option") given twice must
    # still be collected exactly once.
    conf = testdir.mkdir("--option")
    conf.join("conftest.py").ensure()
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [conf.basename, conf.basename])
    l = conftest._getconftestmodules(conf)
    assert len(l) == 1


def test_issue151_load_all_conftests(testdir):
    # Every directory named on the command line gets its conftest loaded.
    names = "code proj src".split()
    for name in names:
        p = testdir.mkdir(name)
        p.ensure("conftest.py")

    conftest = PytestPluginManager()
    conftest_setinitial(conftest, names)
    d = list(conftest._conftestpath2mod.values())
    assert len(d) == len(names)
def test_conftest_global_import(testdir):
    # Importing a second conftest.py rebinds the top-level "conftest"
    # module name to the most recently imported one.
    testdir.makeconftest("x=3")
    p = testdir.makepyfile("""
        import py, pytest
        from _pytest.config import PytestPluginManager
        conf = PytestPluginManager()
        mod = conf._importconftest(py.path.local("conftest.py"))
        assert mod.x == 3
        import conftest
        assert conftest is mod, (conftest, mod)
        subconf = py.path.local().ensure("sub", "conftest.py")
        subconf.write("y=4")
        mod2 = conf._importconftest(subconf)
        assert mod != mod2
        assert mod2.y == 4
        import conftest
        assert conftest is mod2, (conftest, mod)
    """)
    res = testdir.runpython(p)
    assert res.ret == 0


def test_conftestcutdir(testdir):
    # Conftests above --confcutdir are ignored during collection, but a
    # direct _importconftest still works and updates sub-path lookups.
    conf = testdir.makeconftest("")
    p = testdir.mkdir("x")
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [testdir.tmpdir], confcutdir=p)
    l = conftest._getconftestmodules(p)
    assert len(l) == 0
    l = conftest._getconftestmodules(conf.dirpath())
    assert len(l) == 0
    assert conf not in conftest._conftestpath2mod
    # but we can still import a conftest directly
    conftest._importconftest(conf)
    l = conftest._getconftestmodules(conf.dirpath())
    assert l[0].__file__.startswith(str(conf))
    # and all sub paths get updated properly
    l = conftest._getconftestmodules(p)
    assert len(l) == 1
    assert l[0].__file__.startswith(str(conf))
def test_conftestcutdir_inplace_considered(testdir):
    # A conftest located exactly at confcutdir must itself still load.
    conf = testdir.makeconftest("")
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath())
    l = conftest._getconftestmodules(conf.dirpath())
    assert len(l) == 1
    assert l[0].__file__.startswith(str(conf))


@pytest.mark.parametrize("name", 'test tests whatever .dotdir'.split())
def test_setinitial_conftest_subdirs(testdir, name):
    # Only well-known test-directory names are scanned for conftests;
    # unrelated and hidden directories are skipped.
    sub = testdir.mkdir(name)
    subconftest = sub.ensure("conftest.py")
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir)
    if name not in ('whatever', '.dotdir'):
        assert subconftest in conftest._conftestpath2mod
        assert len(conftest._conftestpath2mod) == 1
    else:
        assert subconftest not in conftest._conftestpath2mod
        assert len(conftest._conftestpath2mod) == 0


def test_conftest_confcutdir(testdir):
    # With --confcutdir, a broken conftest above the cutdir is never run.
    testdir.makeconftest("assert 0")
    x = testdir.mkdir("x")
    x.join("conftest.py").write(py.code.Source("""
        def pytest_addoption(parser):
            parser.addoption("--xyz", action="store_true")
    """))
    result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
    result.stdout.fnmatch_lines(["*--xyz*"])
    assert 'warning: could not load initial' not in result.stdout.str()


def test_no_conftest(testdir):
    # --noconftest skips conftest loading entirely.
    testdir.makeconftest("assert 0")
    result = testdir.runpytest("--noconftest")
    assert result.ret == 0


def test_conftest_existing_resultlog(testdir):
    # A pre-existing result.log file must not break conftest discovery.
    x = testdir.mkdir("tests")
    x.join("conftest.py").write(py.code.Source("""
        def pytest_addoption(parser):
            parser.addoption("--xyz", action="store_true")
    """))
    testdir.makefile(ext=".log", result="")  # Writes result.log
    result = testdir.runpytest("-h", "--resultlog", "result.log")
    result.stdout.fnmatch_lines(["*--xyz*"])


def test_conftest_existing_junitxml(testdir):
    # A pre-existing junit.xml file must not break conftest discovery.
    x = testdir.mkdir("tests")
    x.join("conftest.py").write(py.code.Source("""
        def pytest_addoption(parser):
            parser.addoption("--xyz", action="store_true")
    """))
    testdir.makefile(ext=".xml", junit="")  # Writes junit.xml
    result = testdir.runpytest("-h", "--junitxml", "junit.xml")
    result.stdout.fnmatch_lines(["*--xyz*"])


def test_conftest_import_order(testdir, monkeypatch):
    # Parent conftests are returned before child conftests.
    ct1 = testdir.makeconftest("")
    sub = testdir.mkdir("sub")
    ct2 = sub.join("conftest.py")
    ct2.write("")

    def impct(p):
        return p

    conftest = PytestPluginManager()
    monkeypatch.setattr(conftest, '_importconftest', impct)
    assert conftest._getconftestmodules(sub) == [ct1, ct2]
def test_fixture_dependency(testdir, monkeypatch):
    # A fixture overridden in a deeper conftest wins, and unrelated
    # fixtures in intermediate conftests are never instantiated.
    ct1 = testdir.makeconftest("")
    ct1 = testdir.makepyfile("__init__.py")
    ct1.write("")
    sub = testdir.mkdir("sub")
    sub.join("__init__.py").write("")
    sub.join("conftest.py").write(py.std.textwrap.dedent("""
        import pytest

        @pytest.fixture
        def not_needed():
            assert False, "Should not be called!"

        @pytest.fixture
        def foo():
            assert False, "Should not be called!"

        @pytest.fixture
        def bar(foo):
            return 'bar'
    """))
    subsub = sub.mkdir("subsub")
    subsub.join("__init__.py").write("")
    subsub.join("test_bar.py").write(py.std.textwrap.dedent("""
        import pytest

        @pytest.fixture
        def bar():
            return 'sub bar'

        def test_event_fixture(bar):
            assert bar == 'sub bar'
    """))
    result = testdir.runpytest("sub")
    result.stdout.fnmatch_lines(["*1 passed*"])


def test_conftest_found_with_double_dash(testdir):
    # Options added by a conftest show up even when the target is a
    # nodeid (file::test) rather than a plain path.
    sub = testdir.mkdir("sub")
    sub.join("conftest.py").write(py.std.textwrap.dedent("""
        def pytest_addoption(parser):
            parser.addoption("--hello-world", action="store_true")
    """))
    p = sub.join("test_hello.py")
    p.write(py.std.textwrap.dedent("""
        import pytest
        def test_hello(found):
            assert found == 1
    """))
    result = testdir.runpytest(str(p) + "::test_hello", "-h")
    result.stdout.fnmatch_lines("""
        *--hello-world*
    """)
class TestConftestVisibility:
    """Issue #616: conftest visibility must not depend on the cwd."""

    def _setup_tree(self, testdir):  # for issue616
        # example mostly taken from:
        # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
        runner = testdir.mkdir("empty")
        package = testdir.mkdir("package")

        package.join("conftest.py").write(dedent("""\
            import pytest
            @pytest.fixture
            def fxtr():
                return "from-package"
        """))
        package.join("test_pkgroot.py").write(dedent("""\
            def test_pkgroot(fxtr):
                assert fxtr == "from-package"
        """))

        swc = package.mkdir("swc")
        swc.join("__init__.py").ensure()
        swc.join("conftest.py").write(dedent("""\
            import pytest
            @pytest.fixture
            def fxtr():
                return "from-swc"
        """))
        swc.join("test_with_conftest.py").write(dedent("""\
            def test_with_conftest(fxtr):
                assert fxtr == "from-swc"
        """))

        snc = package.mkdir("snc")
        snc.join("__init__.py").ensure()
        snc.join("test_no_conftest.py").write(dedent("""\
            def test_no_conftest(fxtr):
                assert fxtr == "from-package"   # No local conftest.py, so should
                                                # use value from parent dir's
        """))
        print ("created directory structure:")
        for x in testdir.tmpdir.visit():
            print ("   " + x.relto(testdir.tmpdir))

        return {
            "runner": runner,
            "package": package,
            "swc": swc,
            "snc": snc}

    # N.B.: "swc" stands for "subdir with conftest.py"
    #       "snc" stands for "subdir no [i.e. without] conftest.py"
    @pytest.mark.parametrize("chdir,testarg,expect_ntests_passed", [
        # Effective target: package/..
        ("runner", "..", 3),
        ("package", "..", 3),
        ("swc", "../..", 3),
        ("snc", "../..", 3),

        # Effective target: package
        ("runner", "../package", 3),
        ("package", ".", 3),
        ("swc", "..", 3),
        ("snc", "..", 3),

        # Effective target: package/swc
        ("runner", "../package/swc", 1),
        ("package", "./swc", 1),
        ("swc", ".", 1),
        ("snc", "../swc", 1),

        # Effective target: package/snc
        ("runner", "../package/snc", 1),
        ("package", "./snc", 1),
        ("swc", "../snc", 1),
        ("snc", ".", 1),
    ])
    @pytest.mark.issue616
    def test_parsefactories_relative_node_ids(
            self, testdir, chdir,testarg, expect_ntests_passed):
        dirs = self._setup_tree(testdir)
        print("pytest run in cwd: %s" %(
              dirs[chdir].relto(testdir.tmpdir)))
        print("pytestarg        : %s" %(testarg))
        print("expected pass    : %s" %(expect_ntests_passed))
        with dirs[chdir].as_cwd():
            reprec = testdir.inline_run(testarg, "-q", "--traceconfig")
            reprec.assertoutcome(passed=expect_ntests_passed)
|
Carreau/pytest
|
testing/test_conftest.py
|
Python
|
mit
| 12,459
|
[
"VisIt"
] |
794a38035b93efe3dcf2a884174134d6753cdc758d3d25d6d12df9e770b7ad07
|
# tcr.py ---
#
# Filename: tcr.py
# Description:
# Author: subhasis ray
# Maintainer:
# Created: Fri Oct 16 10:14:07 2009 (+0530)
# Version:
# Last-Updated: Fri Oct 21 17:14:04 2011 (+0530)
# By: Subhasis Ray
# Update #: 60
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary: This is a redoing of the Thalamocortical relay cells using prototype file.
# It is a translation of the cell in Traub et al, 2005 model.
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
from datetime import datetime
import config
import trbutil
import moose
from cell import *
from capool import CaPool
class TCR(TraubCell):
    """Thalamocortical relay cell from Traub et al. 2005, built from the
    prototype file TCR.p (Python 2 / MOOSE code)."""
    # Channel reversal potentials and Ca dynamics handed to the prototype
    # reader; values in SI units (volts / seconds).
    chan_params = {
        'ENa': 50e-3,
        'EK': -95e-3,
        'EAR': -35e-3,
        'ECa': 125e-3,
        'EGABA': -75e-3, # Sanchez-Vives et al. 1997
        'TauCa': 20e-3,
        'X_AR': 0.25
        }
    ca_dep_chans = ['KAHP_SLOWER', 'KC']
    num_comp = 137
    # Compartment number of the presynaptic (axonal output) compartment.
    presyn = 135
    level = None
    depth = None
    proto_file = 'TCR.p'
    prototype = TraubCell.read_proto(proto_file, "TCR", chan_params)
    def __init__(self, *args):
        TraubCell.__init__(self, *args)
        # Soma Ca pool decays slower than dendritic pools (50 ms vs 20 ms).
        moose.CaConc(self.soma.path + '/CaPool').tau = 50e-3

    def _topology(self):
        # Assign compartments to depth levels (soma=1, dendrites 2-4,
        # axonal compartments at level 0).
        self.presyn = 135
        self.level[1].add(self.comp[1])
        for ii in range(2, 120, 13):
            self.level[2].add(self.comp[ii])
        for ii in range(3, 121, 13):
            self.level[3].add(self.comp[ii])
            self.level[3].add(self.comp[ii+1])
            self.level[3].add(self.comp[ii+2])
        for ii in range(6, 124, 13):
            for kk in range(0,9):
                self.level[4].add(self.comp[ii+kk])
        for ii in range(132, 138):
            self.level[0].add(self.comp[ii])

    def _setup_passive(self):
        # Uniform leak/rest potential of -70 mV across all compartments.
        for comp in self.comp[1:]:
            comp.Em = -70e-3
            comp.initVm = -70e-3

    def _setup_channels(self):
        """Set up connections between compartment and channels, and Ca pool"""
        for comp in self.comp[1:]:
            ca_pool = None
            ca_dep_chans = []
            ca_chans = []
            for child in comp.children():
                obj = moose.Neutral(child)
                if obj.name == 'CaPool':
                    ca_pool = moose.CaConc(child)
                    ca_pool.tau = 20e-3
                else:
                    obj_class = obj.className
                    if obj_class == 'HHChannel':
                        obj = moose.HHChannel(child)
#                         if not obj.name in self.chan_list:
#                             obj.Gbar = 0.0
                        pyclass = eval(obj.name)
                        # Assign reversal potentials by channel family and
                        # collect Ca-coupled channels for wiring below.
                        if issubclass(pyclass, KChannel):
                            obj.Ek = -95e-3
                            if issubclass(pyclass, KCaChannel):
                                ca_dep_chans.append(obj)
                        elif issubclass(pyclass, NaChannel):
                            obj.Ek = 50e-3
                        elif issubclass(pyclass, CaChannel):
                            obj.Ek = 125e-3
                            if issubclass(pyclass, CaL):
                                ca_chans.append(obj)
                        elif issubclass(pyclass, AR):
                            obj.Ek = -35e-3
            if ca_pool:
                # CaL current feeds the pool; K(Ca) channels read it back.
                for channel in ca_chans:
                    channel.connect('IkSrc', ca_pool, 'current')
                for channel in ca_dep_chans:
                    channel.useConcentration = 1
                    ca_pool.connect("concSrc", channel, "concen")

    @classmethod
    def test_single_cell(cls):
        """Simulates a single thalamocortical relay cell and plots
        the Vm and [Ca2+]"""
        config.LOGGER.info("/**************************************************************************")
        config.LOGGER.info(" *")
        config.LOGGER.info(" * Simulating a single cell: %s" % (cls.__name__))
        config.LOGGER.info(" *")
        config.LOGGER.info(" **************************************************************************/")
        sim = Simulation(cls.__name__)
        mycell = TCR(TCR.prototype, sim.model.path + "/TCR")
        print 'Created cell:', mycell.path
        vm_table = mycell.comp[mycell.presyn].insertRecorder('Vm_TCR', 'Vm', sim.data)
        pulsegen = mycell.soma.insertPulseGen('pulsegen', sim.model, firstLevel=3e-10, firstDelay=50e-3, firstWidth=50e-3)
        sim.schedule()
        if mycell.has_cycle():
            print "WARNING!! CYCLE PRESENT IN CICRUIT."
        t1 = datetime.now()
        sim.run(200e-3)
        t2 = datetime.now()
        delta = t2 - t1
        print 'simulation time: ', delta.seconds + 1e-6 * delta.microseconds
        sim.dump_data('data')
        if config.has_pylab:
            # Compare against a reference NEURON trace when available.
            mus_vm = config.pylab.array(vm_table) * 1e3
            mus_t = linspace(0, sim.simtime * 1e3, len(mus_vm))
            try:
                nrn_vm = config.pylab.loadtxt('../nrn/mydata/Vm_deepLTS.plot')
                nrn_t = nrn_vm[:, 0]
                nrn_vm = nrn_vm[:, 1]
                config.pylab.plot(nrn_t, nrn_vm, 'y-', label='nrn vm')
            except IOError:
                print 'NEURON Data not available.'
            config.pylab.plot(mus_t, mus_vm, 'g-.', label='mus vm')
            config.pylab.legend()
            config.pylab.show()
# test main --
from simulation import Simulation
import pylab
from subprocess import call

if __name__ == "__main__":
    # Run the single-cell demo when executed directly.
    TCR.test_single_cell()
#
# tcr.py ends here
|
BhallaLab/moose-thalamocortical
|
DEMOS/pymoose/traub2005/py/tcr.py
|
Python
|
lgpl-2.1
| 5,750
|
[
"MOOSE",
"NEURON"
] |
b1aadfd5b6d6dcd081029c495d67e40aca43f24413335ddb07052c6d86300ac6
|
import math
import heapq
import htslibWrapper
import pysam
#
# TODO:
#
# Current algorithm lumps reads from all read groups together
#
#########################################################################################
#
# Some math functions
#
#########################################################################################
def stirling(n):
    """Stirling's approximation to n!.

    See http://en.wikipedia.org/wiki/Stirling%27s_approximation
    """
    power_term = (n / math.e) ** n
    return math.sqrt(2 * math.pi * n) * power_term
def logstirling(n):
    """Natural log of Stirling's approximation to n!; log(0!) taken as 0."""
    if n == 0:
        return 0
    return 0.5 * math.log(2 * math.pi * n) + n * (math.log(n) - 1)
def npr(n,r):
    """Permutations P(n, r); uses the Stirling approximation for n > 20
    to avoid huge factorials."""
    if n > 20:
        return stirling(n) / stirling(n - r)
    return math.factorial(n) / math.factorial(n - r)
def ncr(n,r):
    """Binomial coefficient C(n, r); uses the Stirling approximation for
    n > 20 to avoid huge factorials."""
    if n > 20:
        return stirling(n) / stirling(r) / stirling(n - r)
    return math.factorial(n) / math.factorial(r) / math.factorial(n - r)
#########################################################################################
#
# Definitions and utilities
#
#########################################################################################
# Numeric CIGAR operation codes (values match the SAM/BAM specification:
# M=0, I=1, D=2, N=3, S=4, '='=7, X=8).
CIGAR_M = 0   # alignment match (may be a sequence mismatch)
CIGAR_I = 1   # insertion to the reference
CIGAR_D = 2   # deletion from the reference
CIGAR_N = 3   # skipped region on the reference
CIGAR_S = 4   # soft clipping
CIGAR_MA = 7  # sequence match ('=')
CIGAR_X = 8   # sequence mismatch

MAX_POS = 3000000000  # sentinel larger than any real genomic coordinate

MAX_UNIT_LENGTH = 12 # from tandem.c
MIN_UNIT_LENGTH = 5
#########################################################################################
#
# Main loop
#
#########################################################################################
def PreprocessBam( bamFileName, fastaFileName, orphanBamFileName=None,
                   minCoverage=6, maxCoverage=50,
                   minMapQ=20, minAnchor=5, maxReadStretch=200, min_isize=500, min_tandemlength=5 ):
    """ Computes indel error model from BAM file, and extracts orphan reads.
        The min/maxCoverage variables should be set to reasonable limits to exclude iffy regions.
        maxReadStretch is the maximum aligned extent of a read. """
    # Fix: defaultdict is used for per-locus allele counts below but was not
    # among this module's imports; import it locally so the function runs.
    from collections import defaultdict

    MAX_REPEAT_LENGTH = 64    # fixed maximum due to ffsll instruction (coreutils/tandem.c)

    bamfile = htslibWrapper.Samfile( bamFileName, mode='rb' )

    # extract list of read group identifiers
    readgroups = bamfile.header.get('RG',None)
    if type(readgroups) == dict: readgroups = [readgroups]
    if readgroups:
        readgroupdict = {}
        for idx, ident in enumerate( rg.get('ID',"") for rg in readgroups ):
            readgroupdict[ident] = idx

    # open orphan bam - write SAM file for now
    if orphanBamFileName:
        #orphanBamFile = htslibWrapper.Samfile( orphanBamFileName, mode='wb', template=bamfile )
        orphanSamFile = open( orphanBamFileName, 'w' )
        orphanSamFile.write( bamfile.text )
    else:
        #orphanBamFile = None
        orphanSamFile = None

    # open fasta file
    fastafile = pysam.Fastafile( fastaFileName )

    # make a coverage ring buffer
    covbuflen = max( maxReadStretch*2, 1000 )
    coveragebuf = CoverageBuffer( covbuflen )

    # make read buffer, to implement limited lookback
    # NOTE(review): spelled 'Readbuffer' but the class below is 'ReadBuffer';
    # verify which name exists at module scope before changing.
    readbuffer = Readbuffer( bamfile, size = maxReadStretch + MAX_UNIT_LENGTH + MAX_REPEAT_LENGTH )

    # make indel queue
    # NOTE(review): Indelqueue.__init__ takes a minAnchor argument; this
    # zero-argument call looks inconsistent — confirm against the class.
    indelqueue = Indelqueue()

    # make repetitive region queue
    repeatqueue = Repeatqueue( fastafile, covbuflen, maxReadStretch + MAX_UNIT_LENGTH + MAX_REPEAT_LENGTH,
                               min_tandemlength = min_tandemlength )

    # make object to collect results
    indelhistogram = IndelHistogram( minCoverage, maxCoverage )

    # setup main loop
    curchrom = -1    # int!
    # Fix: was misspelled 'chrchromS', which left curchromS undefined and
    # raised a NameError at the reset() call below.
    curchromS = ""   # string
    curpos = -1
    repeatqueue.reset( curchromS, curpos )

    for read in readbuffer:

        # extract index of read group
        if readgroups:
            readgroup = read.opt('RG')
            readgroupidx = readgroupdict[readgroup]
        else:
            readgroupidx = 0

        # decide whether to write as orphan read
        if ( orphanSamFile and
             read.is_paired() and
             (not read.is_qc_fail()) and
             (not read.is_duplicate()) and
             read.mapq() >= minMapQ and
             (not read.is_proper_pair()) and
             (not read.mate_is_unmapped()) and
             (not read.is_unmapped()) and
             (read.rname() != read.mrnm() or abs(read.isize()) > min_isize) ):

            isize = read.isize()
            if isize == 0: isizeS = "*"
            else:          isizeS = str(-isize)
            if readgroups: rgS = "\tRG:Z:" + readgroup
            else:          rgS = ""
            flag = read.flag()   # TODO: swap 1st/2nd, strand/matestrand, etc.
            orphanSamFile.write( "%s\t%i\t%s\t%i\t%i\t*\t%s\t%i\t%i\t%s\t%s\t%s%s\n" % (read.fastQName(),
                                                                                        flag,
                                                                                        bamfile.getrname( read.mrnm() ),
                                                                                        read.npos(),
                                                                                        read.mapq(),
                                                                                        bamfile.getrname( read.rname() ),
                                                                                        read.pos(),
                                                                                        isize,
                                                                                        read.seq(),
                                                                                        read.qual(),
                                                                                        rgS) )

        # filter
        if (read.is_unmapped() or
            (read.is_paired() and not read.is_proper_pair()) or
            read.is_qc_fail() or
            read.is_duplicate() or
            read.mapq() < minMapQ):
            continue

        # enter any indels in queue
        indelqueue.enter( read )

        # prepare to enter read into coverage buffer -- flush buffer
        readpos = read.pos()
        readchrom = read.rname()   # int!

        # calculate position to flush to
        if readchrom == curchrom:
            assert readpos >= curpos   # BAM should be sorted
            flushpos = min(readpos, curpos + covbuflen)
        else:
            if curchrom > -1:
                flushpos = curpos + covbuflen
            else:
                flushpos = curpos

        # get positions of next repetitive region, and next indel;
        # these are guaranteed to be on curchrom
        indelpos, indelallele = indelqueue.nextindel()
        repeatqueue.setpos( flushpos )
        repeatstart, repeatend, repeatunit, repeatlength = repeatqueue.nextsegment()

        # process region up to flushpos
        curcov = coveragebuf.getcurcov()
        while curpos < flushpos:

            # 1. update coverage
            curcov += coveragebuf.getupdate( curpos )

            # 2. process any indel
            alleles = defaultdict(int)
            while indelpos <= curpos:
                # only process indels outside repetitive regions
                if curpos < repeatstart:
                    alleles[indelallele] += 1
                indelpos, indelallele = indelqueue.nextindel()
            if minCoverage <= curcov <= maxCoverage:
                indelhistogram.add( curcov, 1, 1, alleles )

            # 3. process repetitive regions
            if curpos == repeatend:
                # retrieve reads that fully overlap repetitive region,
                # plus anchoring
                reads = readbuffer.retrieve( repeatstart - minAnchor, repeatend + minAnchor )
                repeatcov = len(reads)
                if minCoverage <= repeatcov <= maxCoverage:
                    alleles = haplotypes( reads, repeatstart, repeatend )
                    indelhistogram.add( repeatcov, repeatunit, repeatlength, alleles )
                repeatstart, repeatend, repeatunit, repeatlength = repeatqueue.nextsegment()

            curpos += 1

        # push back unprocessed indels and repeats
        repeatqueue.pushback( repeatstart, repeatend, repeatunit, repeatlength )
        indelqueue.pushback( indelpos, indelallele )

        # process jumps and change of chromosome
        if readchrom != curchrom:
            curchrom = readchrom
            curchromS = bamfile.getrname( readchrom )
            curpos = readpos
            coveragebuf.reset( curchrom, curpos )
            repeatqueue.reset( curchromS, curpos )
        elif flushpos < readpos:
            curpos = readpos
            repeatqueue.setpos( curpos )

        # set current coverage
        coveragebuf.setcurcov( curcov )

        # enter read into coverage buffer
        coveragebuf.enter( read )

    # Done
    indelhistogram.computeModels()
    indelhistogram.output()
#########################################################################################
#
# Readbuffer, allows iteration as well as limited look-back
#
#########################################################################################
class ReadBuffer():
    """Ring buffer giving sequential access to reads plus limited look-back.

    Reads are pulled one at a time from ``bamfile.fetchAll()``; a window of
    recent reads (covering at least ``size`` reference positions) is kept so
    retrieve() can return reads overlapping a recently passed region.

    NOTE(review): elsewhere in this module ReadBuffer is constructed with a
    single generator argument (``ReadBuffer(filterReads(...))``), which does
    not match this two-argument signature -- confirm which call form is the
    intended one.
    """
    def __init__(self, bamfile, size):
        self.size = size                  # minimum look-back window
        self.generator = bamfile.fetchAll()
        # BUGFIX: was "self.buffer"; every other method uses self._buffer,
        # so retrieve()/_next() raised AttributeError.
        self._buffer = []
        self._bufend = 0                  # first free location in the ring
        self._current = None
        self._laststart = -1
        self._lastrname = -1
        self._resetbuf = False
        self._next()                      # prime the first read
    def _next(self):
        """Advance to the next read; file the previous one in the buffer."""
        current = self._current
        if self._resetbuf:
            # New chromosome: restart the buffer from its first read.
            self._buffer = [self._firstread]
            self._bufend = 0 # first free location
            self._laststart = -1
            self._resetbuf = False
        try:
            self._current = self.generator.next()
        except StopIteration:
            self._current = None
        if current:
            if current.rname() != self._lastrname:
                # BUGFIX: remember the chromosome we switched to; previously
                # _lastrname stayed -1 forever, so every single read
                # triggered a buffer reset.
                self._lastrname = current.rname()
                self._resetbuf = True
                self._firstread = current # first read in new buffer
            else:
                if self._buffer[self._bufend].pos() < current.pos() - self.size:
                    # old read is stale; we can store read
                    self._buffer[self._bufend] = current
                    self._bufend = (self._bufend + 1) % self.size
                else:
                    # old read is not stale; we need to extend buffer
                    # put the last-entered read at the end of the buffer, to allow
                    # append (repeatedly, if necessary)
                    if self._bufend != 0:
                        self._buffer = self._buffer[self._bufend:] + self._buffer[:self._bufend]
                        self._bufend = 0
                    self._buffer.append( current )
        return current
    def retrieve(self, start, end):
        """ Returns reads fully overlapping [start,end) """
        # start at end going backwards, for efficiency
        # NOTE(review): indices are taken modulo self.size even though the
        # buffer can grow beyond size via append(), and idx is decremented
        # twice when a read fails the end-overlap test -- both look
        # suspicious; verify against the intended ring-buffer invariants.
        idx = self._bufend
        reads = []
        while True:
            idx = (idx - 1) % self.size
            if self._buffer[idx].pos() < start:
                break
            if self._buffer[idx].end() > end:
                idx = (idx - 1) % self.size
                continue
            reads.append( self._buffer[idx] )
            if idx == self._bufend:
                break
        return reads
#########################################################################################
#
# Indelqueue
#
#########################################################################################
class Indelqueue:
    """Priority queue of candidate indels, ordered by genomic position.

    Indels extracted from a read's CIGAR are accepted only when flanked on
    both sides by a matched ("anchor") segment of at least minAnchor bases.
    Entries are (position, signed allele length): insertions positive,
    deletions negative.
    """
    def __init__(self, minAnchor):
        self._heap = []
        self.minAnchor = minAnchor
    def enter(self, read):
        """Extract anchored indels from *read* and push them on the queue."""
        n = read.getCigarLength()
        if n == 0: return
        putative_indels = []
        is_anchored = False
        nucpos = read.pos()
        for i in range(n):
            op = read.getCigarOpCode( i )
            le = read.getCigarOpLength( i )
            if op == CIGAR_M:
                # BUGFIX: compare against self.minAnchor; the bare name
                # minAnchor is undefined in this scope.
                if le >= self.minAnchor:
                    if is_anchored:
                        # anchored left and right -- accept indels
                        for indel in putative_indels:
                            heapq.heappush( self._heap, indel )
                        putative_indels = []
                    is_anchored = True
                nucpos += le
            elif op == CIGAR_D:
                # BUGFIX: record the current reference position (nucpos);
                # "pos" was an undefined name here.
                putative_indels.append( (nucpos, -le) )
                nucpos += le
            elif op == CIGAR_I:
                putative_indels.append( (nucpos, le) )
            elif op == CIGAR_N:
                nucpos += le
    def nextindel(self):
        """Pop the leftmost indel; returns (MAX_POS, 0) when empty."""
        if len(self._heap) == 0:
            return MAX_POS, 0
        return heapq.heappop(self._heap)
    def pushback(self, indelpos, indelallele):
        """Return an unprocessed indel to the queue."""
        heapq.heappush( self._heap, (indelpos, indelallele) )
#########################################################################################
#
# Repeat region queue
#
#########################################################################################
class Repeatqueue:
    """Priority queue of tandem-repeat segments on the current chromosome.

    Segments are discovered lazily: setpos() scans reference sequence around
    the supplied position (via cerrormodel.get_repeats) and queues each
    repeat as a (start, end, unit, length) tuple.
    """
    def __init__(self, fastafile, lookahead, maxtandemsize, min_tandem_length = MIN_UNIT_LENGTH):
        self.fastafile = fastafile
        self.lookahead = lookahead
        self.maxtandemsize = maxtandemsize
        self.queue = []
        self.min_tandem_length = min_tandem_length
        self.reset( None, -MAX_POS )
    def reset(self, chromS, pos):
        """Switch to chromosome *chromS* and restart scanning at *pos*."""
        self.chrom = chromS
        self.curpos = -MAX_POS
        self.setpos( pos )
    def setpos(self, pos):
        """Scan the reference around *pos* and queue any tandem repeats."""
        if pos < self.curpos: return
        if not self.chrom: return
        # read from self.curpos to pos+lookahead,
        # but do not process more than 2*lookahead.
        # BUGFIX: lookahead/maxtandemsize are instance attributes; the bare
        # names used previously are undefined at module level.
        topos = pos + self.lookahead
        frompos = max( self.curpos, pos - self.lookahead, 0 )
        # obtain sequence, padded so repeats straddling the window are seen
        sequence = self.read( self.chrom, frompos - self.maxtandemsize, topos + self.maxtandemsize )
        # get repeats as (pos, size, unit) elements
        repeats = cerrormodel.get_repeats( sequence, self.min_tandem_length, frompos - self.maxtandemsize )
        # enter into queue
        for reppos, size, unit in repeats:
            if self.curpos <= reppos < topos:
                self.pushback( reppos, reppos + size, unit, size )
        # BUGFIX(review): advance curpos so a later setpos() does not queue
        # the same repeats again (it was never advanced before).
        self.curpos = topos
    def read(self, chrom, start, end):
        """Fetch reference sequence [start, end), padding with Ns off both ends."""
        if start<0:
            if end<=0: return "N" * (end-start)
            return ("N" * (-start)) + self.read( chrom, 0, end )
        seq = self.fastafile.fetch( chrom, start, end )
        # TODO: check length of reference, and add Ns myself if required
        assert len(seq) > 0
        if len(seq) < end-start:
            seq += "N"*(end-start-len(seq))
        return seq
    def nextsegment(self):
        """Pop the leftmost segment; a MAX_POS sentinel when empty.

        BUGFIX: previously returned four undefined names (NameError); pop
        from the heap, mirroring Indelqueue.nextindel().
        """
        if len(self.queue) == 0:
            return MAX_POS, MAX_POS, 0, 0
        return heapq.heappop(self.queue)
    def pushback(self, repeatstart, repeatend, repeatunit, repeatlength):
        """Return an unprocessed segment to the queue."""
        heapq.heappush( self.queue, (repeatstart, repeatend, repeatunit, repeatlength) )
#########################################################################################
#
# Repeat region queue
#
#########################################################################################
class CoverageBuffer:
    """Ring buffer of per-position coverage deltas.

    enter() records +1 at a read's start slot and -1 at its end slot;
    getupdate(pos) drains the delta for position *pos* so the caller can
    maintain a running coverage while sweeping left to right.

    NOTE(review): positions are folded into the ring with ``% covbuflen``,
    so reads longer than covbuflen would corrupt the counts -- confirm
    covbuflen exceeds the maximum read span.
    """
    def __init__(self, covbuflen):
        self.covbuflen = covbuflen
        self.reset( None, 0 )
    def reset( self, chromId, pos):
        """Clear all deltas and restart the sweep at *pos*."""
        self.covbuf = [0] * self.covbuflen
        self.curpos = pos
        self.curcov = 0
    def getcurcov(self):
        return self.curcov
    def setcurcov(self, curcov):
        self.curcov = curcov
    def getupdate(self, pos):
        """Drain and return the coverage delta recorded for position *pos*.

        BUGFIX: index with the requested position; the old code always read
        slot self.curpos, which never advances during the sweep.
        """
        idx = pos % self.covbuflen
        update = self.covbuf[ idx ]
        self.covbuf[ idx ] = 0
        return update
    def enter(self, read):
        """Record a read: +1 at its start position, -1 at its end."""
        pos = read.pos() % self.covbuflen
        end = read.end() % self.covbuflen
        self.covbuf[pos] += 1
        self.covbuf[end] -= 1
#########################################################################################
#
# Indel histogram
#
#########################################################################################
class IndelHistogram:
    """Accumulates per-coverage indel allele counts and fits error models.

    NOTE(review): all methods are unimplemented stubs in this version; the
    surrounding driver calls add()/computeModels()/output(), so this class
    is presumably completed elsewhere.
    """
    def __init__(self, minCoverage, maxCoverage):
        # coverage window within which sites contribute to the histograms
        self.minCoverage = minCoverage
        self.maxCoverage = maxCoverage
        # MIN/MAX_UNIT_LENGTH are module-level constants defined elsewhere
        self.minTandem = MIN_UNIT_LENGTH
        self.maxTandem = MAX_UNIT_LENGTH
        pass
    def add( self, coverage, repeatunit, repeatlength, alleles ):
        # stub: record *alleles* observed at *coverage* for this repeat class
        pass
    def computeModels(self):
        # stub: fit error models to the accumulated histograms
        pass
    def output(self):
        # stub: write the fitted models
        pass
def haplotypes( reads, repeatstart, repeatend ):
    # NOTE(review): stub -- "alleles" is undefined here, so calling this
    # raises NameError; a full implementation with the same name appears
    # later in this module and shadows this one.
    return alleles
#
# Assume that indels with low support, in otherwise well covered regions, are errors?
#
maxprocessedmotifs = 1e10  # safety cap on the number of motifs examined
thinner = 4/15.0 # fraction of reads considered
min_tot_count = 10 # minimum count to report a motif
# Lightweight SAM record; fields follow the SAM column order.
Read = collections.namedtuple('Read','flag, rname, pos, mapq, cigar, mname, mpos, isize, seq, qual')
# CIGAR operation codes, matching the BAM specification numbering.
CIGAR_M = 0   # alignment match (may be a mismatch)
CIGAR_I = 1   # insertion to the reference
CIGAR_D = 2   # deletion from the reference
CIGAR_N = 3   # skipped region
CIGAR_S = 4   # soft clip
CIGAR_MA = 7  # sequence match (=)
CIGAR_X = 8   # sequence mismatch
def readLengthOnGenome(read):
    """Return the number of reference bases spanned by *read*.

    Uses the CIGAR when present (summing ops that consume reference bases),
    falls back to the sequence length, and returns 0 when neither is set.
    """
    if read.cigar:
        reference_ops = (CIGAR_M, CIGAR_D, CIGAR_N, CIGAR_MA, CIGAR_X)
        span = 0
        for op, oplen in read.cigar:
            if op in reference_ops:
                span += oplen
        return span
    if read.seq:
        return len(read.seq)
    return 0
class ControlledPopen:
    """Context manager around subprocess.Popen that captures stdout and
    guarantees the child process is killed on exit."""
    def __init__(self, arguments):
        self._argv = arguments
    def __enter__(self):
        self._proc = subprocess.Popen(self._argv, stdout=subprocess.PIPE)
        return self._proc
    def __exit__(self, exc_type, exc_value, exc_tb):
        # Kill unconditionally so an abandoned pipe cannot leave a zombie.
        self._proc.kill()
        del self._proc
def readGenerator( bamfilename, chromosome ):
    """Yield Read tuples for *chromosome*, parsed from ``samtools view``.

    Reads are randomly thinned to the module-level fraction ``thinner``.
    Unmapped coordinates ("*") are translated to -1; a "*" CIGAR becomes
    None, otherwise a list of (opcode, length) pairs.
    """
    arguments = ["samtools","view",bamfilename,chromosome]
    with ControlledPopen( arguments) as pipe:
        for line in pipe.stdout:
            if line.startswith('@'):
                continue
            if random.random() > thinner:
                continue
            label, flagS, rname, posS, mapqS, cigarS, mname, mposS, isizeS, seq, qual = line[:-1].split('\t')[:11]
            flag = int(flagS)
            mapq = int(mapqS)
            if posS == "*":
                pos = -1
            else:
                pos = int(posS)
            if mposS == "*":
                mpos = -1
            else:
                mpos = int(mposS)
            if cigarS == "*":
                cigar = None
            else:
                # parse e.g. "10M2I38M": re.split yields the lengths, and p
                # walks to each operator character to recover its op code
                cigar = []
                p = -1
                for arg in re.split('[MIDNSHP=X]',cigarS)[:-1]:
                    p += len(arg) + 1
                    op = 'MIDNSHP=X'.find(cigarS[p])
                    cigar.append( (op, int(arg)) )
            yield Read(flag=flag,
                       rname=rname,
                       pos=pos,   # BUGFIX: was int(posS), which raises ValueError on "*"
                       mapq=mapq,
                       cigar=cigar,
                       mname=mname,
                       mpos=mpos,
                       isize=int(isizeS),  # NOTE(review): not "*"-guarded like pos/mpos
                       seq=seq,
                       qual=qual)
def filterReads( generator, minmapq=10, minanchor=5 ):
    """ Filters for properly mapped reads, and imposes minimum mapQ.
        Remove any read with >1 gap
        Remove reads not anchored with minimum anchor length """
    for read in generator:
        # BUGFIX: the unmapped test used "read.flag & 4 == 1", and
        # flag & 4 is 0 or 4 -- never 1 -- so unmapped reads were never
        # rejected here.  Test the bit directly instead.
        if (read.flag & 4 != 0) or (read.flag & 2 != 2) or read.pos == -1 or read.mapq < minmapq:
            continue
        gaps = 0
        matches = []
        for op,arg in read.cigar:
            if op==CIGAR_I or op==CIGAR_D:
                gaps += 1
            elif op==CIGAR_M:
                matches.append(arg)
        if gaps > 1:
            continue
        # require an anchoring match segment on both ends of the read
        if len(matches)==0 or matches[0] < minanchor or matches[-1] < minanchor:
            continue
        yield read
def motifGenerator(infile, chromosome):
    """Yield (chrom, pos, tandem_length, tandem_unit) parsed from a motif file.

    Each input line is tab-separated: chrom, pos, hlen, hom, tlen, tandem;
    every record must belong to *chromosome*.
    """
    for raw in infile:
        chrom, pos_s, _hlen, _hom, tlen_s, tandem = raw[:-1].split('\t')
        assert chrom == chromosome
        yield (chrom, int(pos_s), int(tlen_s), tandem)
def haplotypes(reads, start, end):
    """Return {indel_allele: count} supported by *reads* over [start, end].

    Alleles are encoded as +length for insertions, -length for deletions
    and 0 for reads supporting the reference.  At most one indel per read
    is counted (the last one touching the window wins).
    """
    allele_counts = {}
    for read in reads:
        supported = 0
        refpos = read.pos
        for op, oplen in read.cigar:
            if op in (CIGAR_M, CIGAR_N):
                refpos += oplen
            elif op == CIGAR_I:
                if start <= refpos <= end + 1:
                    supported = oplen
            elif op == CIGAR_D:
                if start <= refpos <= end + 1:
                    supported = -oplen
                refpos += oplen
        allele_counts[supported] = allele_counts.get(supported, 0) + 1
    return allele_counts
def estimateErrorRate( chromosome, motiffile, lowcovbam,
                       minmapq=10, minanchor=5, coverage=5 ):
    """Build per-motif allele-count histograms from a low-coverage BAM.

    For every tandem-repeat motif on *chromosome*, reads fully spanning the
    motif (plus *minanchor* bases each side) are collected, indel haplotypes
    are called, and the non-reference allele count is entered into a
    histogram keyed by "unit:length" and stratified by local coverage
    (4..*coverage*).  Histograms are then aggregated by unit length,
    low-count keys are dropped, and report() is invoked.
    """
    motifgenerator = motifGenerator( motiffile, chromosome )
    # NOTE(review): ReadBuffer is called here with a single argument, but
    # its __init__ takes (bamfile, size) -- confirm which API is current.
    lowcovBuf = ReadBuffer( filterReads( readGenerator( lowcovbam, chromosome ),
                                         minmapq=minmapq, minanchor=minanchor ) )
    counts = {} # the distribution of non-ref allele coverage, for single-allele sites
    num = 0
    numtried = 0
    try:
        for motif in motifgenerator:
            chrom, pos, tlen, tunit = motif
            start = pos - minanchor
            end = pos + tlen + minanchor + 1
            reads = lowcovBuf.retrieve(start,end)
            numtried += 1
            cov = len(reads)
            # only sites with coverage in [4, coverage] are informative here
            if cov < 4 or cov > coverage: continue
            haps = haplotypes( reads, pos, pos+tlen )
            key = "%s:%s" % (tunit, tlen)
            if len(haps)>2:
                # aggregate the minor alleles
                alleles = sorted( [(count,hap) for hap,count in haps.iteritems()] )
                minors = sum( count for (count,hap) in alleles[:-1] )
                haps = { alleles[0][1]: minors,
                         alleles[-1][1]: alleles[-1][0] }
            # if two alleles are present, but not the reference allele, map the major allele to the reference
            if len(haps)==2 and 0 not in haps:
                alleles = sorted( [(count,hap) for hap,count in haps.iteritems()] )
                haps = { alleles[0][1]: alleles[0][0],
                         0: alleles[1][0] }
            # calculate non-ref allele count
            count = sum( count for hap,count in haps.iteritems() if hap != 0 )
            # histogram layout: (coverage-3) stripes of (coverage+1) bins each
            histogram = counts.get(key,[0]*((coverage-3)*(coverage+1))) # include hom alt counts
            histogram[(cov-4)*(coverage+1)+count] += 1
            counts[key] = histogram
            num += 1
            if num > maxprocessedmotifs: break
    except Exception:
        raise
    # create aggregate counts, additionally keyed by unit *length* ("len:tlen")
    for key in set( counts.keys() ):
        tunit, tlen = key.split(':')
        newkey = "%s:%s" % (len(tunit),tlen)
        hist = counts.get(newkey, [0]*len(counts[key]))
        for idx,c in enumerate(counts[key]):
            hist[idx] += c
        counts[newkey] = hist
    # remove keys with small counts
    for key in set( counts.keys() ):
        if sum(counts[key]) < min_tot_count:
            del counts[key]
    report( counts, coverage )
def report( counts, coverage ):
    """Fit the error model to each histogram and print one line per class.

    Output columns: unit, length, raw histogram, het site fraction,
    het allele balance (beta) and per-read indel error rate (epsilon).
    """
    # build output
    output = []
    for key in counts.keys():
        tunit, tlen = key.split(':')
        tlen = int(tlen)
        # aggregate keys have a numeric "unit" (the unit length)
        try: tunit = int(tunit)
        except Exception: pass
        # fit model to counts
        N00, N01, N11, epsilon, beta = fitmodel( counts[key], coverage )
        output.append( (tunit, tlen, "%s\t%s\t%s\t%1.6f\t%1.6f\t%1.6f" % (tunit,
                                                                          tlen,
                                                                          counts[key],
                                                                          N01/(N00+N01+N11+1e-10),
                                                                          beta,
                                                                          epsilon ) ) )
    # sort by (unit, length) for stable output ordering
    output.sort()
    for u,l,line in output:
        print line
def parse_counts(infile):
    """Parse a previously printed count report back into {key: histogram}.

    Comment lines (starting with '#') and short lines are skipped; numeric
    unit fields are normalized to ints so keys match the writer's format.
    Histograms for duplicate keys are summed element-wise.

    NOTE(review): histogram columns are parsed with eval(); only feed this
    trusted output produced by report().
    """
    counts = {}
    for line in infile:
        if line.startswith('#'):
            continue
        fields = line[:-1].split('\t')
        if len(fields) < 3:
            continue
        unit = fields[0]
        length = int(fields[1])
        histogram = eval(fields[2])
        try:
            # convert numbers into ints; leave tandem units alone
            unit = int(unit)
        except Exception:
            pass
        key = "%s:%s" % (unit, length)
        previous = counts.get(key, None)
        if previous:
            for idx, c in enumerate(previous):
                histogram[idx] += c
        counts[key] = histogram
    return counts
def multimodel(pars, counts, maxcoverage):
    """Total log-likelihood of *counts* under the per-coverage model.

    *counts* is laid out as consecutive stripes of (maxcoverage+1) bins;
    stripe b corresponds to coverage b+4 and only its first cov+1 bins
    are meaningful.  Each stripe is scored by model().
    """
    stride = maxcoverage + 1
    total_sites = float(sum(counts))
    loglik = 0
    for block, offset in enumerate(range(0, len(counts), stride)):
        cov = block + 4
        loglik += model(pars, counts[offset:offset + cov + 1], total_sites)
    return loglik
def model( pars, counts, N ):
    """ computes rate, lambda of the possible outcomes --- to score a Poisson approximation.

    *counts* holds the number of sites observed with 0..cov non-reference
    reads at one fixed coverage cov; *pars* = (N00, N01, N11, epsilon, beta)
    are the hom-ref/het/hom-alt site counts, the per-read error rate and
    the het allele balance.  Returns the Poisson log-likelihood of *counts*.
    ncr() and logstirling() are helpers defined elsewhere in this module.
    """
    cov = len(counts)-1 # for coverage C, counts array includes counts for 0,..,C
    lambdas_noerr = [0] * (cov+1)
    lambdas = [0] * (cov+1)
    # fraction of all sites that have this particular coverage
    cov_scaling = sum(counts)/N
    N00, N01, N11, epsilon, beta = pars
    N00 *= cov_scaling
    N01 *= cov_scaling
    N11 *= cov_scaling
    # hom ref model
    lambdas_noerr[0] = N00
    # het model: binomial sampling of the alt allele with balance beta
    for k in range(0,cov+1):
        lambdas_noerr[k] += N01 * ncr(cov,k) * math.pow(beta,k) * math.pow(1-beta,cov-k)
    # hom alt model
    # NOTE(review): "=" discards the het contribution already added at
    # k=cov (the k=0 hom-ref case uses "=" *before* the het loop adds);
    # presumably this should be "+=" -- confirm before changing.
    lambdas_noerr[cov] = N11
    # error model: redistribute mass over outcomes with 0, 1 or 2 read errors
    # NOTE(review): the /(cov*(cov-1)) terms divide by zero for cov <= 1;
    # callers only pass cov >= 4.
    for k in range(0, cov+1):
        # 0 errors
        lambdas[k] += math.pow(1-epsilon,cov) * lambdas_noerr[k]
        # 1 error
        factor = cov*epsilon*math.pow(1-epsilon,cov-1)
        if k>0:
            lambdas[k-1] += k*factor*lambdas_noerr[k]/cov
        if k<cov:
            lambdas[k+1] += (cov-k)*factor*lambdas_noerr[k]/cov
        # 2 errors
        factor = cov*(cov-1)*0.5*epsilon*epsilon*math.pow(1-epsilon,cov-2)
        if k>1:
            lambdas[k-2] += k*(k-1)*factor*lambdas_noerr[k]/(cov*(cov-1))
        if k<cov-1:
            lambdas[k+2] += (cov-k)*(cov-1-k)*factor*lambdas_noerr[k]/(cov*(cov-1))
        # one error on a ref background, one on an alt background
        lambdas[k] += 2*k*(cov-k)*factor*lambdas_noerr[k]/(cov*(cov-1))
    # likelihood: independent Poisson bins; logstirling(c) ~ log(c!)
    ll = 0
    for k in range(0,cov+1):
        lmda = lambdas[k]
        ll += counts[k]*math.log(lmda + 1e-10) - lmda - logstirling(counts[k])
    return ll
def fitmodel( counts, coverage ):
    """ fits a (homref + error) + het model to count data

    Coordinate-wise multiplicative hill climbing on
    (N00, N01, N11, epsilon, beta): each parameter is tried scaled up and
    down by its relative step; steps shrink (factor ddpars) whenever
    neither direction improves the likelihood.  Returns the fitted
    parameter list.
    """
    # initialize site-class counts from the histogram margins:
    # bin 0 of each coverage stripe -> hom ref, last bin -> hom alt,
    # everything in between -> het
    N00, N01, N11 = 0,0,0
    for i in range(0, len(counts), coverage+1):
        N00 += float(counts[i])
        N01 += float(sum(counts[i+1:i+coverage]))
        N11 += float(counts[i+coverage])
    eps = 0.001
    beta = 0.5
    pars = [N00,N01,N11,eps,beta]
    dpars = [0.05,0.05,0.05,0.05,0.05]    # relative step size per parameter
    minpars = [0.01,0.01,0.01,1e-8,0.35]  # lower bounds
    maxpars = [1e10,1e10,1e10,0.2,0.65]   # upper bounds
    ddpars = 0.9                          # step-size decay factor
    k = 0
    ll = multimodel( pars, counts, coverage )
    change = 1
    # stop when all step sizes are tiny; change is reset each full sweep
    while sum(dpars)>0.001 and (change + k)>0:
        if k == 0: change = 0
        parsplus = pars[:]
        parsminus = pars[:]
        parsplus[k] *= 1.0 + dpars[k]
        parsminus[k] /= 1.0 + dpars[k]
        # out-of-bounds proposals are treated as "no improvement"
        if parsplus[k] < maxpars[k]:
            llplus = multimodel( parsplus, counts, coverage )
        else:
            llplus = ll
        if parsminus[k] > minpars[k]:
            llminus = multimodel( parsminus, counts, coverage )
        else:
            llminus = ll
        if ll >= max(llplus, llminus):
            # no improvement in either direction: shrink this step size
            dpars[k] *= ddpars
            change += 1
        elif llplus > max(ll, llminus):
            pars[k] = parsplus[k]
            ll = llplus
            change += 1
        else:
            pars[k] = parsminus[k]
            ll = llminus
            change += 1
        k = (k+1) % len(pars)
    return pars
#
# main
#
# Two modes: with (chromosome, motiffile, bam) arguments the histograms
# are computed from data; with no positional arguments a previously
# printed report is re-parsed from stdin and re-fitted.
if len(sys.argv) not in [1,2,4,5]:
    print "Usage: %s chromosome motiffile lowcovbam [maxcoverage]" % sys.argv[0]
    # NOTE(review): missing '% sys.argv[0]' -- this prints a literal "%s"
    print "Usage: %s [maxcoverage] < output"
    sys.exit(1)
coverage = 5
minmapq = 30
minanchor = 5
if len(sys.argv) in [1,2]:
    # re-fit mode: read a previous report from stdin
    if len(sys.argv) == 2:
        coverage= int(sys.argv[1])
    counts = parse_counts( sys.stdin )
    report( counts, coverage )
    sys.exit(0)
chromosome, motiffilename, lowcovbam = sys.argv[1:4]
if len(sys.argv) == 5:
    coverage = int(sys.argv[4])
if coverage > 10:
    # high-coverage runs need all reads; disable thinning
    thinner = 1.0
motiffile = filez.open(motiffilename)
# echo the run parameters as comment lines at the top of the report
print "# chromosome \t",chromosome
print "# bamfile \t",lowcovbam
print "# motifs \t",motiffilename
print "# maxcoverage \t",coverage
print "# processed motifs\t",maxprocessedmotifs
print "# thinner \t",thinner
print "# min_tot_count \t",min_tot_count
print "# minanchor \t",minanchor
print "# minmapq \t",minmapq
estimateErrorRate( chromosome, motiffile, lowcovbam,
                   minmapq=minmapq, minanchor=minanchor, coverage=coverage)
|
andyrimmer/Platypus
|
src/python/lowcov.py
|
Python
|
gpl-3.0
| 29,375
|
[
"pysam"
] |
73765c5e2fd0e163c60c2d3e831c3455cb54089730c5dd89777c84310113a5ba
|
"""Test for the PLOT3D reader and its multiblock capabilities. This
also tests the SelectOutput filter.
"""
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Prabhu Ramachandran
# License: BSD Style.
# Standard library imports.
from os.path import abspath
from io import BytesIO
import copy
# Local imports.
from common import TestCase, get_example_data
class TestPLOT3DReader(TestCase):
    """Integration test: multiblock PLOT3D reading, SelectOutput filtering,
    deep-copying of the reader, and save/restore of the visualization."""
    def test(self):
        self.main()
    def do(self):
        ############################################################
        # Imports.
        script = self.script
        from mayavi.sources.plot3d_reader import PLOT3DReader
        from mayavi.filters.select_output import SelectOutput
        from mayavi.modules.outline import Outline
        ############################################################
        # Create a new scene and set up the visualization.
        s = self.new_scene()
        # Read the multi-block plot3d file.
        r = PLOT3DReader()
        r.reader.set(has_byte_count=True, multi_grid=True,
                     byte_order='little_endian')
        r.initialize(get_example_data('tiny.xyz'),
                     get_example_data('tiny.q'),
                     configure=False)
        script.add_source(r)
        # Add the filter.
        f = SelectOutput()
        script.add_filter(f)
        # Create an outline for the data.
        o = Outline()
        script.add_module(o)
        # Check the bounds of the outline (grid block 0 of tiny.xyz).
        assert o.outline_filter.output.bounds == (1.0, 2.0, 1.0, 2.0, 1.0, 2.0)
        # Copy the reader to see if it does not pop up the UI.
        r1 = copy.deepcopy(r)
        script.add_source(r1)
        s.render()
        o1 = r1.children[0].children[0].children[0]
        assert o1.outline_filter.output.bounds == (1.0, 2.0, 1.0, 2.0, 1.0, 2.0)
        # Switching the selected output index moves the outline to block 1.
        r1.children[0].output_index = 1
        assert o1.outline_filter.output.bounds == (2.0, 3.0, 1.0, 2.0, 1.0, 2.0)
        ############################################################
        # Test if saving a visualization and restoring it works.
        # Save visualization.
        f = BytesIO()
        f.name = abspath('test.mv2') # We simulate a file.
        script.save_visualization(f)
        f.seek(0) # So we can read this saved data.
        # Remove existing scene.
        engine = script.engine
        engine.close_scene(s)
        # Load visualization
        script.load_visualization(f)
        s = engine.current_scene
        # Both pipelines must come back with their previous outputs intact.
        o = s.children[0].children[0].children[0].children[0]
        o1 = s.children[1].children[0].children[0].children[0]
        assert o.outline_filter.output.bounds == (1.0, 2.0, 1.0, 2.0, 1.0, 2.0)
        assert o1.outline_filter.output.bounds == (2.0, 3.0, 1.0, 2.0, 1.0, 2.0)
        # If we have come this far, we are golden!
        return
if __name__ == "__main__":
    # Allow running this integration test directly as a script.
    t = TestPLOT3DReader()
    t.test()
|
dmsurti/mayavi
|
integrationtests/mayavi/test_plot3d_mb_reader.py
|
Python
|
bsd-3-clause
| 2,946
|
[
"Mayavi"
] |
e1a6ee4402b551b550978d920bf3d89c9f9448cc8770a61c971d4dc8c1573e2e
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for heterogeneous compilation and execution."""
import json
import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_runtime
from tvm.relay.expr_functor import ExprMutator
from tvm.relay import transform
import tvm.testing
def _trace(module, metadata, _):
if metadata.name == "ManifestAlloc":
pass # import pdb; pdb.set_trace()
def check_graph_runtime(
    target, ref_res, device, func, params, config, opt_level, expected_index=None
):
    """Build *func* with the graph runtime and compare its output to *ref_res*.

    When the generated graph JSON carries a per-node "device_index"
    attribute it is additionally checked against *expected_index*.
    """
    with tvm.transform.PassContext(opt_level=opt_level, config=config):
        graph, lib, new_params = relay.build(func, target, params=params)
        # context order must match the device indices used in the graph
        contexts = [tvm.cpu(0), tvm.context(device)]
        graph_json = json.loads(graph)
        if "device_index" in graph_json["attrs"]:
            device_index = graph_json["attrs"]["device_index"][1]
            assert device_index == expected_index
        mod = graph_runtime.create(graph, lib, contexts)
        mod.set_input(**new_params)
        mod.run()
        res = mod.get_output(0).asnumpy()
        tvm.testing.assert_allclose(res, ref_res, rtol=1e-5, atol=1e-5)
def check_vm_runtime(target, ref_res, device, func, params, config, opt_level, expected_index=None):
    """Compile *func* with the Relay VM and compare the result to *ref_res*.

    *expected_index* is accepted for signature parity with
    check_graph_runtime but is not used on the VM path.
    """
    with tvm.transform.PassContext(opt_level=opt_level, trace=_trace, config=config):
        mod = tvm.IRModule()
        mod["main"] = func
        exe = relay.vm.compile(mod, target)
        ctx = [tvm.cpu(0), tvm.context(device)]
        vm = tvm.runtime.vm.VirtualMachine(exe, ctx)
        res = vm.invoke("main", **params)
        tvm.testing.assert_allclose(res.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def run_opt_pass(expr, passes):
    """Wrap *expr* in a module, run the given pass(es) at opt level 3 and
    return the transformed "main" function."""
    pass_list = [passes] if not isinstance(passes, list) else passes
    mod = tvm.IRModule.from_expr(expr)
    pipeline = tvm.transform.Sequential(pass_list)
    with tvm.transform.PassContext(opt_level=3):
        mod = pipeline(mod)
    return mod["main"]
def test_redundant_annotation():
    """Two on_device annotations of the same value must each be rewritten
    into their own device_copy node."""
    ctx1 = tvm.context(1)
    ctx2 = tvm.context(2)
    x = relay.var("x", shape=(3,))
    y = relay.var("y", shape=(3,))
    z = relay.var("z", shape=(3,))
    def annotated():
        # add is annotated on ctx2 twice and consumed by two subtractions
        add = relay.add(x, y)
        _add1 = relay.annotation.on_device(add, ctx2)
        _add2 = relay.annotation.on_device(add, ctx2)
        sub1 = relay.subtract(_add1, z)
        sub2 = relay.subtract(_add2, z)
        func = relay.Function([x, y, z], relay.Tuple([sub1, sub2]))
        func = run_opt_pass(func, transform.RewriteAnnotatedOps(ctx1.device_type))
        return func
    def expected():
        # each annotated use becomes its own ctx2 -> ctx1 copy
        add = relay.add(x, y)
        copy_add_sub1 = relay.device_copy(add, ctx2, ctx1)
        sub1 = relay.subtract(copy_add_sub1, z)
        copy_add_sub2 = relay.device_copy(add, ctx2, ctx1)
        sub2 = relay.subtract(copy_add_sub2, z)
        func = relay.Function([x, y, z], relay.Tuple([sub1, sub2]))
        return func
    annotated_func = annotated()
    expected_func = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(annotated_func, expected_func)
def test_annotate_expr():
    """A bare expression (not a Function) annotated across two devices gets
    a single device_copy inserted at the ctx1 -> ctx2 boundary."""
    ctx1 = tvm.context(1)
    ctx2 = tvm.context(2)
    x = relay.var("x", shape=(3,))
    y = relay.var("y", shape=(3,))
    z = relay.var("z", shape=(3,))
    def annotated():
        add = relay.add(x, y)
        _add = relay.annotation.on_device(add, ctx1)
        sub = relay.subtract(_add, z)
        _sub = relay.annotation.on_device(sub, ctx2)
        expr = run_opt_pass(_sub, transform.RewriteAnnotatedOps(ctx1.device_type))
        return expr
    def expected():
        add = relay.add(x, y)
        copy_add_sub = relay.device_copy(add, ctx1, ctx2)
        sub = relay.subtract(copy_add_sub, z)
        return sub
    annotated_expr = annotated()
    expected_expr = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(annotated_expr, expected_expr)
def test_annotate_all():
    """When every op is annotated on the same device, no device_copy is
    inserted -- the rewrite strips the annotations and nothing else."""
    ctx1 = tvm.context(1)
    ctx2 = tvm.context(2)
    x = relay.var("x", shape=(3,))
    y = relay.var("y", shape=(3,))
    z = relay.var("z", shape=(3,))
    def annotated():
        add = relay.add(x, y)
        _add = relay.annotation.on_device(add, ctx2)
        sub = relay.subtract(_add, z)
        _sub = relay.annotation.on_device(sub, ctx2)
        func = relay.Function([x, y, z], _sub)
        func = run_opt_pass(func, transform.RewriteAnnotatedOps(ctx1.device_type))
        return func
    def expected():
        add = relay.add(x, y)
        sub = relay.subtract(add, z)
        func = relay.Function([x, y, z], sub)
        return func
    annotated_func = annotated()
    expected_func = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(annotated_func, expected_func)
def test_annotate_none():
    """With no annotations at all the rewrite pass must leave the function
    unchanged."""
    ctx1 = tvm.context(1)
    ctx2 = tvm.context(2)
    x = relay.var("x", shape=(3,))
    y = relay.var("y", shape=(3,))
    z = relay.var("z", shape=(3,))
    def annotated():
        add = relay.add(x, y)
        sub = relay.subtract(add, z)
        func = relay.Function([x, y, z], sub)
        func = run_opt_pass(func, transform.RewriteAnnotatedOps(ctx1.device_type))
        return func
    def expected():
        add = relay.add(x, y)
        sub = relay.subtract(add, z)
        func = relay.Function([x, y, z], sub)
        return func
    annotated_func = annotated()
    expected_func = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(annotated_func, expected_func)
def check_annotated_graph(annotated_func, expected_func):
    """Assert that two functions are structurally equal after type inference."""
    lhs, rhs = (run_opt_pass(f, transform.InferType())
                for f in (annotated_func, expected_func))
    assert tvm.ir.structural_equal(lhs, rhs)
def test_conv_network():
    R"""The network is as following:
             data1       data2
               |           |
             conv2d      conv2d
                \         /
                    add
                     |
                   conv2d
    Checks both manual on_device annotation and annotation via an
    ExprMutator visitor, plus the memory-planner's storage/device split.
    """
    batch_size = 1
    dshape = (batch_size, 64, 56, 56)
    weight = relay.var("weight", shape=(64, 64, 3, 3))
    data1 = relay.var("data1", shape=dshape)
    data2 = relay.var("data2", shape=dshape)
    dev1 = tvm.context(1)
    dev2 = tvm.context(2)
    def original():
        # unannotated version of the network, for the visitor-based test
        conv2d_1 = relay.nn.conv2d(data1, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        conv2d_2 = relay.nn.conv2d(data2, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        add = relay.add(conv2d_1, conv2d_2)
        conv2d_3 = relay.nn.conv2d(add, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        func = relay.Function([data1, data2, weight], conv2d_3)
        func = run_opt_pass(func, transform.RewriteAnnotatedOps(tvm.context(3).device_type))
        return func
    def annotated():
        # conv2ds are pinned to dev2, the add to dev1
        conv2d_1 = relay.nn.conv2d(data1, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        _conv2d_1 = relay.annotation.on_device(conv2d_1, dev2)
        conv2d_2 = relay.nn.conv2d(data2, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        _conv2d_2 = relay.annotation.on_device(conv2d_2, dev2)
        add = relay.add(_conv2d_1, _conv2d_2)
        _add = relay.annotation.on_device(add, dev1)
        conv2d_3 = relay.nn.conv2d(_add, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        _conv2d_3 = relay.annotation.on_device(conv2d_3, dev2)
        func = relay.Function([data1, data2, weight], _conv2d_3)
        func = run_opt_pass(func, transform.RewriteAnnotatedOps(tvm.context(3).device_type))
        return func
    class ScheduleConv2d(ExprMutator):
        """Visitor that annotates every nn.conv2d call on *device*."""
        def __init__(self, device):
            self.device = device
            super().__init__()
        def visit_call(self, expr):
            visit = super().visit_call(expr)
            if expr.op == tvm.relay.op.get("nn.conv2d"):
                return relay.annotation.on_device(visit, self.device)
            else:
                return visit
    def annotate_with_visitor(func):
        sched = ScheduleConv2d(dev2)
        func = sched.visit(func)
        func = run_opt_pass(func, transform.RewriteAnnotatedOps(dev1.device_type))
        return func
    def expected():
        # the rewrite must insert dev2<->dev1 copies around the add
        conv2d_1 = relay.nn.conv2d(data1, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        device_copy1 = relay.device_copy(conv2d_1, dev2, dev1)
        conv2d_2 = relay.nn.conv2d(data2, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        device_copy2 = relay.device_copy(conv2d_2, dev2, dev1)
        add = relay.add(device_copy1, device_copy2)
        device_copy3 = relay.device_copy(add, dev1, dev2)
        conv2d_3 = relay.nn.conv2d(
            device_copy3, weight, channels=64, kernel_size=(3, 3), padding=(1, 1)
        )
        func = relay.Function([data1, data2, weight], conv2d_3)
        return func
    def check_storage_and_device_types():
        # run fusion, plan memory, and verify the planner's assignments
        func = annotated()
        func = run_opt_pass(func, [transform.RewriteAnnotatedOps(3), transform.FuseOps(2)])
        smap = relay.backend._backend.GraphPlanMemory(func)
        storage_ids = []
        device_types = []
        for _, storage_dev_type in smap.items():
            assert len(storage_dev_type) == 2
            for sid in storage_dev_type[0]:
                storage_ids.append(sid.value)
            for did in storage_dev_type[1]:
                device_types.append(did.value)
        assert len(storage_ids) == 10
        assert len(set(storage_ids)) == 8
        assert len(set(device_types)) == 2
        assert set(device_types) == {1, 2}
    def test_manual_annotation():
        annotated_func = annotated()
        expected_func = expected()
        check_annotated_graph(annotated_func, expected_func)
        check_storage_and_device_types()
    def test_visitor_annotation():
        annotated_func = annotate_with_visitor(original())
        expected_func = expected()
        check_annotated_graph(annotated_func, expected_func)
    test_manual_annotation()
    test_visitor_annotation()
def test_propogation():
    R""" The network and per-op device type is as following:
                 x           1
                 |
                log          1
               /   \
             log2  log10     2
               \   /
                add          2
                 |
                tan          1
    Checks the rewritten graph and the device assignment chosen by the
    memory planner.
    """
    ctx1 = tvm.context(1)
    ctx2 = tvm.context(2)
    expected_dev_type = {"log": ctx1, "log2": ctx2, "log10": ctx2, "add": ctx2, "tan": ctx1}
    x = relay.var("x", shape=(3,))
    def annotated():
        log = relay.log(x)
        _log = relay.annotation.on_device(log, expected_dev_type["log"])
        log2 = relay.log2(_log)
        _log2 = relay.annotation.on_device(log2, expected_dev_type["log2"])
        log10 = relay.log10(_log)
        _log10 = relay.annotation.on_device(log10, expected_dev_type["log10"])
        add = relay.add(_log2, _log10)
        _add = relay.annotation.on_device(add, expected_dev_type["add"])
        tan = relay.tan(_add)
        _tan = relay.annotation.on_device(tan, expected_dev_type["tan"])
        func = run_opt_pass(_tan, transform.RewriteAnnotatedOps(ctx1.device_type))
        return func
    def expected():
        log = relay.log(x)
        # log's result is consumed on ctx2 twice -> two explicit copies
        _log_left = relay.device_copy(log, ctx1, ctx2)
        _log_right = relay.device_copy(log, ctx1, ctx2)
        log2 = relay.log2(_log_left)
        log10 = relay.log10(_log_right)
        add = relay.add(log2, log10)
        _add = relay.device_copy(add, ctx2, ctx1)
        tan = relay.tan(_add)
        func = run_opt_pass(tan, transform.InferType())
        return func
    annotated_expr = annotated()
    expected_expr = expected()
    assert tvm.ir.structural_equal(annotated_expr, expected_expr)
    # verify the device assignment produced by memory planning
    smap = relay.backend._backend.GraphPlanMemory(annotated_expr)
    for expr, storage_dev_type in smap.items():
        # x is ctx1 as output is ctx1
        if isinstance(expr, tvm.relay.expr.Var):
            assert storage_dev_type[1][0] == ctx1.device_type
        else:
            # device_copy op should be its dst_dev_type
            if isinstance(expr.attrs, tvm.relay.op.op_attrs.DeviceCopyAttrs):
                assert storage_dev_type[1][0] == expr.attrs.dst_dev_type
            else:
                assert storage_dev_type[1][0] == expected_dev_type[expr.op.name].device_type
def run_fusible_network(dev, tgt):
R""" The network is as following:
x y
\ /
add
/ \
sqrt log
\ /
subtract
|
exp
"""
x = relay.var("x", shape=(1, 10))
y = relay.var("y", shape=(10, 10))
x_data = np.random.rand(1, 10).astype("float32")
y_data = np.random.rand(10, 10).astype("float32")
tmp_add = x_data + y_data
tmp_sqrt = np.sqrt(tmp_add)
tmp_log = np.log(tmp_add)
tmp_sub = np.subtract(tmp_sqrt, tmp_log)
ref_res = np.exp(tmp_sub)
params = {"x": x_data, "y": y_data}
def get_func():
add = relay.add(x, y)
sqrt = relay.sqrt(add)
log = relay.log(add)
subtract = relay.subtract(sqrt, log)
exp = relay.exp(subtract)
func = relay.Function([x, y], exp)
return func
def test_fuse_log_add(device, tgt):
""" Only log and add are fused."""
fallback_device = tvm.context("cpu")
target = {"cpu": "llvm", device: tgt}
cpu_ctx = fallback_device
dev_ctx = tvm.context(device)
def annotated():
add = relay.add(x, y)
sqrt = relay.sqrt(add)
_sqrt = relay.annotation.on_device(sqrt, dev_ctx)
log = relay.log(add)
subtract = relay.subtract(_sqrt, log)
exp = relay.exp(subtract)
_exp = relay.annotation.on_device(exp, dev_ctx)
func = relay.Function([x, y], _exp)
func = run_opt_pass(func, transform.RewriteAnnotatedOps(cpu_ctx.device_type))
return func
def expected():
add = relay.add(x, y)
copy_add_sqrt = relay.device_copy(add, cpu_ctx, dev_ctx)
sqrt = relay.sqrt(copy_add_sqrt)
log = relay.log(add)
copy_sqrt_subtract = relay.device_copy(sqrt, dev_ctx, cpu_ctx)
subtract = relay.subtract(copy_sqrt_subtract, log)
copy_sub_exp = relay.device_copy(subtract, cpu_ctx, dev_ctx)
exp = relay.exp(copy_sub_exp)
func = relay.Function([x, y], exp)
return func
annotated_func = annotated()
expected_func = expected()
ctx = tvm.context(device, 0)
dev_idx = ctx.device_type
expected_index = [1, 1, 1, dev_idx, dev_idx, 1, 1, dev_idx, dev_idx]
check_annotated_graph(annotated_func, expected_func)
opt_level = 1
config = {"relay.fallback_device_type": fallback_device.device_type}
check_graph_runtime(
target, ref_res, device, annotated_func, params, config, opt_level, expected_index
)
opt_level = 2
check_vm_runtime(
target, ref_res, device, annotated_func, params, config, opt_level, expected_index
)
def test_fuse_all(device, tgt):
"""Fuse all operators."""
fallback_device = tvm.context("cpu")
target = {"cpu": "llvm", device: tgt}
cpu_ctx = fallback_device
dev_ctx = tvm.context(device)
def annotated():
add = relay.add(x, y)
_add = relay.annotation.on_device(add, dev_ctx)
sqrt = relay.sqrt(_add)
_sqrt = relay.annotation.on_device(sqrt, dev_ctx)
log = relay.log(_add)
_log = relay.annotation.on_device(log, dev_ctx)
subtract = relay.subtract(_sqrt, _log)
_subtract = relay.annotation.on_device(subtract, dev_ctx)
exp = relay.exp(_subtract)
_exp = relay.annotation.on_device(exp, dev_ctx)
func = relay.Function([x, y], _exp)
func = run_opt_pass(func, transform.RewriteAnnotatedOps(cpu_ctx.device_type))
return func
annotated_func = annotated()
expected_func = get_func()
check_annotated_graph(annotated_func, expected_func)
opt_level = 1
config = {"relay.fallback_device_type": fallback_device.device_type}
check_graph_runtime(target, ref_res, device, annotated_func, params, config, opt_level)
opt_level = 2
check_vm_runtime(target, ref_res, device, annotated_func, params, config, opt_level)
def test_fallback_exp(device, tgt):
    """Only the trailing exp is pinned to the CPU; the rest runs on the device.

    NOTE: x/y/params/ref_res and the check_* helpers come from the enclosing
    run_fusible_network scope.
    """
    fallback_device = tvm.context("cpu")
    target = {"cpu": "llvm", device: tgt}
    cpu_ctx = fallback_device
    dev_ctx = tvm.context(device)

    def annotated():
        add = relay.add(x, y)
        sqrt = relay.sqrt(add)
        log = relay.log(add)
        subtract = relay.subtract(sqrt, log)
        exp = relay.exp(subtract)
        # only exp carries an annotation (CPU)
        _exp = relay.annotation.on_device(exp, cpu_ctx)
        func = relay.Function([x, y], _exp)
        func = run_opt_pass(func, transform.RewriteAnnotatedOps(dev_ctx.device_type))
        return func

    def expected():
        # a single device->cpu copy should be inserted right before exp
        add = relay.add(x, y)
        sqrt = relay.sqrt(add)
        log = relay.log(add)
        subtract = relay.subtract(sqrt, log)
        copy_sub_exp = relay.device_copy(subtract, dev_ctx, cpu_ctx)
        exp = relay.exp(copy_sub_exp)
        func = relay.Function([x, y], exp)
        return func

    annotated_func = annotated()
    expected_func = expected()
    ctx = tvm.context(device, 0)
    dev_idx = ctx.device_type
    # per-node device assignment: everything on the device except the tail
    # (1 is the CPU device type here, matching the fallback context)
    expected_index = [dev_idx, dev_idx, dev_idx, 1, 1]
    opt_level = 1
    config = {"relay.fallback_device_type": fallback_device.device_type}
    check_annotated_graph(annotated_func, expected_func)
    check_graph_runtime(
        target, ref_res, device, annotated_func, params, config, opt_level, expected_index
    )
    opt_level = 2
    check_vm_runtime(
        target, ref_res, device, annotated_func, params, config, opt_level, expected_index
    )
def test_fallback_all_operators(device, tgt):
    """With no annotations at all, every op falls back and results still match."""
    target = {device: tgt, "cpu": "llvm"}
    unannotated = get_func()
    reference = get_func()
    check_annotated_graph(unannotated, reference)
    opt_level = 2
    check_graph_runtime(target, ref_res, device, unannotated, params, {}, opt_level)
    check_vm_runtime(target, ref_res, device, unannotated, params, {}, opt_level)
# exercise each fusion/fallback scenario on the requested device
test_fuse_log_add(dev, tgt)
test_fuse_all(dev, tgt)
test_fallback_exp(dev, tgt)
test_fallback_all_operators(dev, tgt)
def run_unpropagatable_graph(dev, tgt):
    R"""The network is as following:
               a     b  c     d
                \   /    \   /
                 add      mul
                    \    /
                   subtract
    """
    # add/sub are annotated for the device while mul is pinned to the CPU
    # fallback, so exactly one device_copy (after mul) must be inserted.
    a = relay.var("a", shape=(10, 10))
    b = relay.var("b", shape=(10, 10))
    c = relay.var("c", shape=(10, 10))
    d = relay.var("d", shape=(10, 10))
    a_data = np.random.rand(10, 10).astype("float32")
    b_data = np.random.rand(10, 10).astype("float32")
    c_data = np.random.rand(10, 10).astype("float32")
    d_data = np.random.rand(10, 10).astype("float32")
    # reference result computed with numpy
    tmp_add = a_data + b_data
    tmp_mul = np.multiply(c_data, d_data)
    ref_res = np.subtract(tmp_add, tmp_mul)

    fallback_device = tvm.context("cpu")
    target = {"cpu": "llvm", dev: tgt}
    cpu_ctx = fallback_device
    dev_ctx = tvm.context(dev)

    def annotated():
        add = relay.add(a, b)
        _add = relay.annotation.on_device(add, dev_ctx)
        mul = relay.multiply(c, d)
        _mul = relay.annotation.on_device(mul, cpu_ctx)
        sub = relay.subtract(_add, _mul)
        _sub = relay.annotation.on_device(sub, dev_ctx)
        func = relay.Function([a, b, c, d], _sub)
        func = run_opt_pass(func, transform.RewriteAnnotatedOps(dev_ctx.device_type))
        return func

    def expected():
        # same dataflow with an explicit cpu->device copy after the CPU mul
        add = relay.add(a, b)
        mul = relay.multiply(c, d)
        copy_mul_sub = relay.device_copy(mul, cpu_ctx, dev_ctx)
        sub = relay.subtract(add, copy_mul_sub)
        func = relay.Function([a, b, c, d], sub)
        return func

    annotated_func = annotated()
    expected_func = expected()
    # expected per-node device assignment in the built graph
    expected_index = [2, 2, 2, 1, 1, 1, 2, 2]
    check_annotated_graph(annotated_func, expected_func)
    params = {"a": a_data, "b": b_data, "c": c_data, "d": d_data}
    opt_level = 0
    config = {"relay.fallback_device_type": fallback_device.device_type}
    check_graph_runtime(
        target, ref_res, dev, annotated_func, params, config, opt_level, expected_index
    )
    opt_level = 2
    check_vm_runtime(target, ref_res, dev, annotated_func, params, config, opt_level)
@tvm.testing.requires_opencl
def test_check_run_opencl():
    """Run both annotation suites on the OpenCL backend."""
    device_name = "opencl"
    target_str = "opencl"
    run_fusible_network(device_name, target_str)
    run_unpropagatable_graph(device_name, target_str)
@tvm.testing.requires_opencl
def test_check_run_opencl_intel():
    """Run both annotation suites on OpenCL with the Intel graphics target."""
    device_name = "opencl"
    target_str = str(tvm.target.intel_graphics())
    run_fusible_network(device_name, target_str)
    run_unpropagatable_graph(device_name, target_str)
@tvm.testing.requires_cuda
def test_check_run_cuda():
    """Run both annotation suites on the CUDA backend."""
    device_name = "cuda"
    target_str = "cuda"
    run_fusible_network(device_name, target_str)
    run_unpropagatable_graph(device_name, target_str)
@tvm.testing.requires_cuda
def test_tuple_get_item():
    """Annotating a tuple value (split output) copies each accessed element."""
    dev = "cuda"
    cpu_ctx = tvm.cpu(0)
    gpu_ctx = tvm.context(dev)

    def expected():
        # each tuple element consumed on the CPU gets its own device_copy
        x = relay.var("x", relay.ty.TensorType((3, 3, 4), "float32"))
        split = relay.op.split(x, 3)
        elem0 = relay.device_copy(split[0], gpu_ctx, cpu_ctx)
        elem1 = relay.device_copy(split[1], gpu_ctx, cpu_ctx)
        sub = elem0 - elem1
        func = relay.Function(relay.analysis.free_vars(sub), sub)
        return func

    def annotated():
        # annotate the whole split tuple on the GPU, then consume two elements
        x = relay.var("x", relay.ty.TensorType((3, 3, 4), "float32"))
        split = relay.op.split(x, 3)
        split = split.astuple()
        split = relay.annotation.on_device(split, gpu_ctx)
        split = relay.TupleWrapper(split, 3)
        sub = split[0] - split[1]
        func = relay.Function(relay.analysis.free_vars(sub), sub)
        func = run_opt_pass(func, transform.RewriteAnnotatedOps(cpu_ctx.device_type))
        return func

    annotated_func = annotated()
    expected_func = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(annotated_func, expected_func)
if __name__ == "__main__":
    # direct invocation runs the pure graph-rewrite tests; the CUDA/OpenCL
    # runtime suites above are gated behind pytest `requires_*` decorators
    test_redundant_annotation()
    test_annotate_expr()
    test_annotate_all()
    test_annotate_none()
    test_conv_network()
    test_tuple_get_item()
|
sxjscience/tvm
|
tests/python/relay/test_pass_annotation.py
|
Python
|
apache-2.0
| 23,469
|
[
"VisIt"
] |
7e0886a231fb0a76693f1e3268f20bebcae955e66e10a078d40668128d420ae8
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.sql import SparkSession
# $example on:schema_merging$
from pyspark.sql import Row
# $example off:schema_merging$
"""
A simple example demonstrating Spark SQL data sources.
Run with:
./bin/spark-submit examples/src/main/python/sql/datasource.py
"""
def basic_datasource_example(spark):
    """Demonstrate generic load/save, partitioning, bucketing, and direct SQL.

    The `$example on/off$` markers delimit snippets extracted into the Spark
    SQL documentation — do not remove or reorder them.
    """
    # $example on:generic_load_save_functions$
    df = spark.read.load("examples/src/main/resources/users.parquet")
    df.select("name", "favorite_color").write.save("namesAndFavColors.parquet")
    # $example off:generic_load_save_functions$

    # $example on:write_partitioning$
    df.write.partitionBy("favorite_color").format("parquet").save("namesPartByColor.parquet")
    # $example off:write_partitioning$

    # $example on:write_partition_and_bucket$
    df = spark.read.parquet("examples/src/main/resources/users.parquet")
    (df
        .write
        .partitionBy("favorite_color")
        .bucketBy(42, "name")
        .saveAsTable("people_partitioned_bucketed"))
    # $example off:write_partition_and_bucket$

    # $example on:manual_load_options$
    df = spark.read.load("examples/src/main/resources/people.json", format="json")
    df.select("name", "age").write.save("namesAndAges.parquet", format="parquet")
    # $example off:manual_load_options$

    # $example on:write_sorting_and_bucketing$
    df.write.bucketBy(42, "name").sortBy("age").saveAsTable("people_bucketed")
    # $example off:write_sorting_and_bucketing$

    # $example on:direct_sql$
    df = spark.sql("SELECT * FROM parquet.`examples/src/main/resources/users.parquet`")
    # $example off:direct_sql$

    # clean up the tables created above so the example is rerunnable
    spark.sql("DROP TABLE IF EXISTS people_bucketed")
    spark.sql("DROP TABLE IF EXISTS people_partitioned_bucketed")
def parquet_example(spark):
    """Round-trip a DataFrame through Parquet and query it via a temp view."""
    # $example on:basic_parquet_example$
    peopleDF = spark.read.json("examples/src/main/resources/people.json")

    # DataFrames can be saved as Parquet files, maintaining the schema information.
    peopleDF.write.parquet("people.parquet")

    # Read in the Parquet file created above.
    # Parquet files are self-describing so the schema is preserved.
    # The result of loading a parquet file is also a DataFrame.
    parquetFile = spark.read.parquet("people.parquet")

    # Parquet files can also be used to create a temporary view and then used in SQL statements.
    parquetFile.createOrReplaceTempView("parquetFile")
    teenagers = spark.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19")
    teenagers.show()
    # +------+
    # |  name|
    # +------+
    # |Justin|
    # +------+
    # $example off:basic_parquet_example$
def parquet_schema_merging_example(spark):
    """Show Parquet schema merging across two partition directories."""
    # $example on:schema_merging$
    # spark is from the previous example.
    # Create a simple DataFrame, stored into a partition directory
    sc = spark.sparkContext

    squaresDF = spark.createDataFrame(sc.parallelize(range(1, 6))
                                      .map(lambda i: Row(single=i, double=i ** 2)))
    squaresDF.write.parquet("data/test_table/key=1")

    # Create another DataFrame in a new partition directory,
    # adding a new column and dropping an existing column
    cubesDF = spark.createDataFrame(sc.parallelize(range(6, 11))
                                    .map(lambda i: Row(single=i, triple=i ** 3)))
    cubesDF.write.parquet("data/test_table/key=2")

    # Read the partitioned table
    mergedDF = spark.read.option("mergeSchema", "true").parquet("data/test_table")
    mergedDF.printSchema()

    # The final schema consists of all 3 columns in the Parquet files together
    # with the partitioning column appeared in the partition directory paths.
    # root
    #  |-- double: long (nullable = true)
    #  |-- single: long (nullable = true)
    #  |-- triple: long (nullable = true)
    #  |-- key: integer (nullable = true)
    # $example off:schema_merging$
def json_dataset_example(spark):
    """Load JSON from a file path and from an RDD of JSON strings."""
    # $example on:json_dataset$
    # spark is from the previous example.
    sc = spark.sparkContext

    # A JSON dataset is pointed to by path.
    # The path can be either a single text file or a directory storing text files
    path = "examples/src/main/resources/people.json"
    peopleDF = spark.read.json(path)

    # The inferred schema can be visualized using the printSchema() method
    peopleDF.printSchema()
    # root
    #  |-- age: long (nullable = true)
    #  |-- name: string (nullable = true)

    # Creates a temporary view using the DataFrame
    peopleDF.createOrReplaceTempView("people")

    # SQL statements can be run by using the sql methods provided by spark
    teenagerNamesDF = spark.sql("SELECT name FROM people WHERE age BETWEEN 13 AND 19")
    teenagerNamesDF.show()
    # +------+
    # |  name|
    # +------+
    # |Justin|
    # +------+

    # Alternatively, a DataFrame can be created for a JSON dataset represented by
    # an RDD[String] storing one JSON object per string
    jsonStrings = ['{"name":"Yin","address":{"city":"Columbus","state":"Ohio"}}']
    otherPeopleRDD = sc.parallelize(jsonStrings)
    otherPeople = spark.read.json(otherPeopleRDD)
    otherPeople.show()
    # +---------------+----+
    # |        address|name|
    # +---------------+----+
    # |[Columbus,Ohio]| Yin|
    # +---------------+----+
    # $example off:json_dataset$
def jdbc_dataset_example(spark):
    """Show JDBC load/save via both option-based and jdbc() convenience APIs.

    NOTE: this example needs a reachable PostgreSQL server and the JDBC driver
    on the classpath; it is illustrative rather than runnable as-is.
    """
    # $example on:jdbc_dataset$
    # Note: JDBC loading and saving can be achieved via either the load/save or jdbc methods
    # Loading data from a JDBC source
    jdbcDF = spark.read \
        .format("jdbc") \
        .option("url", "jdbc:postgresql:dbserver") \
        .option("dbtable", "schema.tablename") \
        .option("user", "username") \
        .option("password", "password") \
        .load()

    jdbcDF2 = spark.read \
        .jdbc("jdbc:postgresql:dbserver", "schema.tablename",
              properties={"user": "username", "password": "password"})

    # Saving data to a JDBC source
    jdbcDF.write \
        .format("jdbc") \
        .option("url", "jdbc:postgresql:dbserver") \
        .option("dbtable", "schema.tablename") \
        .option("user", "username") \
        .option("password", "password") \
        .save()

    jdbcDF2.write \
        .jdbc("jdbc:postgresql:dbserver", "schema.tablename",
              properties={"user": "username", "password": "password"})

    # Specifying create table column data types on write
    jdbcDF.write \
        .option("createTableColumnTypes", "name CHAR(64), comments VARCHAR(1024)") \
        .jdbc("jdbc:postgresql:dbserver", "schema.tablename",
              properties={"user": "username", "password": "password"})
    # $example off:jdbc_dataset$
if __name__ == "__main__":
    # build one session and run every datasource example against it
    spark = SparkSession \
        .builder \
        .appName("Python Spark SQL data source example") \
        .getOrCreate()

    basic_datasource_example(spark)
    parquet_example(spark)
    parquet_schema_merging_example(spark)
    json_dataset_example(spark)
    jdbc_dataset_example(spark)

    spark.stop()
|
SHASHANKB/spark
|
examples/src/main/python/sql/datasource.py
|
Python
|
apache-2.0
| 7,838
|
[
"COLUMBUS"
] |
7423a0a5d5162f0f9fc1d711ad10435456ad416d8fa227f86d0f570b53b821fb
|
# This is the code that visits the warehouse.
from Pyro5.api import Proxy

from person import Person

warehouse_uri = input("Enter the uri of the warehouse: ").strip()
warehouse = Proxy(warehouse_uri)

# create the visitors first, then send each one through the remote warehouse
visitors = [Person(name) for name in ("Janet", "Henry")]
for visitor in visitors:
    visitor.visit(warehouse)
|
irmen/Pyro5
|
examples/warehouse/phase2/visit.py
|
Python
|
mit
| 275
|
[
"VisIt"
] |
e0d8afbe85730b3063db3e08379cb7fff7bbcb5aefc2bf45739f770f95937613
|
from flask import Blueprint, g, current_app, request, jsonify, abort, Response, url_for
from flask_restful import Resource, Api
from pwnedapi import db
from pwnedapi.decorators import token_auth_required, roles_required, validate_json, csrf_protect
from pwnedapi.utils import CsrfToken, encode_jwt, get_bearer_token, send_email
from common.constants import QUESTIONS, DEFAULT_NOTE_V2, ADMIN_RESPONSE
from common.models import Config, User, Note, Message, Mail, Tool, Scan, Room, Membership
from common.utils import get_unverified_jwt_payload, unfurl_url
from common.validators import is_valid_password, is_valid_command
from datetime import datetime
from hashlib import md5
from secrets import token_urlsafe
from lxml import etree
import jwt
import os
# module-level wiring: the blueprint is registered by the app factory, and
# Flask-RESTful attaches every Resource defined below onto it
resources = Blueprint('resources', __name__)
api = Api()
api.init_app(resources)
# PRE-REQUEST FUNCTIONS
@resources.before_app_request
def parse_jwt():
    """Parse the request's JWT and stash its claims on `request.jwt`.

    The token is read from the `access_token` cookie by default, or from the
    Authorization header (Bearer scheme) when the BEARER_AUTH_ENABLE config
    flag is set. On any decode failure `request.jwt` is left as an empty dict.
    """
    request.jwt = {}
    token = request.cookies.get('access_token')
    if Config.get_value('BEARER_AUTH_ENABLE'):
        token = get_bearer_token(request.headers)
    try:
        # NOTE(review): jwt.decode() is not pinned to an algorithms list —
        # confirm the PyJWT version in use rejects `alg: none` tokens.
        payload = jwt.decode(token, current_app.config['SECRET_KEY'])
    except Exception:
        # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt are
        # not swallowed; any JWT error still yields an empty claim set
        return
    request.jwt = payload
@resources.before_app_request
def load_user():
    """Resolve the authenticated user (if any) from the parsed JWT claims."""
    user_id = request.jwt.get('sub')
    g.user = User.query.get(user_id) if user_id else None
# API RESOURCE CLASSES
class TokenList(Resource):
    """Login/logout endpoint issuing and clearing the access-token JWT."""

    def post(self):
        '''Returns a JWT for the user that owns the provided credentials.'''
        # Two credential flows: an OIDC id_token, or username/password.
        id_token = request.json.get('id_token')
        username = request.json.get('username')
        password = request.json.get('password')
        user = None
        # process OIDC credentials
        if id_token:
            # NOTE(review): the id_token's signature is NOT verified here
            # (get_unverified_jwt_payload) — confirm this is intentional.
            payload = get_unverified_jwt_payload(id_token)
            email = payload['email']
            user = User.get_by_email(email)
            if not user:
                # register the user
                # first-seen OIDC users are auto-provisioned with random
                # password/answer placeholders
                user = User(
                    username=email.split('@')[0],
                    email=email,
                    avatar=payload['picture'],
                    signature='',
                    name=payload['name'],
                    password=token_urlsafe(20),
                    question=0,
                    answer=token_urlsafe(10),
                )
                db.session.add(user)
                db.session.commit()
        # process username and password credentials
        elif username and password:
            user = User.get_by_username(username)
            if user and not user.check_password(password):
                user = None
        # handle authentication
        if user and user.is_enabled:
            data = {'user': user.serialize_self()}
            # build other claims
            claims = {}
            # per-user upload folder derived from the md5 of the user id;
            # created eagerly so later uploads can assume it exists
            path = os.path.join(current_app.config['UPLOAD_FOLDER'], md5(str(user.id).encode()).hexdigest())
            if not os.path.exists(path):
                os.makedirs(path)
            claims['upload_folder'] = path
            # create a JWT
            token = encode_jwt(user.id, claims=claims)
            # send the JWT as a Bearer token when the feature is enabled
            if Config.get_value('BEARER_AUTH_ENABLE'):
                data['access_token'] = token
                # remove any existing access token cookie
                return data, 200, {'Set-Cookie': 'access_token=; Expires=Thu, 01-Jan-1970 00:00:00 GMT'}
            # default to cookie authentication
            # return a CSRF token when using cookie authentication
            csrf_obj = CsrfToken(user.id)
            csrf_obj.sign(current_app.config['SECRET_KEY'])
            data['csrf_token'] = csrf_obj.serialize()
            # set the JWT as a HttpOnly cookie
            return data, 200, {'Set-Cookie': f"access_token={token}; HttpOnly"}
        abort(400, 'Invalid username or password.')

    def delete(self):
        """Log out by clearing the access-token cookie (204, no body)."""
        response = Response(None, 204)
        response.delete_cookie('access_token')
        return response

api.add_resource(TokenList, '/access-token')
class UserList(Resource):
    """Listing and self-registration for user accounts."""

    @token_auth_required
    def get(self):
        """Return the public serialization of every user."""
        users = [u.serialize() for u in User.query.all()]
        return {'users': users}

    def post(self):
        '''Creates an account.'''
        # uniqueness checks run before password validation
        username = request.json.get('username')
        if User.query.filter_by(username=username).first():
            abort(400, 'Username already exists.')
        email = request.json.get('email')
        if User.query.filter_by(email=email).first():
            abort(400, 'Email already exists.')
        password = request.json.get('password')
        if not is_valid_password(password):
            abort(400, 'Password does not meet complexity requirements.')
        # NOTE(review): the entire JSON payload is splatted into the model —
        # looks like mass assignment (e.g. a client-supplied role); confirm
        # the User constructor whitelists fields.
        user = User(**request.json)
        db.session.add(user)
        db.session.commit()
        return {'success': True}, 201

api.add_resource(UserList, '/users')
class UserInst(Resource):
    """Read and self-update of a single user profile."""

    @token_auth_required
    def get(self, uid):
        """Return a user; 'me' (or the caller's own id) gets the private view."""
        if uid == 'me' or uid == str(g.user.id):
            return g.user.serialize_self()
        user = User.query.get_or_404(uid)
        return user.serialize()

    @token_auth_required
    @csrf_protect
    def patch(self, uid):
        """Update the caller's own profile; each field defaults to its current value."""
        if uid != 'me' and uid != str(g.user.id):
            abort(403)
        user = g.user
        # validate that the provided username doesn't belong to another user
        username = request.json.get('username', user.username)
        untrusted_user = User.query.filter_by(username=username).first()
        if untrusted_user and untrusted_user != g.user:
            abort(400, 'Username already exists.')
        # validate that the provided email doesn't belong to another user
        email = request.json.get('email', user.email)
        untrusted_user = User.query.filter_by(email=email).first()
        if untrusted_user and untrusted_user != g.user:
            abort(400, 'Email already exists.')
        # update the user object
        user.username = username
        user.email = email
        user.name = request.json.get('name', user.name)
        user.avatar = request.json.get('avatar', user.avatar)
        user.signature = request.json.get('signature', user.signature)
        user.question = request.json.get('question', user.question)
        user.answer = request.json.get('answer', user.answer)
        db.session.add(user)
        db.session.commit()
        return user.serialize_self()

api.add_resource(UserInst, '/users/<string:uid>')
class AdminUserList(Resource):
    """Admin-only listing of all users with privileged fields."""

    @token_auth_required
    @roles_required('admin')
    def get(self):
        """Return the admin-level serialization of every user."""
        serialized = [account.serialize_admin() for account in User.query.all()]
        return {'users': serialized}

api.add_resource(AdminUserList, '/admin/users')
class AdminUserInst(Resource):
    """Admin-only update of another user's role and status."""

    @token_auth_required
    @roles_required('admin')
    def patch(self, uid):
        """Update a user's role and/or status (admin only).

        Self-administration is rejected so an admin cannot change their own
        role/status through this endpoint.
        """
        user = User.query.get_or_404(uid)
        if user == g.user:
            abort(400, 'Self-administration not permitted.')
        # removed a leftover debug `print(request.json)` that leaked request
        # payloads to stdout
        user.role = request.json.get('role', user.role)
        user.status = request.json.get('status', user.status)
        db.session.add(user)
        db.session.commit()
        return user.serialize_admin()

api.add_resource(AdminUserInst, '/admin/users/<string:uid>')
class QuestionList(Resource):
    """Public list of the available security questions."""

    def get(self):
        """Return the security questions as {id, text} objects."""
        questions = [
            {'id': qid, 'text': text}
            for qid, text in QUESTIONS.items()
        ]
        return {'questions': questions}

api.add_resource(QuestionList, '/questions')
class PasswordResetList(Resource):
    """Initiates the email-based password reset flow."""

    def post(self):
        '''Creates and sends a password reset link.'''
        # `credential` may be either an email address or a username
        credential = request.json.get('credential')
        user = None
        if credential:
            user = User.get_by_email(credential) or User.get_by_username(credential)
        if not user or not user.is_enabled:
            abort(400, 'Invalid email address or username.')
        # create a JWT
        # NOTE(review): this is the same general auth JWT used for sessions,
        # not a purpose-bound reset token — confirm it cannot be replayed as
        # an access token and that it expires.
        token = encode_jwt(user.id)
        # "send an email" with a reset link using the token
        # the reset link points back at the requesting Origin header
        base_url = request.headers['origin']
        link = f"{base_url}/#/reset/{user.id}/{token}"
        send_email(
            sender = User.query.first().email,
            recipient = user.email,
            subject = 'PwnedHub Password Reset',
            body = f"Hi {user.name}!<br><br>You recently requested to reset your PwnedHub password. Visit the following link to set a new password for your account.<br><br><a href=\"{link}\">{link}</a><br><br>If you did not request this password reset, please respond to this email to reach an administrator. Thank you.",
        )
        return {'success': True}, 201

api.add_resource(PasswordResetList, '/password-reset')
class PasswordInst(Resource):
    """Password change via current password or a reset token."""

    def put(self, uid):
        '''Updates a user's password.'''
        # two flows: authenticated change (current_password) or reset (token)
        current_password = request.json.get('current_password')
        token = request.json.get('token')
        user = User.query.get_or_404(uid)
        new_password = None
        # process current password
        if current_password:
            if not g.user:
                abort(401)
            if user.id != g.user.id:
                abort(403)
            if not user.check_password(current_password):
                abort(400, 'Invalid current password.')
            new_password = request.json.get('new_password')
        # process reset token
        elif token:
            # NOTE(review): the token's signature is not verified here
            # (get_unverified_jwt_payload) — a forged token with a matching
            # `sub` claim would pass; confirm this is intentional.
            payload = get_unverified_jwt_payload(token)
            if payload['sub'] != user.id:
                abort(400, 'Invalid token.')
            new_password = request.json.get('new_password')
        # handle password update
        if not new_password:
            abort(400, 'Invalid request.')
        if not is_valid_password(new_password):
            abort(400, 'Password does not meet complexity requirements.')
        user.password = new_password
        db.session.add(user)
        db.session.commit()
        return {'success': True}

api.add_resource(PasswordInst, '/users/<string:uid>/password')
class NoteInst(Resource):
    """The current user's single personal note (get / upsert)."""

    @token_auth_required
    def get(self):
        """Return the caller's note content, or the default template if none."""
        note = g.user.notes.first()
        if note:
            return {'content': note.content}
        return {'content': DEFAULT_NOTE_V2}

    @token_auth_required
    def put(self):
        """Create-or-update the caller's note with the posted content."""
        note = g.user.notes.first() or Note(name='Notes', owner=g.user)
        note.content = request.json.get('content')
        db.session.add(note)
        db.session.commit()
        return {'success': True}

api.add_resource(NoteInst, '/notes')
class RoomList(Resource):
    """Listing and fetch-or-create of chat rooms."""

    @token_auth_required
    def get(self):
        """Return the rooms the caller is a member of."""
        rooms = [r.serialize_with_context(g.user) for r in g.user.rooms]
        return {'rooms': rooms}

    @token_auth_required
    @validate_json(['name', 'private', 'members'])
    def post(self):
        """Fetch-or-create a room by name.

        Returns 200 with the existing room when the name is taken; otherwise
        creates the room, adds the requested members, and returns 201.
        """
        name = request.json.get('name')
        private = request.json.get('private')
        members = request.json.get('members')
        room = Room.get_by_name(name)
        code = 200
        if not room:
            # create the room
            room = Room(
                name=name,
                private=private,
            )
            db.session.add(room)
            db.session.commit()
            # initialize memberships
            for member in members:
                user = User.query.get(member)
                user.create_membership(room)
            # bug fix: the creation branch previously re-assigned 200; a newly
            # created resource should report 201 (matching UserList.post)
            code = 201
        return room.serialize_with_context(g.user), code

api.add_resource(RoomList, '/rooms')
class RoomMessageList(Resource):
    """Cursor-paginated message history for a single room."""

    @token_auth_required
    def get(self, rid):
        """Return a page of a room's messages, oldest-first within the page.

        Query params: `cursor` (POSIX timestamp, defaults to now) and `size`
        (page size, defaults to 8). The response includes the next cursor and
        a prebuilt `next` URL when older messages remain.
        """
        room = Room.query.get_or_404(rid)
        if room not in g.user.rooms:
            abort(403)
        result = {
            'messages': [],
            'cursor': None,
            'next': None,
        }
        cursor = float(request.args.get('cursor', datetime.now().timestamp()))
        # bug fix: query-string values arrive as strings; the slice below
        # raised TypeError whenever a client actually supplied `size`
        size = int(request.args.get('size', 8))
        messages = room.messages.filter(Message.created < datetime.fromtimestamp(cursor)).order_by(Message.created.desc()).all()
        if messages:
            paged_messages = messages[:size]
            next_cursor = str(paged_messages[-1].created.timestamp())
            next_url = None
            if messages[-1].created < paged_messages[-1].created:
                next_url = url_for('resources.roommessagelist', rid=room.id, cursor=next_cursor, _external=True)
            paged_messages.reverse()
            result = {
                'messages': [m.serialize() for m in paged_messages],
                'cursor': next_cursor,
                'next': next_url,
            }
        resp = jsonify(result)
        # NOTE(review): JSON is served as text/html — presumably intentional
        # for this training app; confirm, as a JSON mimetype is the safe default
        resp.mimetype = 'text/html'
        return resp

api.add_resource(RoomMessageList, '/rooms/<string:rid>/messages')
class MessageList(Resource):
    """Deprecated global message feed (kept for older clients)."""

    # replaced by RoomMessageList resource
    @token_auth_required
    def get(self):
        """Return a cursor-paginated page of all chat messages."""
        result = {
            'messages': [],
            'cursor': None,
            'next': None,
        }
        cursor = float(request.args.get('cursor', datetime.now().timestamp()))
        # bug fix: query-string values arrive as strings; the slice below
        # needs an int (mirrors the fix pattern in RoomMessageList)
        size = int(request.args.get('size', 8))
        messages = Message.query.filter(Message.created < datetime.fromtimestamp(cursor)).order_by(Message.created.desc()).all()
        # bug fix: an empty result previously raised IndexError on [-1];
        # return an empty page instead, consistent with RoomMessageList
        if messages:
            paged_messages = messages[:size]
            next_cursor = str(paged_messages[-1].created.timestamp())
            next_url = None
            if messages[-1].created < paged_messages[-1].created:
                next_url = url_for('resources.messagelist', cursor=next_cursor, _external=True)
            paged_messages.reverse()
            result = {
                'messages': [m.serialize() for m in paged_messages],
                'cursor': next_cursor,
                'next': next_url,
            }
        resp = jsonify(result)
        resp.mimetype = 'text/html'
        return resp

    # replaced by websocket event
    @token_auth_required
    def post(self):
        """Create a chat message authored by the caller."""
        jsonobj = request.get_json(force=True)
        comment = jsonobj.get('message')
        if not comment:
            abort(400, 'Invalid request.')
        message = Message(comment=comment, author=g.user)
        db.session.add(message)
        db.session.commit()
        result = message.serialize()
        resp = jsonify(result)
        resp.mimetype = 'text/html'
        return resp

api.add_resource(MessageList, '/messages')
class MessageInst(Resource):
    """Single-message operations (currently only delete)."""

    # replaced by websocket event
    @token_auth_required
    def delete(self, mid):
        """Delete a message; only its author or an admin may do so."""
        message = Message.query.get_or_404(mid)
        allowed = message.author == g.user or g.user.is_admin
        if not allowed:
            abort(403)
        db.session.delete(message)
        db.session.commit()
        return '', 204

api.add_resource(MessageInst, '/messages/<string:mid>')
class MailList(Resource):
    """Inbox listing and mail sending."""

    @token_auth_required
    def get(self):
        """Return the caller's received mail, newest first."""
        mail = [m.serialize() for m in g.user.received_mail.order_by(Mail.created.desc()).all()]
        return {'mail': mail}

    @token_auth_required
    def post(self):
        """Send mail to another user; mailing an admin triggers a canned reply."""
        receiver = User.query.get(request.json.get('receiver'))
        if not receiver:
            abort(400, 'Invalid receiver.')
        subject = request.json.get('subject')
        content = request.json.get('content')
        letter = Mail(
            content=content,
            subject=subject,
            sender=g.user,
            receiver=receiver,
        )
        db.session.add(letter)
        db.session.commit()
        # generate automated Administrator response
        # role 0 appears to denote an administrator — confirm against the model
        if receiver.role == 0:
            content = ADMIN_RESPONSE
            auto_letter = Mail(
                content=content,
                subject='RE:'+subject,
                sender=receiver,
                receiver=g.user,
            )
            db.session.add(auto_letter)
            db.session.commit()
        return letter.serialize()

api.add_resource(MailList, '/mail')
class MailInst(Resource):
    """Single mail item: read (marks as read), partial update, delete."""

    @token_auth_required
    def get(self, mid):
        """Return one mail item (receiver only) and mark it as read."""
        mail = Mail.query.get_or_404(mid)
        if mail.receiver != g.user:
            abort(403)
        # mark mail as read
        if mail.read == 0:
            mail.read = 1
            db.session.add(mail)
            db.session.commit()
        return mail.serialize()

    @token_auth_required
    def patch(self, mid):
        """Partially update a mail item (receiver only)."""
        mail = Mail.query.get_or_404(mid)
        if mail.receiver != g.user:
            abort(403)
        # [Todo] this shouldn't work because only BaseQuery has update method
        # NOTE(review): as the TODO above says, calling .update() on a model
        # instance is suspect — verify Mail defines an instance-level update().
        mail.update(request.json)
        db.session.commit()
        return mail.serialize()

    @token_auth_required
    def delete(self, mid):
        """Delete a mail item (receiver only) and return the refreshed inbox."""
        mail = Mail.query.get_or_404(mid)
        if mail.receiver != g.user:
            abort(403)
        db.session.delete(mail)
        db.session.commit()
        mail = [m.serialize() for m in g.user.received_mail.order_by(Mail.created.desc()).all()]
        return {'mail': mail}

api.add_resource(MailInst, '/mail/<string:mid>')
class UnfurlList(Resource):
    """Fetches link-preview metadata for a client-supplied URL.

    NOTE(review): the URL is fetched server-side with no scheme/host
    allow-list — looks like an SSRF vector; confirm whether this is
    intentional for the training app.
    """

    def post(self):
        url = request.json.get('url')
        # forward the client's User-Agent to the remote server
        headers = {'User-Agent': request.headers.get('User-Agent')}
        if url:
            try:
                data = unfurl_url(url, headers)
                status = 200
            except Exception as e:
                data = {'error': 'UnfurlError', 'message': str(e)}
                status = 500
        else:
            data = {'error': 'RequestError', 'message': 'Invalid request.'}
            status = 400
        return data, status

api.add_resource(UnfurlList, '/unfurl')
class ToolList(Resource):
    """Listing and (admin-only) registration of scan tools."""

    @token_auth_required
    def get(self):
        """Return every registered tool."""
        tools = [t.serialize() for t in Tool.query.all()]
        return {'tools': tools}

    @token_auth_required
    @roles_required('admin')
    @validate_json(['name', 'path', 'description'])
    def post(self):
        """Register a new tool (admin only); returns 201 with the new tool."""
        tool = Tool(
            name=request.json.get('name'),
            path=request.json.get('path'),
            description=request.json.get('description'),
        )
        db.session.add(tool)
        db.session.commit()
        return tool.serialize(), 201

api.add_resource(ToolList, '/tools')
class ToolInst(Resource):
    """Single-tool lookup and (admin-only) deletion."""

    @token_auth_required
    def get(self, tid):
        """Return a single tool row by id, or {} when missing/invalid."""
        # security fix: `tid` was previously concatenated into the SQL string
        # (classic SQL injection); bind it as a parameter instead.
        # (on SQLAlchemy 2.x, wrap the string in sqlalchemy.text())
        query = 'SELECT id, name, path, description FROM tools WHERE id = :tid'
        try:
            tool = db.session.execute(query, {'tid': tid}).first() or {}
        except Exception:
            tool = {}
        return dict(tool)

    @token_auth_required
    @roles_required('admin')
    def delete(self, tid):
        """Delete a tool (admin only); 204 on success."""
        tool = Tool.query.get_or_404(tid)
        db.session.delete(tool)
        db.session.commit()
        return '', 204

api.add_resource(ToolInst, '/tools/<string:tid>')
class ScanList(Resource):
    """Listing and creation of tool-execution scans."""

    @token_auth_required
    def get(self):
        """Return the caller's scans, oldest first."""
        scans = [s.serialize() for s in g.user.scans.order_by(Scan.created.asc())]
        return {'scans': scans}

    @token_auth_required
    @validate_json(['tid', 'args'])
    def post(self):
        """Validate and queue a tool run as a background job, recording a Scan.

        The command is `<tool.path> <args>`; it is character-validated by
        is_valid_command before being handed to the task queue, and the queue
        job id doubles as the scan's primary key.
        """
        tool = Tool.query.get(request.json.get('tid') or -1)
        if not tool:
            abort(400, 'Invalid tool ID.')
        path = tool.path
        args = request.json.get('args')
        cmd = '{} {}'.format(path, args)
        # removed an unused `error = False` local left over from earlier code
        if not is_valid_command(cmd):
            abort(400, 'Command contains invalid characters.')
        job = current_app.task_queue.enqueue('pwnedapi.tasks.execute_tool', cmd)
        sid = job.get_id()
        scan = Scan(id=sid, command=cmd, owner=g.user)
        db.session.add(scan)
        db.session.commit()
        return scan.serialize(), 201

api.add_resource(ScanList, '/scans')
class ScanInst(Resource):
    """Single-scan operations (currently only delete)."""

    @token_auth_required
    def delete(self, sid):
        """Delete one of the caller's scans; 204 on success."""
        record = Scan.query.get_or_404(sid)
        if record.owner != g.user:
            abort(403)
        db.session.delete(record)
        db.session.commit()
        return '', 204

api.add_resource(ScanInst, '/scans/<string:sid>')
class ResultsInst(Resource):
    """Read-only access to a scan's stored output."""

    @token_auth_required
    def get(self, sid):
        """Return the stored results for one of the caller's scans."""
        record = Scan.query.get_or_404(sid)
        if record.owner != g.user:
            abort(403)
        return {'results': record.results}

api.add_resource(ResultsInst, '/scans/<string:sid>/results')
class ArtifactsList(Resource):
    """Saves XML-described artifacts into the caller's upload folder."""

    @token_auth_required
    def post(self):
        """Save an artifact from an XML body and reply with an XML message.

        Expects <xml-ish> with <content> and <filename> children; the file is
        written into the per-user upload folder from the JWT claims.
        """
        xml = request.data
        # security fix: the parser previously allowed network access (and,
        # by lxml default, entity resolution), enabling XXE; disable both.
        parser = etree.XMLParser(resolve_entities=False, no_network=True)
        doc = etree.fromstring(xml, parser)
        content = doc.find('content').text
        filename = doc.find('filename').text
        if all((content, filename)):
            # security fix: strip directory components so a crafted filename
            # (e.g. ../../x) cannot escape the upload folder
            filename = os.path.basename(filename)
            filename += '-{}.txt'.format(datetime.now().strftime('%s'))
            msg = 'Artifact created \'{}\'.'.format(filename)
            path = os.path.join(request.jwt.get('upload_folder'), filename)
            if not os.path.isfile(path):
                try:
                    with open(path, 'w') as fp:
                        fp.write(content)
                except IOError:
                    msg = 'Unable to save as an artifact.'
            else:
                msg = 'An artifact with that name already exists.'
        else:
            msg = 'Invalid request.'
        xml = '<xml><message>{}</message></xml>'.format(msg)
        return Response(xml, mimetype='application/xml')

api.add_resource(ArtifactsList, '/artifacts')
|
lanmaster53/PwnedHub
|
pwnedapi/views/api.py
|
Python
|
gpl-3.0
| 21,246
|
[
"VisIt"
] |
d2b7fa7687a260bace73009d7d3b14249a67c65813d880f0f160aba0284b06d1
|
import sys
import os
import numpy as np
import h5py
import multiprocessing
import cPickle
import matplotlib.pyplot as plt
import itertools
from scipy.stats import sigmaclip
from sklearn.gaussian_process import GaussianProcess
fMapper = {
"apcp_sfc" : "Total_precipitation",
"dlwrf_sfc" : "Downward_Long-Wave_Rad_Flux",
"dswrf_sfc" : "Downward_Short-Wave_Rad_Flux",
"pres_msl" : "Pressure",
"pwat_eatm" : "Precipitable_water",
"spfh_2m" : "Specific_humidity_height_above_ground",
"tcdc_eatm" : "Total_cloud_cover",
"tcolc_eatm" : "Total_Column-Integrated_Condensate",
"tmax_2m" : "Maximum_temperature",
"tmin_2m" : "Minimum_temperature",
"tmp_2m" : "Temperature_height_above_ground",
"tmp_sfc" : "Temperature_surface",
"ulwrf_sfc" : "Upward_Long-Wave_Rad_Flux_surface",
"ulwrf_tatm" : "Upward_Long-Wave_Rad_Flux",
"uswrf_sfc" : "Upward_Short-Wave_Rad_Flux"
}
fKeys = ("apcp_sfc", "dlwrf_sfc", "dswrf_sfc", "pres_msl", "pwat_eatm",
"spfh_2m", "tcdc_eatm", "tcolc_eatm", "tmax_2m", "tmin_2m",
"tmp_2m", "tmp_sfc", "ulwrf_sfc", "ulwrf_tatm", "uswrf_sfc")
# Minimal script for gaussian process estimation
class Mesonet(object):
    """A mesonet station plus a buffer for GP-interpolated GEFS features."""

    def __init__(self, stid, nlat, elon, elev, npts):
        self.stid = stid
        self.nlat = nlat
        self.elon = elon
        self.elev = elev
        # Gaussian process interpolation buffer: one float64 field per GEFS
        # variable, flattened over npts days x 11 members x 5 forecast hours
        self.pdata = np.recarray(
            (5 * 11 * npts,),
            dtype={"names": fKeys, "formats": [np.float64] * len(fKeys)},
        )
class GEFS(object):
    """A GEFS grid point plus a buffer for its raw forecast values."""

    def __init__(self, stid, nlat, elon, elev, npts):
        self.stid = stid
        self.nlat = nlat
        self.elon = elon
        self.elev = elev
        # raw forecast buffer: one float64 field per GEFS variable, flattened
        # over npts days x 11 members x 5 forecast hours
        self.data = np.recarray(
            (5 * 11 * npts,),
            dtype={"names": fKeys, "formats": [np.float64] * len(fKeys)},
        )
def runGaussianProcess(task):
    """Fit a GP to the GEFS grid values and predict at the Mesonet sites.

    Parameters
    ----------
    task : tuple
        ``((vals, mcoords, gcoords), regr)`` — grid values, Mesonet
        coordinates, GEFS coordinates, and the sklearn GaussianProcess
        regression type ("constant", "linear" or "quadratic").

    Returns
    -------
    tuple
        A 1-tuple ``(pred,)`` with predictions at ``mcoords`` (kept as a
        tuple so callers indexing ``result[0]`` keep working).
    """
    # Explicit unpack replaces the Python-2-only tuple parameter
    # ``def runGaussianProcess((args, regr))``; call sites are unchanged.
    args, regr = task
    vals, mcoords, gcoords = args
    nugmin = 0.025 ** 2  # noise floor on the nugget to keep the fit stable
    gp = GaussianProcess(corr="squared_exponential",
                         regr=regr,
                         theta0=1e-1, thetaL=1e-2, thetaU=1,
                         normalize=True,
                         nugget=nugmin,
                         random_start=1)
    # Let fit/predict errors propagate: the previous bare ``except`` dropped
    # into pdb (hanging non-interactive/multiprocessing runs) and then hit a
    # NameError because ``pred`` was never bound.
    gp.fit(gcoords, vals)
    pred = gp.predict(mcoords)
    return pred,
if __name__ == "__main__":
switch = sys.argv[1]
if switch == "train":
npts = 5113
else:
npts = 1796
sdata = np.loadtxt("../station_info.csv", delimiter=",", skiprows=1,
dtype = [("stid", np.str_, 4),
("nlat", np.float64),
("elon", np.float64),
("elev", np.float64)])
mesonets = {}
for sidx in range(len(sdata)):
s = sdata[sidx]
station = Mesonet(s[0], s[1], s[2], s[3], npts)
mesonets[s[0]] = station
gefss = {}
for key in fKeys:
print "# LOADING", key
if switch == "train":
f = h5py.File("../train/%s_latlon_subset_19940101_20071231.nc" % (key), "r")
else:
f = h5py.File("../test/%s_latlon_subset_20080101_20121130.nc" % (key), "r")
if len(gefss.keys()) == 0:
print "# INITIALIZING GEFS"
sidx = 0
for latidx in range(len(f['lat'])):
for lonidx in range(len(f['lon'])):
gefs = GEFS(sidx, f["lat"][latidx], f["lon"][lonidx]-360., 0.0, npts)
gefss[sidx] = gefs
sidx += 1
sidx = 0
for latidx in range(len(f['lat'])):
for lonidx in range(len(f['lon'])):
gefs = gefss[sidx]
data = f[fMapper[key]][:,:,:,latidx,lonidx] # 1796, 11, 5
gefs.data[key][:] = np.ravel(data) # data[1][0][0] = 9.0; np.ravel(data)[11*5] = 9.0
sidx += 1
# Mesonet coords
mlats = []
mlons = []
melevs = []
for mesonet in mesonets.values(): # MAKE SURE COORDS ARE READ IN THE SAME ORDER AS VALUES
mlats.append(mesonet.nlat)
mlons.append(mesonet.elon)
melevs.append(mesonet.elev)
mlats = np.array(mlats)
mlons = np.array(mlons)
melevs = np.array(melevs)
#mcoords = np.array(zip(mlats,mlons,melevs))
mcoords = np.array(zip(mlats,mlons))
# GEFS coordsgef
glats = []
glons = []
gelevs = []
for gefs in gefss.values(): # MAKE SURE COORDS ARE READ IN THE SAME ORDER AS VALUES
glats.append(gefs.nlat)
glons.append(gefs.elon)
gelevs.append(gefs.elev)
glats = np.array(glats)
glons = np.array(glons)
gelevs = np.array(gelevs)
#gcoords = np.array(zip(glats,glons,gelevs))
gcoords = np.array(zip(glats,glons))
# Get ready to run all the GP
pool = multiprocessing.Pool(multiprocessing.cpu_count()//2) # high mem!
tsteps = range(npts * 11 * 5)
#for dologit in (False, True):
for dologit in (False,):
if dologit:
if os.path.isfile("logit.pickle"):
print "# READING LOGIT PICKLE"
buff = open("logit.pickle", "rb")
fmins,fmaxs = cPickle.load(buff)
buff.close()
# Warning that this was set with the test data, hopefully the training data don't have different ranges...?
for key in fKeys:
gefs.data[key] -= fmins[key]
gefs.data[key] /= fmaxs[key] * 1.02
gefs.data[key] += 0.01
logit = np.log(gefs.data[key] / (1.0 - gefs.data[key]))
gefs.data[key] = logit
else:
sys.exit(1)
# logit.py should now do this
fmins = {}
fmaxs = {}
for key in fKeys:
fmins[key] = gefs.data[key].min()
gefs.data[key] -= fmins[key]
fmaxs[key] = gefs.data[key].max()
gefs.data[key] /= fmaxs[key] * 1.02
gefs.data[key] += 0.01
logit = np.log(gefs.data[key] / (1.0 - gefs.data[key]))
gefs.data[key] = logit
args = []
for tstep in tsteps:
print "# PREPPING T", tstep
for key in fKeys:
vals = []
for gefs in gefss.values(): # MAKE SURE VALUES ARE READ IN THE SAME ORDER AS COORDS
vals.append(gefs.data[key][tstep])
args.append((np.array(vals), mcoords, gcoords))
for regr in ("constant", "linear", "quadratic"):
#results = pool.map(runGaussianProcess, itertools.izip(args, itertools.repeat(regr)))
results = []
for arg in args:
results.append(runGaussianProcess((arg, regr)))
ridx = 0
for tstep in tsteps:
print "# SAVING T", tstep
for key in fKeys:
result = results[ridx]
for i in range(len(result[0])):
mesonet = mesonets.values()[i] # MAKE SURE VALUES ARE READ IN THE SAME ORDER AS COORDS
mesonet.pdata[key][tstep] = result[0][i]
ridx += 1
if dologit:
datafile = "gp2b_%s_%s_logit.pickle" % (switch, regr)
buff = open(datafile, "wb")
cPickle.dump((mesonets, fmins, fmaxs), buff)
buff.close()
else:
datafile = "gp2b_%s_%s.pickle" % (switch, regr)
buff = open(datafile, "wb")
cPickle.dump(mesonets, buff)
buff.close()
del results
|
acbecker/solar
|
gp2/gp2.py
|
Python
|
mit
| 8,811
|
[
"Gaussian"
] |
8fe38dc71f013e7cdc1b4c00530a21e963efa61e9ab542f3283e868f9af56e87
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Hc(MakefilePackage):
    """HC is a global mantle circulation solver following Hager & O'Connell
    (1981) which can compute velocities, tractions, and geoid for simple
    density distributions and plate velocities."""

    homepage = "https://geodynamics.org/cig/software/hc/"
    url = "https://geodynamics.org/cig/software/hc/hc-1.0.7.tar.gz"

    version('1.0.7', sha256='7499ea76ac4739a9c0941bd57d124fb681fd387c8d716ebb358e6af3395103ed')

    # HC builds against GMT 4.x and NetCDF.
    depends_on('gmt@4.2.1:4')
    depends_on('netcdf-c')

    # Build phase fails in parallel with the following error messages:
    # /usr/bin/ld: cannot find -lrick
    # /usr/bin/ld: cannot find -lhc
    # /usr/bin/ld: cannot find -lggrd
    parallel = False

    def setup_build_environment(self, env):
        # The HC Makefile locates its dependencies through these variables.
        env.set('GMTHOME', self.spec['gmt'].prefix)
        env.set('NETCDFHOME', self.spec['netcdf-c'].prefix)
        env.set('HC_HOME', self.prefix)
        # An inherited ARCH value would override the Makefile's own
        # architecture detection, so clear it.
        env.unset('ARCH')

    def install(self, spec, prefix):
        # Most files are installed during the build stage.
        # Manually install header files as well.
        for header in find('.', '*.h'):
            install(header, prefix.include)
|
LLNL/spack
|
var/spack/repos/builtin/packages/hc/package.py
|
Python
|
lgpl-2.1
| 1,392
|
[
"NetCDF"
] |
bbae91ab1855638579ff95c58da9e7a1f1648c55363bf2cfce20ae4013e45529
|
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2009 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Module that provides the _Waiter class """
from __future__ import absolute_import
from __future__ import print_function
from types import GeneratorType
import ast
import inspect
from ._util import _dedent
from ._delay import delay
from ._join import join
from ._Signal import _Signal, _WaiterList, posedge, negedge
from ._simulator import _simulator
from ._compat import ast_parse
schedule = _simulator._futureEvents.append
class _Waiter(object):
    """Generic waiter: wraps a generator and re-arms it on whatever clause(s)
    it yields.  The specialised subclasses below handle the common
    single-clause-kind generators more efficiently."""

    __slots__ = ('caller', 'generator', 'hasRun', 'nrTriggers', 'semaphore')

    def __init__(self, generator, caller=None):
        self.caller = caller        # waiter to resume when this generator finishes
        self.generator = generator  # the wrapped generator
        self.hasRun = 0             # set once a multi-trigger wait has fired
        self.nrTriggers = 1         # number of clauses armed for the current wait
        self.semaphore = 0          # outstanding triggers for a join() wait

    def next(self, waiters, actives, exc):
        # Advance the generator one step and re-arm on its next yield clause.
        # Raises StopIteration when this waiter should not (or cannot) run.
        if self.hasRun:
            # Another clause of the same multi-clause wait already ran us.
            raise StopIteration
        if self.semaphore:
            # join(): wait until every joined clause has fired.
            self.semaphore -= 1
            raise StopIteration
        if self.nrTriggers == 1:
            clone = self
        else:
            # Multi-trigger wait: mark this instance spent, arm a fresh clone.
            self.hasRun = 1
            clone = _Waiter(self.generator, self.caller)
        try:
            clause = next(self.generator)
        except StopIteration:
            # Generator finished: resume whoever was waiting on it.
            if self.caller:
                waiters.append(self.caller)
            raise # again
        # Normalise the yielded value into a tuple of clauses.
        if isinstance(clause, _WaiterList):
            clauses = (clause,)
        elif isinstance(clause, (tuple, list)):
            clone.nrTriggers = len(clause)
            if clause:
                clauses = clause
            else:
                clauses = (None,)
        elif isinstance(clause, join):
            # join() fires only after all of its arguments have fired.
            clone.semaphore = len(clause._args)-1
            clauses = clause._args
        else:
            clauses = (clause,)
        # Arm the clone on every clause; track multi-clause registrations in
        # ``actives`` so the simulator can clean up the losers afterwards.
        nr = len(clauses)
        for clause in clauses:
            if isinstance(clause, _WaiterList):
                # Edge wait (posedge/negedge list).
                clause.append(clone)
                if nr > 1:
                    actives[id(clause)] = clause
            elif isinstance(clause, _Signal):
                # Any-event wait on a signal.
                wl = clause._eventWaiters
                wl.append(clone)
                if nr > 1:
                    actives[id(wl)] = wl
            elif isinstance(clause, delay):
                # Timed wait: put the clone on the future-event queue.
                t = _simulator._time
                schedule((t + clause._time, clone))
            elif isinstance(clause, GeneratorType):
                # Sub-generator: run it; resume the clone when it finishes.
                waiters.append(_Waiter(clause, clone))
            elif isinstance(clause, _Instantiator):
                waiters.append(_Waiter(clause.gen, clone))
            elif isinstance(clause, join):
                waiters.append(_Waiter(clause._generator(), clone))
            elif clause is None:
                waiters.append(clone)
            elif isinstance(clause, Exception):
                waiters.append(clone)
                if not exc:
                    exc.append(clause)
            else:
                raise TypeError("yield clause %s has type %s" %
                                (repr(clause), type(clause)))
class _DelayWaiter(_Waiter):
    """Specialised waiter for generators that only ever yield delay()."""

    __slots__ = ('generator')

    def __init__(self, generator):
        self.generator = generator

    def next(self, waiters, actives, exc):
        # The clause is always a delay: schedule directly on the event queue.
        clause = next(self.generator)
        schedule((_simulator._time + clause._time, self))
class _EdgeWaiter(_Waiter):
    """Specialised waiter for generators that only yield a single edge
    (posedge/negedge) clause."""

    __slots__ = ('generator', 'hasRun')

    def __init__(self, generator):
        self.generator = generator
        self.hasRun = 0

    def next(self, waiters, actives, exc):
        # The clause is a _WaiterList (edge): simply re-arm on it.
        clause = next(self.generator)
        clause.append(self)
class _EdgeTupleWaiter(_Waiter):
    """Specialised waiter for generators that only yield tuples of edges."""

    __slots__ = ('generator', 'hasRun')

    def __init__(self, generator):
        self.generator = generator
        self.hasRun = 0

    def next(self, waiters, actives, exc):
        if self.hasRun:
            # One of the armed edges already fired this wait.
            raise StopIteration
        clauses = next(self.generator)
        self.hasRun = 1
        # Arm a fresh clone on every edge; register in ``actives`` so the
        # simulator can remove the losers after one edge fires.
        clone = _EdgeTupleWaiter(self.generator)
        for clause in clauses:
            clause.append(clone)
            actives[id(clause)] = clause
class _SignalWaiter(_Waiter):
    """Specialised waiter for generators that only yield a single signal."""

    __slots__ = ('generator', 'hasRun')

    def __init__(self, generator):
        self.generator = generator
        self.hasRun = 0

    def next(self, waiters, actives, exc):
        # Re-arm on the signal's any-event waiter list.
        clause = next(self.generator)
        clause._eventWaiters.append(self)
class _SignalTupleWaiter(_Waiter):
    """Specialised waiter for generators that only yield tuples of signals."""

    __slots__ = ('generator', 'hasRun')

    def __init__(self, generator):
        self.generator = generator
        self.hasRun = 0

    def next(self, waiters, actives, exc):
        if self.hasRun:
            # One of the armed signals already fired this wait.
            raise StopIteration
        clauses = next(self.generator)
        self.hasRun = 1
        # Arm a fresh clone on every signal's event-waiter list; register in
        # ``actives`` so the simulator can remove the losers afterwards.
        clone = _SignalTupleWaiter(self.generator)
        for clause in clauses:
            wl = clause._eventWaiters
            wl.append(clone)
            actives[id(wl)] = wl
class _kind(object):
SIGNAL_TUPLE = 1
EDGE_TUPLE = 2
SIGNAL = 3
EDGE = 4
DELAY = 5
UNDEFINED = 6
def _inferWaiter(gen):
    """Pick the cheapest waiter class for *gen* by statically classifying
    the yield clauses in its source; fall back to the generic _Waiter when
    the clauses are mixed or unrecognised."""
    frame = gen.gi_frame
    source = _dedent(inspect.getsource(frame))
    tree = ast_parse(source)
    # Snapshot the generator's name bindings so the visitor can resolve
    # yielded names to signals / delay / edge helpers.
    tree.symdict = frame.f_globals.copy()
    tree.symdict.update(frame.f_locals)
    visitor = _YieldVisitor(tree)
    visitor.visit(tree)
    dispatch = {
        _kind.EDGE_TUPLE: _EdgeTupleWaiter,
        _kind.SIGNAL_TUPLE: _SignalTupleWaiter,
        _kind.DELAY: _DelayWaiter,
        _kind.EDGE: _EdgeWaiter,
        _kind.SIGNAL: _SignalWaiter,
    }
    # Default: the generic waiter handles anything.
    return dispatch.get(visitor.kind, _Waiter)(gen)
class _YieldVisitor(ast.NodeVisitor):
    """AST visitor that classifies every yield clause in a generator's source
    into one _kind tag, so _inferWaiter can pick a specialised waiter.  Any
    mix of different clause kinds collapses to _kind.UNDEFINED."""

    def __init__(self, root):
        self.kind = None  # running classification across all yields seen
        self.root = root  # module node carrying symdict for name resolution

    def visit_Yield(self, node):
        # Classify the yielded expression, then fold it into self.kind.
        self.visit(node.value)
        if not hasattr(node.value, 'kind'):
            self.kind = _kind.UNDEFINED
        elif not self.kind:
            self.kind = node.value.kind
        elif self.kind != node.value.kind:
            # Different kinds across yields: no specialised waiter applies.
            self.kind = _kind.UNDEFINED

    def visit_Tuple(self, node):
        # A tuple clause is classified by its elements: an all-signal or
        # all-edge tuple maps to the tuple kinds, anything else is UNDEFINED.
        kind = None
        for elt in node.elts:
            self.visit(elt)
            if not hasattr(elt, 'kind'):
                kind = _kind.UNDEFINED
            elif not kind:
                kind = elt.kind
            elif kind != elt.kind:
                kind = _kind.UNDEFINED
        if kind == _kind.SIGNAL:
            node.kind = _kind.SIGNAL_TUPLE
        elif kind == _kind.EDGE:
            node.kind = _kind.EDGE_TUPLE
        else:
            node.kind = _kind.UNDEFINED

    def visit_Call(self, node):
        # A call clause (e.g. delay(10)) takes the kind of the called name.
        fn = node.func
        if not isinstance(fn, ast.Name):
            node.kind = _kind.UNDEFINED
            return
        self.visit(fn)
        node.kind = fn.kind

    def visit_Name(self, node):
        # Resolve the name against the generator's globals/locals snapshot.
        n = node.id
        node.kind = _kind.UNDEFINED
        if n in self.root.symdict:
            obj = self.root.symdict[n]
            if isinstance(obj, _Signal):
                node.kind = _kind.SIGNAL
            elif obj is delay:
                node.kind = _kind.DELAY
            elif obj is posedge or obj is negedge:
                node.kind = _kind.EDGE

    def visit_Attribute(self, node):
        # sig.posedge / sig.negedge attribute access counts as an edge.
        node.kind = _kind.UNDEFINED
        if node.attr in ('posedge', 'negedge'):
            node.kind = _kind.EDGE
from ._instance import _Instantiator
|
jmgc/myhdl-numeric
|
myhdl/_Waiter.py
|
Python
|
lgpl-2.1
| 8,214
|
[
"VisIt"
] |
533499245683195f9f0c4443f41f8191f9860260526ccc5a3fd66d5ed00ded06
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.modeling.plot_truncated Plot the truncated images for a certain factor.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
# Import the relevant PTS classes and modules
from pts.core.tools import filesystem as fs
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments
from pts.modeling.core.environment import GalaxyModelingEnvironment
from pts.magic.plot.imagegrid import StandardImageGridPlotter
from pts.core.filter.filter import parse_filter
from pts.core.tools.parsing import real
from pts.magic.core.frame import Frame
from pts.modeling.core.environment import verify_modeling_cwd
# -----------------------------------------------------------------
# Create the configuration definition
# Create the configuration definition
definition = ConfigurationDefinition()

# The truncation ellipse factor to plot (required command-line argument)
definition.add_required("factor", "real", "truncation ellipse factor for which to plot")

# Get configuration
config = parse_arguments("plot_truncated", definition)

# -----------------------------------------------------------------

modeling_path = verify_modeling_cwd()

# -----------------------------------------------------------------

# Load the modeling environment
environment = GalaxyModelingEnvironment(modeling_path)

# -----------------------------------------------------------------

# Create plotter
plotter = StandardImageGridPlotter()

# Loop over the per-filter directories in the truncation path
for path, name in fs.directories_in_path(environment.truncation_path, returns=["path", "name"]):

    # Determine the path to the lowres directory
    lowres_path = fs.join(path, "lowres")

    # Determine the filter (directory name encodes the filter)
    fltr = parse_filter(name)
    filter_name = str(fltr)

    # Initialize variable
    the_image_path = None

    # Find the image corresponding to the specified factor
    for image_path, image_name in fs.files_in_path(lowres_path, extension="fits", returns=["path", "name"]):

        # Determine the factor (file name encodes the truncation factor)
        factor = real(image_name)

        # If the factor corresponds to the specified factor (within 1%), take this image
        if np.isclose(factor, config.factor, rtol=0.01):
            the_image_path = image_path
            break

    # Check
    if the_image_path is None: raise ValueError("No truncated " + filter_name + " image found for a factor of " + str(config.factor))

    # Add the image
    frame = Frame.from_file(the_image_path)
    plotter.add_image(frame, filter_name)

# -----------------------------------------------------------------

# Run the plotter
plotter.run()
# -----------------------------------------------------------------
|
SKIRT/PTS
|
do/modeling/plot_truncated.py
|
Python
|
agpl-3.0
| 3,068
|
[
"Galaxy"
] |
2cac0fc2a3e1cc1d665a46c4635aca762694c3ec3f9f1a8eaabb5e2a4c468792
|
import collections
import itertools
import os
import re
def parseModuleFile(modulefile):
    """Read a tab-delimited module file.

    Each line lists a program, its executable path and any environment
    modules it requires: ``<program>\\t<path>[\\t<module>...]``.

    Args:
        modulefile (str): Path to the tab-delimited module file.

    Returns:
        tuple: ``(pathDict, moduleDict)`` where pathDict maps program ->
        path and moduleDict maps program -> list of required modules.
    """
    paths = {}
    modules = {}
    with open(modulefile) as handle:
        for rawline in handle:
            fields = rawline.strip().split('\t')
            # Column 1: program, column 2: path, remaining columns: modules.
            program = fields[0]
            paths[program] = fields[1]
            modules[program] = fields[2:]
    return (paths, modules)
def parseSampleFile(samplefile):
    """Read a tab-delimited sample sheet.

    Each line lists a sample name, a FASTQ filename prefix and one or more
    directories in which to search for FASTQ files.

    Args:
        samplefile (str): Path to the tab-delimited sample file.

    Returns:
        collections.OrderedDict: sample name -> (prefix, [directories]),
        preserving file order.

    Raises:
        IOError: If a sample lists no input directories, or if any two
            prefixes overlap (one is a prefix of another) and therefore
            could not identify unique files.
    """
    samples = collections.OrderedDict()
    prefixes = []
    with open(samplefile) as handle:
        for rawline in handle:
            fields = rawline.strip().split('\t')
            name, prefix = fields[0], fields[1]
            directories = fields[2:]
            if not directories:
                raise IOError('No input directores for {}'.format(name))
            samples[name] = (prefix, directories)
            prefixes.append(prefix)
    # Overlapping prefixes would match each other's FASTQ files.
    for first, second in itertools.permutations(prefixes, 2):
        if first.startswith(second):
            raise IOError("prefices '{}' and '{}' overlap".format(first, second))
    return (samples)
def parseParameterFile(paramfile):
    """Read a tab-delimited parameter file into a dictionary.

    Lines beginning with '#' are skipped.  Each remaining line must have
    exactly two tab-separated fields; values are converted to int where
    possible, then float, otherwise kept as strings.

    Args:
        paramfile (str): Path to the tab-delimited parameter file.

    Returns:
        dict: Parameter name -> (int | float | str) value.
    """
    params = {}
    with open(paramfile) as handle:
        for rawline in handle:
            # Skip comment lines.
            if rawline.startswith('#'):
                continue
            key, value = rawline.strip().split('\t')
            # Try the narrowest numeric type first.
            for caster in (int, float):
                try:
                    value = caster(value)
                    break
                except ValueError:
                    continue
            params[key] = value
    return (params)
def parseIndexFile(indexfile):
    """Read a two-column tab-delimited index file.

    Args:
        indexfile (str): Path to the tab-delimited index file.

    Returns:
        dict: Index name -> value (both kept as strings; unlike
        parseParameterFile, no numeric conversion is attempted).
    """
    index = {}
    with open(indexfile) as handle:
        for rawline in handle:
            key, value = rawline.strip().split('\t')
            index[key] = value
    return (index)
def findFastq(prefix, dirList):
    """Locate FASTQ files whose names start with *prefix*.

    Searches every directory in *dirList* for read-1 files matching
    ``<prefix>*R1[_NNN].fastq.gz`` and pairs each with the corresponding
    read-2 file (R1 -> R2) when one exists in the same directory.

    Args:
        prefix (str): Filename prefix identifying the FASTQ files.
        dirList (list): Directories to search.

    Returns:
        tuple: ``(read1, read2)`` lists of full file paths.

    Raises:
        IOError: If no read-1 files are found, or if only some read-1 files
            have a read-2 partner (mixed single- and paired-end data).
    """
    firstReads = []
    secondReads = []
    # Anchor the search at the (escaped) prefix; allow an optional _NNN
    # chunk number before the suffix.
    pattern = re.compile(re.escape(prefix) + '.*?R1(_\\d{3}){0,1}\\.fastq.gz$')
    for directory in dirList:
        entries = os.listdir(directory)
        for entry in entries:
            if not re.match(pattern, entry):
                continue
            firstReads.append(os.path.join(directory, entry))
            # Derive the mate's name by swapping R1 -> R2 in place.
            mate, count = re.subn(
                'R1(?=(_\\d{3}){0,1}\\.fastq.gz$)', 'R2', entry)
            if count != 1:
                raise IOError('Could not generate read2 filename'\
                    ' for %s' %(entry))
            if mate in entries:
                secondReads.append(os.path.join(directory, mate))
    # Sanity-check the collected files.
    if len(firstReads) == 0:
        raise IOError('{}: No FASTQ files found'.format(prefix))
    if len(secondReads) and len(firstReads) != len(secondReads):
        raise IOError('{}: Mixed single- and paired-end'.format(prefix))
    return (firstReads, secondReads)
def createOutFiles(outdir, sample):
    """Create the per-sample output directory and name all output files.

    Args:
        outdir (str): Top-level output directory.
        sample (str): Sample name, used as both sub-directory name and
            file-name prefix.

    Returns:
        dict: Mapping of logical labels to output paths; also includes the
        'outdir' directory and the common 'prefix' ("<outdir>/<sample>/<sample>.").
    """
    # Create the sample directory (side effect) and the common prefix.
    sampledir = os.path.join(outdir, sample)
    if not os.path.isdir(sampledir):
        os.mkdir(sampledir)
    stem = os.path.join(sampledir, sample) + '.'
    outfiles = {
        # Directories, prefixes and job file.
        'prefix': stem,
        'outdir': sampledir,
        'slurm': stem + 'slurm',
        # FASTQ processing.
        'cat1': stem + 'R1.fastq.gz',
        'cat2': stem + 'R2.fastq.gz',
        'trim1': stem + 'trim.R1.fastq.gz',
        'trim2': stem + 'trim.R2.fastq.gz',
        'fastqclog': stem + 'fastqc.log',
        'trimlog': stem + 'cutadapt.metrics',
        # BAM processing.
        'starbam': stem + 'Aligned.out.bam',
        'starlog': stem + 'star.log',
        'sortbam': stem + 'sort.bam',
        'sortlog': stem + 'sort.log',
        'mdupbam': stem + 'mdup.bam',
        'mduplog1': stem + 'mdup.metrics',
        'mduplog2': stem + 'mdup.log',
        # htseq-count outputs.
        'htseqlog': stem + 'htseq.log',
        'genecounts': stem + 'gene_counts.txt',
        # BAM QC.
        'metrlog1': stem + 'collectrna.metrics',
        'metrlog2': stem + 'collectrna.log',
        'alsumlog1': stem + 'alignsum.metrics',
        'alsumlog2': stem + 'alignsum.log',
    }
    return (outfiles)
def fastQC(inFile, outDir, path):
    """Build a FastQC (0.11.2) command for one FASTQ file.

    The command runs FastQC with extraction enabled and then removes the
    redundant .html and .zip outputs, leaving the extracted directory.

    Args:
        inFile (str): Input FASTQ file (.fastq or .fastq.gz).
        outDir (str): Output directory.
        path (str): Path to the FastQC executable.

    Returns:
        str: The shell command string.
    """
    # Sample name is the file's basename with the .fastq[.gz] suffix removed.
    name = re.search('([^/]+)\\.fastq(?:\\.gz){0,1}$', inFile).group(1)
    html = os.path.join(outDir, name + '_fastqc.html')
    archive = os.path.join(outDir, name + '_fastqc.zip')
    return '%s --extract -q -o %s %s && rm %s %s' % (
        path, outDir, inFile, html, archive)
def cutadapt(
    read1In, read1Out, read2In, read2Out, quality, adapter, length, path,
    overlap, error
):
    """Build a cutadapt adapter/quality-trimming command.

    Args:
        read1In (str): Read-1 input FASTQ.
        read1Out (str): Read-1 output FASTQ.
        read2In (str): Read-2 input FASTQ, or None for single-end data.
        read2Out (str): Read-2 output FASTQ (required when read2In is given).
        quality (int): Quality cutoff for trimming.
        adapter (str): Comma-separated adapter sequence(s).
        length (int): Minimum read length after trimming (>= 25).
        path (str): Path to the cutadapt executable.
        overlap (int): Minimum adapter overlap (1 .. len(adapter)).
        error (int|float): Maximum allowed error rate (0 <= error < 1).

    Returns:
        str: The cutadapt command string.

    Raises:
        IOError: If read2In is supplied without read2Out.
        TypeError, ValueError: For invalid length/overlap/error arguments.
    """
    # Validate arguments (same order as the checks callers may rely on).
    if read2In is not None and read2Out is None:
        raise IOError('Output file must be supplied for 2nd read')
    if not isinstance(length, int):
        raise TypeError('length must be integer')
    if length < 25:
        raise ValueError('length must be >=25')
    if not isinstance(overlap, int):
        raise TypeError('overlap must be integer')
    if not 1 <= overlap <= len(adapter):
        raise ValueError('overlap must be >=1 and <= adapter length')
    if not isinstance(error, (int, float)):
        raise TypeError('error must be integer or float')
    if not 0 <= error < 1:
        raise ValueError('error must be >=0 and <1')
    parts = [path]
    adapters = adapter.split(',')
    if read2In is None:
        # Single-end: one -a flag per adapter.
        for seq in adapters:
            parts.extend(['-a', seq])
        parts.extend(['-o', read1Out, '-e', error, '-q', quality,
                      '-m', length, '-O', overlap, read1In])
    else:
        # Paired-end: trim the same adapters from both reads (-a and -A).
        for seq in adapters:
            parts.extend(['-a', seq, '-A', seq])
        parts.extend(['-o', read1Out, '-p', read2Out, '-e', error,
                      '-q', quality, '-m', length, '-O', overlap,
                      read1In, read2In])
    return ' '.join(map(str, parts))
def starAlign(
    indexDir, outPrefix, read1, read2, threads, path, rg=1,
    pl='uknown', lb='unknown', sm='uknown'
):
    """Build a STAR alignment command.

    Args:
        indexDir (str): STAR genome index directory.
        outPrefix (str): Prefix for STAR output files.
        read1 (str): Read-1 FASTQ file (optionally gzipped).
        read2 (str|None): Read-2 FASTQ file, or None/'' for single-end data.
        threads (int): Number of alignment threads.
        path (str): Path to the STAR executable.
        rg: Read-group ID; if truthy a read-group line is emitted.
        pl, lb, sm: Read-group platform/library/sample fields.  Defaults are
            kept byte-for-byte (including the historical 'uknown' typo) for
            backward compatibility with existing pipelines.

    Returns:
        str: The STAR command string.

    Raises:
        ValueError: If exactly one of the paired input files is gzipped.
    """
    command = [path, '--runThreadN', threads, '--genomeDir', indexDir,
               '--outFileNamePrefix', outPrefix, '--outSAMtype', 'BAM',
               'Unsorted', '--outSAMunmapped', 'Within',
               '--readFilesIn', read1]
    if read2:
        command.append(read2)
    # Decompress on the fly when the input is gzipped.  Previously a
    # single-end gzipped read1 crashed here with AttributeError because
    # read2 (None) had .endswith called on it; read2 is now optional.
    read1gz = read1.endswith('.gz')
    read2gz = bool(read2) and read2.endswith('.gz')
    if read1gz:
        if read2 and not read2gz:
            raise ValueError('mixture of compressed and uncompressed files')
        command.extend(['--readFilesCommand', 'zcat'])
    elif read2gz:
        # Previously this combination silently skipped zcat; fail loudly.
        raise ValueError('mixture of compressed and uncompressed files')
    # Add read group information.
    if rg:
        command.extend(['--outSAMattrRGline', 'ID:{}'.format(rg)])
        if pl:
            command.append('PL:{}'.format(pl))
        if lb:
            command.append('LB:{}'.format(lb))
        if sm:
            command.append('SM:{}'.format(sm))
    return ' '.join(map(str, command))
def bamsort(
    inFile, outFile, threads, memory, path
):
    """Build a samtools coordinate-sort command.

    Sorts *inFile*, indexes the sorted output, then removes the unsorted
    input file.

    Args:
        inFile (str): Input BAM file (must end in .bam).
        outFile (str): Output BAM file (must end in .bam).
        threads (int): Number of sort threads.
        memory (int): Memory per thread, in gigabytes.
        path (str): Path to the samtools executable.

    Returns:
        str: The combined shell command.

    Raises:
        TypeError: If either file name does not end in .bam.
    """
    if not inFile.endswith('.bam'):
        raise TypeError('Input file suffix must be .bam')
    if not outFile.endswith('.bam'):
        raise TypeError('Output file suffix must be .bam')
    pieces = [path, 'sort', '-m', '{}G'.format(memory), '-@', str(threads),
              '-o', outFile, '-T', outFile[:-4], '-O', 'BAM', inFile]
    sortCmd = ' '.join(filter(None, pieces))
    # Index the sorted output, then delete the unsorted input.
    sortCmd += ' && {} index {}'.format(path, outFile)
    sortCmd += ' && rm {}'.format(inFile)
    return (sortCmd)
def markDuplicates(
    inBam, outBam, logFile, picardPath, memory
):
    """Build a Picard MarkDuplicates command.

    Duplicates are flagged, not removed; the input BAM (and neighbouring
    files) are deleted afterwards.

    Args:
        inBam (str): Input BAM file.
        outBam (str): Output BAM file.
        logFile (str): Duplication-metrics output file.
        picardPath (str): Path to the picard jar file.
        memory (int): Java heap size in gigabytes.

    Returns:
        str: The shell command string.
    """
    pieces = ('java', '-jar', '-Xmx{}g'.format(memory), picardPath,
              'MarkDuplicates', 'I=' + inBam, 'O=' + outBam, 'M=' + logFile,
              'ASSUME_SORTED=true', 'CREATE_INDEX=true',
              'REMOVE_DUPLICATES=false')
    # The trailing glob 'name.ba*' deletes the input BAM and, presumably,
    # its .bai index alongside it — TODO confirm nothing else matches.
    return ' '.join(pieces) + ' && rm {}*'.format(inBam[:-1])
def rnaseqMetric(
    bam, output, refflat, strand, rrna, path, memory
):
    """Build a Picard CollectRnaSeqMetrics command.

    Args:
        bam (str): Input BAM file.
        output (str): Metrics output file.
        refflat (str): Gene annotations in refFlat format.
        strand (str): Library strandedness: 'none', 'forward' or 'reverse'.
        rrna (str): Ribosomal interval_list file.
        path (str): Path to the picard jar file.
        memory (int): Java heap size in gigabytes.

    Returns:
        str: The CollectRnaSeqMetrics command string.

    Raises:
        ValueError: If strand is not one of none|forward|reverse.
    """
    # Map the friendly strand name onto Picard's STRAND argument.
    strand_args = {
        'none': 'STRAND=NONE',
        'forward': 'STRAND=FIRST_READ_TRANSCRIPTION_STRAND',
        'reverse': 'STRAND=SECOND_READ_TRANSCRIPTION_STRAND',
    }
    try:
        strandArg = strand_args[strand]
    except KeyError:
        # Fixed the garbled original message ('strans must be one of ...').
        raise ValueError('strand must be one of none|forward|reverse')
    command = ['java', '-jar', '-Xmx{}g'.format(memory), path,
               'CollectRnaSeqMetrics', 'I=' + bam, 'O=' + output,
               'REF_FLAT=' + refflat, strandArg,
               'RIBOSOMAL_INTERVALS=' + rrna]
    return ' '.join(command)
def alignMetrics(
    bam, output, fasta, path, memory
):
    """Build a Picard CollectAlignmentSummaryMetrics command.

    Args:
        bam (str): Input BAM file.
        output (str): Metrics output file.
        fasta (str): Reference genome FASTA file.
        path (str): Path to the picard jar file.
        memory (int): Java heap size in gigabytes.

    Returns:
        str: The CollectAlignmentSummaryMetrics command string.
    """
    pieces = ('java', '-jar', '-Xmx{}g'.format(memory), path,
              'CollectAlignmentSummaryMetrics', 'R=' + fasta,
              'I=' + bam, 'O=' + output)
    return ' '.join(pieces)
def htseq(
    bam, gtf, path, feature='exon', attrid='gene_id', mode='union',
    stranded='reverse', mapq=10
):
    """Build an htseq-count command for a position-sorted BAM file.

    Args:
        bam (str): Input BAM file (position sorted).
        gtf (str): Gene annotation in GTF format.
        path (str): Path to the htseq-count executable.
        feature (str): GTF feature type to count.
        attrid (str): GTF attribute used as the feature ID.
        mode (str): Overlap mode: union | intersection-strict |
            intersection-nonempty.
        stranded (str): Strandedness: yes | no | reverse.
        mapq (int): Minimum alignment quality (non-negative integer).

    Returns:
        str: The htseq-count command string.

    Raises:
        ValueError, TypeError: For invalid mode/stranded/mapq arguments.
    """
    if mode not in ('union', 'intersection-strict', 'intersection-nonempty'):
        raise ValueError('unrecognised mode')
    if stranded not in ('yes', 'no', 'reverse'):
        raise ValueError('unrecognised stranded argument')
    if not isinstance(mapq, int):
        raise TypeError('mapq not an integer')
    if mapq < 0:
        raise ValueError('mapq is negative')
    pieces = [path, '-f', 'bam', '-r', 'pos', '-s', stranded, '-t', feature,
              '-i', attrid, '-m', mode, '-a', mapq, bam, gtf]
    return ' '.join(map(str, pieces))
|
adam-rabinowitz/ngs_python
|
scripts/RNASeq_Pipeline_EMBL/EMBL_RNASeq_Functions.py
|
Python
|
gpl-2.0
| 16,201
|
[
"HTSeq"
] |
50a64cf150d13e7e46c9c99841bb8b8bdfc270c915f0f2ca6ac72775b57c7d8e
|
import os
import glob
import pandas
import bisect
from MooseDataFrame import MooseDataFrame
import message
class VectorPostprocessorReader(object):
"""
A Reader for MOOSE VectorPostprocessor data.
Args:
pattern[str]: A pattern of files (for use with glob) for loading.
MOOSE outputs VectorPostprocessor data in separate files for each timestep, using the timestep as a prefix. For
example: file_000.csv, file_001.csv, etc.
Therefore, a pattern acceptable for use with the python glob package must be supplied. For the above files,
"file_*.csv" should be supplied.
This object manages the loading and unloading of data and should always be in a valid state, regardless of the
existence of a file. It will also append new data and remove old/deleted data on subsequent calls to "update()".
"""
#: Status flags for loading/reloading/removing csv files (see "_modified").
NO_CHANGE = 0
NEW_DATA = 1
OLD_DATA = 2
def __init__(self, pattern, run_start_time=None):
self.filename = pattern
self._timedata = MooseDataFrame(self.filename.replace('*', 'time'), run_start_time=None, index='timestep')
self._modified_times = dict()
#self._run_start_time = run_start_time
self.data = pandas.Panel()
self.update()
self._minimum_modified = 0.0#self._run_start_time if self._run_start_time else 0.0
def __call__(self, keys, time=None, exact=False, **kwargs):
"""
Operator() returns the latest time or the desired time.
Args:
keys[str|list]: The key(s) to return.
time[float]: The time at which the data should be returned.
exact[bool]: When the time supplied is not an exact match, if 'exact=False' is provided the nearest time
less than the provided time is returned, when false an empty DataFrame is returned.
"""
# Return the latest time
if time == None:
return self.data.iloc[-1][keys]
# Return the specified time
elif time in self.data.keys().values:
return self.data[time][keys]
# Time not found and 'exact=True'
elif exact:
return pandas.DataFrame()
# Time not found and 'exact=False'
else:
times = self.data.keys()
n = len(times)
idx = bisect.bisect_right(times, time) - 1
if idx < 0:
idx = 0
elif idx > n:
idx = -1
return self.data.iloc[idx][keys]
def __getitem__(self, key):
"""
Column based access to VectorPostprocessor data.
Args:
key[str]: A VectorPostprocessor name.
Returns:
pandas.DataFrame containing the data for all available times (column).
"""
if self.data.empty:
return pandas.DataFrame()
else:
return self.data.minor_xs(key)
def __nonzero__(self):
"""
Allows this object to be used in boolean cases.
Example:
data = VectorPostprocessorReader('files_*.csv')
if not data:
print 'No data found!'
"""
return not self.data.empty
def __contains__(self, variable):
"""
Returns true if the variable exists in the data structure.
"""
return variable in self.variables()
def times(self):
"""
Returns the list of available time indices contained in the data.
"""
return list(self.data.keys().values)
def clear(self):
"""
Remove all data.
"""
self.data = pandas.Panel()
self._modified_times = dict()
self._minimum_modified = 0.0# self._run_start_time if self._run_start_time else 0.0
def variables(self):
"""
Return a list of postprocessor variable names listed in the reader.
"""
return self.data.axes[2]
def update(self):
"""
Update data by adding/removing files.
"""
# Return code (1 = something changed)
retcode = 0
# Update the time data file
self._timedata.update()
# The current filenames, time index, and modified status
filenames, indices, modified = self._filenames()
# Clear the data if empty
if not filenames:
self.clear()
return 1
# Loop through the filenames
for fname, index, mod in zip(filenames, indices, modified):
if mod == VectorPostprocessorReader.NEW_DATA:
try:
df = pandas.read_csv(fname)
except:
message.mooseWarning('The file {} failed to load, it is likely empty.'.format(fname))
continue
df.insert(0, 'index (Peacock)', pandas.Series(df.index, index=df.index))
if self.data.empty:
self.data = pandas.Panel({index:df})
else:
self.data[index] = df
retcode = 1
elif (mod == VectorPostprocessorReader.OLD_DATA) and (index in self.data.keys()):
self.data.pop(index)
retcode = 1
# Remove missing files
for key in self.data.keys():
if key not in indices:
self.data.pop(key)
retcode = 1
return retcode
def repr(self):
"""
Return components for building script.
Returns:
(output, imports) The necessary script and include statements to re-create data load.
"""
imports = ['import mooseutils']
output = ['\n# Read VectorPostprocessor Data']
output += ['data = mooseutils.VectorPostprocessorReader({})'.format(repr(self.filename))]
return output, imports
def _filenames(self):
"""
Returns the available filenames, time index, and modified status. (protected)
"""
# The list of files from the supplied pattern
filenames = sorted(glob.glob(self.filename))
# Remove the "_time.csv" from the list, if it exists
try:
filenames.remove(self._timedata.filename)
except:
pass
# Update the minimum modified time
if len(filenames) > 0:
self._minimum_modified = os.path.getmtime(filenames[0])
else:
self._minimum_modified = 0
# Determine the time index and modified status
indices, modified = [], []
for fname in filenames:
indices.append(self._time(fname))
modified.append(self._modified(fname))
return filenames, indices, modified
def _modified(self, filename):
"""
Determine the modified status of a filename. (protected)
"""
modified = os.path.getmtime(filename)
if modified < self._minimum_modified:
self._modified_times.pop(filename, None)
return VectorPostprocessorReader.OLD_DATA
elif (filename not in self._modified_times) or (modified > self._modified_times[filename]):
self._modified_times[filename] = os.path.getmtime(filename)
return VectorPostprocessorReader.NEW_DATA
return VectorPostprocessorReader.NO_CHANGE
def _time(self, filename):
"""
Determine the time index. (protected)
"""
idx = filename.rfind('_') + 1
tstep = int(filename[idx:-4])
if not self._timedata:
return tstep
else:
try:
return self._timedata['time'].loc[tstep]
except Exception:
return tstep
|
Chuban/moose
|
python/mooseutils/VectorPostprocessorReader.py
|
Python
|
lgpl-2.1
| 7,773
|
[
"MOOSE"
] |
a3f7f7f5fb062e1c1378a4612dd7696c07b994274f64d6e9db8cf0d416d1bae6
|
# Copyright (c) 2014 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this list
# of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Fan, Yugang <yugang.fan@intel.com>
import time
import json
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import (
NoSuchElementException,
StaleElementReferenceException,
NoAlertPresentException,
WebDriverException)
from atip.tizen import tizen
from atip.common import common
try:
from urlparse import urljoin, urlparse
except ImportError:
from urllib.parse import urljoin, urlparse
class WebAPP(common.APP):
def __init__(self, app_config=None, app_name=None):
self.__driver = None
self.app_type = common.APP_TYPE_WEB
self.app_name = app_name
self.app_id = ""
apk_activity_name = ""
apk_pkg_name = ""
if "platform" in app_config and "name" in app_config["platform"]:
if app_config["platform"]["name"].upper().find('TIZEN') >= 0:
app_id = tizen.get_appid_by_name(
self.app_name, app_config["platform"])
if app_config["platform"]["name"].upper().find('ANDROID') >= 0:
self.app_name = self.app_name.replace("-", "_")
apk_name_update = "".join(
[i.capitalize() for i in self.app_name.split("_") if i])
apk_activity_name = ".%sActivity" % apk_name_update
apk_pkg_name = "org.xwalk.%s" % self.app_name
app_config_str = json.dumps(app_config).replace(
"TEST_APP_NAME", self.app_name).replace(
"TEST_APP_ID", self.app_id).replace(
"TEST_PKG_NAME", apk_pkg_name).replace(
"TEST_ACTIVITY_NAME", apk_activity_name)
self.app_config = json.loads(app_config_str)
if "url-prefix" in app_config:
self.url_prefix = app_config["url-prefix"]
else:
self.url_prefix = ""
def __get_element_by_xpath(self, xpath, display=True):
try:
element = self.__driver.find_element_by_xpath(xpath)
if display:
try:
if element.is_displayed():
return element
except StaleElementReferenceException:
pass
else:
return element
print "Failed to get element"
except Exception as e:
print "Failed to get element: %s" % e
return None
def __get_element_by_tag(self, key, display=True):
try:
element = self.__driver.find_element_by_tag(key)
return element
except Exception as e:
print "Failed to get element: %s" % e
return None
def __get_element_by_key(self, key, display=True):
try:
for i_element in self.__driver.find_elements_by_xpath(str(
"//*[@id='%(key)s']|"
"//*[@name='%(key)s']|"
"//*[@value='%(key)s']|"
"//*[contains(@class, '%(key)s')]|"
"//button[contains(text(), '%(key)s')]|"
"//input[contains(text(), '%(key)s')]|"
"//textarea[contains(text(), '%(key)s')]|"
"//a[contains(text(), '%(key)s')]") % {'key': key}):
if display:
try:
if i_element.is_displayed():
return i_element
except StaleElementReferenceException:
pass
else:
return i_element
print "Failed to get element"
except Exception as e:
print "Failed to get element: %s" % e
return None
def __check_normal_text(self, text, display=True):
try:
for i_element in self.__driver.find_elements_by_xpath(str(
'//*[contains(normalize-space(.),"{text}") '
'and not(./*[contains(normalize-space(.),"{text}")])]'
.format(text=text))):
if display:
try:
if i_element.is_displayed():
return i_element
except StaleElementReferenceException:
pass
else:
return i_element
except Exception as e:
print "Failed to get element: %s" % e
return None
def __check_normal_text_element(self, text, key, display=True):
element = self.__get_element_by_key(key, display)
if element:
try:
for i_element in element.find_elements_by_xpath(str(
'//*[contains(normalize-space(.),"{text}") '
'and not(./*[contains(normalize-space(.),"{text}")])]'
.format(text=text))):
if display:
try:
if i_element.is_displayed():
return i_element
except StaleElementReferenceException:
pass
else:
return i_element
except Exception as e:
print "Failed to get element: %s" % e
return None
def launch_app(self):
try:
desired_capabilities = self.app_config["desired-capabilities"]
self.__driver = WebDriver(
str(self.app_config["driver-url"]), desired_capabilities)
except Exception as e:
print "Failed to launch %s: %s" % (self.app_name, e)
return False
return True
def switch_url(self, url, with_prefix=True):
if with_prefix:
url = urljoin(self.url_prefix, url)
try:
self.__driver.get(url)
except Exception as e:
print "Failed to visit %s: %s" % (url, e)
return False
return True
def title(self):
try:
return self.__driver.title
except Exception as e:
print "Failed to get title: %s" % e
return None
def current_url(self):
try:
return self.__driver.current_url
except Exception as e:
print "Failed to get current url: %s" % e
return None
def reload(self):
self.__driver.refresh()
return True
def back(self):
self.__driver.back()
return True
def forward(self):
self.__driver.forward()
return True
def check_normal_text_timeout(self, text=None, display=True, timeout=2):
end_time = time.time() + timeout
while time.time() < end_time:
if self.__check_normal_text(text, display):
return True
time.sleep(0.2)
return False
def check_normal_text_element_timeout(
self, text=None, key=None, display=True, timeout=2):
end_time = time.time() + timeout
while time.time() < end_time:
if self.__check_normal_text_element(text, key, display):
return True
time.sleep(0.2)
return False
def press_element_by_key(self, key, display=True):
element = self.__get_element_by_key(key, display)
if element:
element.click()
return True
return False
def click_element_by_key(self, key, display=True):
element = self.__get_element_by_key(key, display)
if element:
ActionChains(self.__driver).click(element).perform()
return True
return False
def click_element_coords(self, x, y, key, display=True):
element = self.__get_element_by_key(key, display)
if element:
ActionChains(self.__driver).move_to_element_with_offset(
element, x, y).click().perform()
return True
return False
def fill_element_by_key(self, key, text, display=True):
element = self.__get_element_by_key(key, display)
if element:
element.send_keys(text)
return True
return False
def check_checkbox_by_key(self, key, display=True):
element = self.__get_element_by_xpath(str(
"//input[@id='%(key)s'][@type='checkbox']|"
"//input[@name='%(key)s'][@type='checkbox']") % {'key': key}, display)
if element:
if not element.is_selected():
element.click()
return True
return False
def uncheck_checkbox_by_key(self, key, display=True):
element = self.__get_element_by_xpath(str(
"//input[@id='%(key)s'][@type='checkbox']|"
"//input[@name='%(key)s'][@type='checkbox']") % {'key': key}, display)
if element:
if element.is_selected():
element.click()
return True
return False
def get_alert_text(self):
try:
alert_element = self.__driver.switch_to_alert()
if alert_element:
return alert_element.text
except Exception as e:
print "Failed to get alert text: %s" % e
return None
def check_alert_existing(self):
try:
self.__driver.switch_to_alert().text
except NoAlertPresentException:
return False
return True
def accept_alert(self):
try:
alert_element = self.__driver.switch_to_alert()
alert_element.accept()
return True
except Exception as e:
print "Failed to accept alert: %s" % e
return False
def quit(self):
if self.__driver:
self.__driver.quit()
def launch_webapp_by_name(context, app_name):
    """Create (or re-create) the named WebAPP, launch it, and store it on context.

    Raises AssertionError when no web config is available or the launch fails
    (behave reports the step as failed).
    """
    assert context.web_config
    if app_name in context.apps:
        context.apps[app_name].quit()
    app = WebAPP(context.web_config, app_name)
    context.apps[app_name] = app
    context.app = app
    assert app.launch_app()
|
xiaojunwu/crosswalk-test-suite
|
tools/atip/atip/web/web.py
|
Python
|
bsd-3-clause
| 11,607
|
[
"VisIt"
] |
6a6555d0509ee31f5c949fe66d5c333949a72950b11b02da66215a8e65c1ed63
|
#!/usr/bin/env python
# Emacs: treat this as -*- python -*-
import os
import sys
from optparse import OptionParser
def build_parser():
    """Construct the OptionParser for the basis-plotting command line."""
    parser = OptionParser(usage='%prog [OPTION] [BASIS]...',
                          version='%prog 1.0')
    parser.add_option('-f', '--files', action='store_true',
                      dest='actual_filenames',
                      help='Read from specified filenames rather than '
                           'searching GPAW setup directories')
    parser.add_option('-s', '--save-figs', action='store_true', dest='save',
                      help='Save figures to disk rather than showing plots')
    parser.add_option('-l', '--literal', action='store_true',
                      help='Do not pre-multiply wave functions by r in plots')
    parser.add_option('-n', '--normalize', action='store_true',
                      help='Plot normalized wave functions')
    parser.add_option('-x', '--ext', default='png',
                      help='Image format [default: %default]')
    return parser
def main():
    """Parse command-line options and plot each basis file given as argument.

    Filenames are expected to look like '<symbol>.<name...>.<ext>'; the symbol
    and the dotted middle part identify the basis set.
    """
    parser = build_parser()
    opts, files = parser.parse_args()

    # Imported lazily so '--help' works without matplotlib/gpaw installed.
    import pylab
    from gpaw.basis_data import Basis, BasisPlotter

    plotter = BasisPlotter(premultiply=not opts.literal,
                           normalize=opts.normalize,
                           show=False,
                           save=opts.save,
                           ext=opts.ext)

    for path in files:
        dir, filename = os.path.split(path)
        splitfilename = filename.split('.')
        symbol = splitfilename[0]       # e.g. 'H' from 'H.dzp.basis'
        extension = splitfilename[-1]   # file extension (unused below)
        name = '.'.join(splitfilename[1:-1])  # basis-set name, e.g. 'dzp'
        if opts.actual_filenames:
            # Load directly from the given path instead of the setup paths.
            basis = Basis(symbol, name, False)
            basis.read_xml(path)
        else: # Search GPAW setup dirs
            basis = Basis(symbol, name)
        plotter.plot(basis)

    if not opts.save:
        pylab.show()
|
qsnake/gpaw
|
gpaw/lcao/analyse_basis.py
|
Python
|
gpl-3.0
| 1,912
|
[
"GPAW"
] |
8ad06ba67184e7cfa4e876b24c28af7d26fc95f13ae13ad5b921f670777d8a2a
|
"""
Filename: lss.py
Reference: http://quant-econ.net/py/linear_models.html
Computes quantities associated with the Gaussian linear state space model.
"""
from textwrap import dedent
import numpy as np
from numpy.random import multivariate_normal
from scipy.linalg import solve
#-Check if Numba is Available-#
from .util import numba_installed, jit
def simulate_linear_model(A, x0, v, ts_length):
    """
    Simulate the vector linear system

        x_{t+1} = A x_t + v_t    with    x_0 = x0

    where x_t and v_t are n x 1 and A is n x n.

    The matrix-vector product is written as explicit scalar loops on purpose:
    this function is a target for Numba JIT compilation, so it must avoid
    NumPy method dispatch in the inner loop.

    Parameters
    ----------
    A : array_like or scalar(float)
        Should be n x n
    x0 : array_like
        Should be n x 1. Initial condition
    v : np.ndarray
        Should be n x ts_length-1. Its t-th column is used as the time t
        shock v_t
    ts_length : int
        The length of the time series

    Returns
    --------
    np.ndarray
        Time series with ts_length columns, the t-th column being x_t
    """
    A = np.asarray(A)
    dim = A.shape[0]
    path = np.empty((dim, ts_length))
    path[:, 0] = x0
    for t in range(ts_length - 1):
        # path[:, t+1] = A @ path[:, t] + v[:, t], unrolled by hand
        for row in range(dim):
            path[row, t + 1] = v[row, t]
            for col in range(dim):
                path[row, t + 1] += A[row, col] * path[col, t]
    return path
# JIT-compile the simulation kernel when Numba is available; the pure-Python
# fallback is used otherwise (same semantics, slower inner loops).
if numba_installed:
    simulate_linear_model = jit(simulate_linear_model)
class LinearStateSpace(object):
    """
    A class that describes a Gaussian linear state space model of the
    form:

        x_{t+1} = A x_t + C w_{t+1}

        y_t = G x_t + H v_t

    where {w_t} and {v_t} are independent and standard normal with dimensions
    m and l respectively.  The initial conditions are mu_0 and Sigma_0 for x_0
    ~ N(mu_0, Sigma_0).  When Sigma_0=0, the draw of x_0 is exactly mu_0.

    Parameters
    ----------
    A : array_like or scalar(float)
        Part of the state transition equation.  It should be `n x n`
    C : array_like or scalar(float)
        Part of the state transition equation.  It should be `n x m`
    G : array_like or scalar(float)
        Part of the observation equation.  It should be `k x n`
    H : array_like or scalar(float), optional(default=None)
        Part of the observation equation.  It should be `k x l`
    mu_0 : array_like or scalar(float), optional(default=None)
        This is the mean of initial draw and is `n x 1`
    Sigma_0 : array_like or scalar(float), optional(default=None)
        This is the variance of the initial draw and is `n x n` and
        also should be positive definite and symmetric

    Attributes
    ----------
    A, C, G, H, mu_0, Sigma_0 : see Parameters
    n, k, m, l : scalar(int)
        The dimensions of x_t, y_t, w_t and v_t respectively
    """
    def __init__(self, A, C, G, H=None, mu_0=None, Sigma_0=None):
        self.A, self.G, self.C = list(map(self.convert, (A, G, C)))
        #-Check Input Shapes-#
        ni,nj = self.A.shape
        if ni != nj:
            raise ValueError("Matrix A (shape: %s) needs to be square" % (self.A.shape))
        # NOTE(review): the message below suggests C should be shape (n, 1),
        # but only the row count is actually checked; C may be n x m.
        if ni != self.C.shape[0]:
            raise ValueError("Matrix C (shape: %s) does not have compatible dimensions with A. It should be shape: %s" % (self.C.shape, (ni,1)))
        self.m = self.C.shape[1]
        self.k, self.n = self.G.shape
        if self.n != ni:
            raise ValueError("Matrix G (shape: %s) does not have compatible dimensions with A (%s)"%(self.G.shape, self.A.shape))
        if H is None:
            self.H = None
            self.l = None
        else:
            self.H = self.convert(H)
            self.l = self.H.shape[1]
        if mu_0 is None:
            self.mu_0 = np.zeros((self.n, 1))
        else:
            self.mu_0 = self.convert(mu_0)
            # Force a column-vector shape (raises if the size is wrong).
            self.mu_0.shape = self.n, 1
        if Sigma_0 is None:
            self.Sigma_0 = np.zeros((self.n, self.n))
        else:
            self.Sigma_0 = self.convert(Sigma_0)
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        m = """\
        Linear Gaussian state space model:
          - dimension of state space : {n}
          - number of innovations : {m}
          - dimension of observation equation : {k}
        """
        return dedent(m.format(n=self.n, k=self.k, m=self.m))
    def convert(self, x):
        """
        Convert array_like objects (lists of lists, floats, etc.) into
        well formed 2D NumPy arrays.

        NOTE(review): the cast is to float32, so inputs are stored in single
        precision -- confirm this is intended rather than float64.
        """
        return np.atleast_2d(np.asarray(x, dtype='float32'))
    def simulate(self, ts_length=100):
        """
        Simulate a time series of length ts_length, first drawing

            x_0 ~ N(mu_0, Sigma_0)

        Parameters
        ----------
        ts_length : scalar(int), optional(default=100)
            The length of the simulation

        Returns
        -------
        x : array_like(float)
            An n x ts_length array, where the t-th column is x_t
        y : array_like(float)
            A k x ts_length array, where the t-th column is y_t
        """
        x0 = multivariate_normal(self.mu_0.flatten(), self.Sigma_0)
        w = np.random.randn(self.m, ts_length-1)
        v = self.C.dot(w) # Multiply each w_t by C to get v_t = C w_t
        # == simulate time series == #
        x = simulate_linear_model(self.A, x0, v, ts_length)
        if self.H is not None:
            v = np.random.randn(self.l, ts_length)
            y = self.G.dot(x) + self.H.dot(v)
        else:
            y = self.G.dot(x)
        return x, y
    def replicate(self, T=10, num_reps=100):
        """
        Simulate num_reps observations of x_T and y_T given
        x_0 ~ N(mu_0, Sigma_0).

        Parameters
        ----------
        T : scalar(int), optional(default=10)
            The period that we want to replicate values for
        num_reps : scalar(int), optional(default=100)
            The number of replications that we want

        Returns
        -------
        x : array_like(float)
            An n x num_reps array, where the j-th column is the j_th
            observation of x_T
        y : array_like(float)
            A k x num_reps array, where the j-th column is the j_th
            observation of y_T
        """
        x = np.empty((self.n, num_reps))
        # Each replication is an independent simulation of length T+1;
        # only the final state is retained.
        for j in range(num_reps):
            x_T, _ = self.simulate(ts_length=T+1)
            x[:, j] = x_T[:, -1]
        if self.H is not None:
            v = np.random.randn(self.l, num_reps)
            y = self.G.dot(x) + self.H.dot(v)
        else:
            y = self.G.dot(x)
        return x, y
    def moment_sequence(self):
        """
        Create a generator to calculate the population mean and
        variance-convariance matrix for both x_t and y_t, starting at
        the initial condition (self.mu_0, self.Sigma_0).  Each iteration
        produces a 4-tuple of items (mu_x, mu_y, Sigma_x, Sigma_y) for
        the next period.

        Yields
        ------
        mu_x : array_like(float)
            An n x 1 array representing the population mean of x_t
        mu_y : array_like(float)
            A k x 1 array representing the population mean of y_t
        Sigma_x : array_like(float)
            An n x n array representing the variance-covariance matrix
            of x_t
        Sigma_y : array_like(float)
            A k x k array representing the variance-covariance matrix
            of y_t
        """
        # == Simplify names == #
        A, C, G, H = self.A, self.C, self.G, self.H
        # == Initial moments == #
        mu_x, Sigma_x = self.mu_0, self.Sigma_0
        while 1:
            mu_y = G.dot(mu_x)
            if H is None:
                Sigma_y = G.dot(Sigma_x).dot(G.T)
            else:
                Sigma_y = G.dot(Sigma_x).dot(G.T) + H.dot(H.T)
            yield mu_x, mu_y, Sigma_x, Sigma_y
            # == Update moments of x == #
            mu_x = A.dot(mu_x)
            Sigma_x = A.dot(Sigma_x).dot(A.T) + C.dot(C.T)
    def stationary_distributions(self, max_iter=200, tol=1e-5):
        """
        Compute the moments of the stationary distributions of x_t and
        y_t if possible.  Computation is by iteration, starting from the
        initial conditions self.mu_0 and self.Sigma_0

        Parameters
        ----------
        max_iter : scalar(int), optional(default=200)
            The maximum number of iterations allowed
        tol : scalar(float), optional(default=1e-5)
            The tolerance level that one wishes to achieve

        Returns
        -------
        mu_x_star : array_like(float)
            An n x 1 array representing the stationary mean of x_t
        mu_y_star : array_like(float)
            An k x 1 array representing the stationary mean of y_t
        Sigma_x_star : array_like(float)
            An n x n array representing the stationary var-cov matrix
            of x_t
        Sigma_y_star : array_like(float)
            An k x k array representing the stationary var-cov matrix
            of y_t
        """
        # == Initialize iteration == #
        m = self.moment_sequence()
        mu_x, mu_y, Sigma_x, Sigma_y = next(m)
        i = 0
        # error starts above tol, so the loop body runs at least once and
        # mu_y1/Sigma_y1 below are always bound before being returned.
        error = tol + 1
        # == Loop until convergence or failure == #
        while error > tol:
            if i > max_iter:
                fail_message = 'Convergence failed after {} iterations'
                raise ValueError(fail_message.format(max_iter))
            else:
                i += 1
                mu_x1, mu_y1, Sigma_x1, Sigma_y1 = next(m)
                # Convergence is measured on the state moments only.
                error_mu = np.max(np.abs(mu_x1 - mu_x))
                error_Sigma = np.max(np.abs(Sigma_x1 - Sigma_x))
                error = max(error_mu, error_Sigma)
                mu_x, Sigma_x = mu_x1, Sigma_x1
        # == Prepare return values == #
        mu_x_star, Sigma_x_star = mu_x, Sigma_x
        mu_y_star, Sigma_y_star = mu_y1, Sigma_y1
        return mu_x_star, mu_y_star, Sigma_x_star, Sigma_y_star
    def geometric_sums(self, beta, x_t):
        """
        Forecast the geometric sums

            S_x := E [sum_{j=0}^{\infty} beta^j x_{t+j} | x_t ]

            S_y := E [sum_{j=0}^{\infty} beta^j y_{t+j} | x_t ]

        Parameters
        ----------
        beta : scalar(float)
            Discount factor, in [0, 1)
        x_t : array_like(float)
            The term x_t for conditioning

        Returns
        -------
        S_x : array_like(float)
            Geometric sum as defined above
        S_y : array_like(float)
            Geometric sum as defined above
        """
        I = np.identity(self.n)
        # S_x solves (I - beta A) S_x = x_t, i.e. S_x = (I - beta A)^{-1} x_t.
        S_x = solve(I - beta * self.A, x_t)
        S_y = self.G.dot(S_x)
        return S_x, S_y
|
agutieda/QuantEcon.py
|
quantecon/lss.py
|
Python
|
bsd-3-clause
| 11,000
|
[
"Gaussian"
] |
be363b493e366ca1ccf21fd95442fc77bd9428787e3a378fa3600d4ac819bea5
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 14:36:29 2017
@author: derek
"""
import os
import tensorflow as tf
import numpy as np
def _parse_function(example_proto):
    """Reads tfrecords with features {shape: (height,width,depth) of cube data,
    label: (malignancy, lobulation, spiculation) labels, cube: usually 32x32x32 data).
    Mapped onto a TFRecord dataset
    Args:
        example_proto: TFRecord protobuffer of data
    Returns:
        shape_int32: (int32) (height,width,depth)
        label_int32: (int32) (malignancy, lobulation, spiculation)
        cube: (float32) height x width x depth data (usually 32x32x32)
    """
    # All three features were serialized as raw byte strings.
    features = {"shape": tf.FixedLenFeature((), tf.string, default_value=""),
                "label": tf.FixedLenFeature((), tf.string, default_value=""),
                "cube": tf.FixedLenFeature((), tf.string, default_value="")}
    parsed_features = tf.parse_single_example(example_proto, features)
    # The raw bytes encode int16 values; widen to int32 for downstream TF ops.
    shape = tf.decode_raw(parsed_features['shape'], tf.int16)
    shape_int32 = tf.cast(shape,tf.int32)
    label = tf.decode_raw(parsed_features['label'], tf.int16)
    label_int32 = tf.cast(label,tf.int32)
    cube_flat = tf.decode_raw(parsed_features['cube'], tf.int16)
    cube_flat_f32 = tf.cast(cube_flat,dtype=tf.float32)
    # Restore the 3-D cube using the per-example shape stored in the record.
    cube = tf.reshape(cube_flat_f32,[shape_int32[0],shape_int32[1],shape_int32[2]])
    return shape_int32,label_int32,cube
def augment_data(transpose_index,k_value,flip_yes_no, cubes):
    """augment data (cubes) by rotating the cubes k_values times, and tranposing
    the indices specified by transpose_index.
    To randomize input:
        transpose_index: random permutation of [0,1,2]
        k_value: random int [0-3]
        flip_yes_no: random int [0-1]
    Args:
        transpose_index: (np array) array discribing the new order of the transposed
            axis [x_axis, y_axis, z_axis] [0,1,2]-> would keep axis unchanged.
        k_value: (int) number of rotations, 0 would keep data unrotated
        flip_yes_no: (int) 1 to apply flipping, 0 to skip it
        cubes: batch of 3-D cubes to augment
    Returns:
        cubes_out: the transposed/rotated (and possibly flipped) batch
    """
    # Permute the axes of every cube, then rotate each one k_value times by 90deg.
    cubes_trans = tf.map_fn(lambda img: tf.transpose(img, transpose_index), cubes)
    cubes_90 = tf.map_fn(lambda img: tf.image.rot90(img,k=k_value), cubes_trans)
    cubes_out = cubes_90
    # NOTE(review): in the driver script flip_yes_no is a tf.Variable, so this
    # Python '== 1' compares the Tensor object, not its runtime value — confirm.
    if flip_yes_no == 1:
        # NOTE(review): random_flip_left_right flips each cube with probability
        # 0.5, not unconditionally as the flag name suggests — confirm intent.
        cubes_out = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), cubes_out)
    return cubes_out
def _normalize(image):
    """Clip values to the HU window [-1000, 400], then rescale linearly so the
    result lies in [-0.5, 0.5]."""
    MIN_BOUND = -1000.0
    MAX_BOUND = 400.0
    # max() then min() implements the clip to [MIN_BOUND, MAX_BOUND].
    clipped = tf.minimum(MAX_BOUND, tf.maximum(MIN_BOUND, image))
    scaled = (clipped - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
    return scaled - 0.5
def _randomize(image):
    """Add randomization to the image by raising the image values to a random
    power between 1 and 10. Then renormalize to -.5 to .5
    Args:
        image: input 3d data cube
    Returns:
        image: image after power and renormalized
    """
    # Shift so the minimum is 0; pow() then keeps all values non-negative.
    image = image - tf.reduce_min(image)
    # A single random exponent in [1, 10) is drawn per call (shape [1]).
    image = tf.pow(image, tf.random_uniform([1],minval=1,maxval=10))
    # Rescale to [0, 1] (the minimum is 0 after the shift), then center on 0.
    image = image/tf.reduce_max(image)
    image = image - 0.5
    return image
##########################################################################
##########################################################################
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.
    Returns:
        Variable Tensor
    """
    var = _variable_initializer(name, shape, initializer=tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
    if wd is not None:
        # Register wd * ||var||^2 / 2 in the 'losses' collection so the total
        # loss can sum it with the data loss later.
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def _variable_initializer(name, shape, initializer):
    """Create (or fetch, under variable reuse) a float32 variable.
    Args:
        name: name of the variable
        shape: list of ints
        initializer: initializer for Variable
    Returns:
        Variable Tensor
    """
    return tf.get_variable(name, shape, initializer=initializer,
                           dtype=tf.float32)
def _activation_summary(x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Currently a no-op: every summary call below is commented out.
    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    #tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    #tf.summary.histogram(tensor_name + '/activations', x)
    #tf.summary.scalar(tensor_name + '/sparsity',
    #tf.nn.zero_fraction(x))
    #tf.summary.histogram(x)
    #tf.summary.scalar(x)
    pass
BATCH_SIZE = 128
#NUM_CLASSES = 3
NUM_CLASSES = 3
global_step = tf.contrib.framework.get_or_create_global_step()
filenames = tf.placeholder(tf.string, shape=[None])
dataset = tf.contrib.data.TFRecordDataset(filenames)
dataset = dataset.map(_parse_function) # Parse the record into tensors.
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.repeat() # Repeat the input indefinitely.
dataset = dataset.batch(BATCH_SIZE)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
transpose_index = tf.Variable(initial_value=[0,1,2],trainable=False,dtype=tf.int32)
k_value = tf.Variable(initial_value=0,trainable=False,dtype=tf.int32)
flip_yes_no = tf.Variable(initial_value=0,trainable=False,dtype=tf.int32)
shape,label,cubes = next_element
cubes = _normalize(cubes) # Normalize t0 -.5 to .5.
cubes = _randomize(cubes)
cubes_aug = augment_data(transpose_index, k_value, flip_yes_no, cubes)
#mal, lob, spic = tf.unstack(label,num = 3)
mal, lob, spic = tf.split(label,3,axis=1)
label_onehot = tf.one_hot(mal,6)
label_f= tf.reshape(mal,[BATCH_SIZE])
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
#with tf.variable_scope('conv1') as scope:
kernel1 = _variable_with_weight_decay('weights1',
shape=[5, 5, 5, 1, 64],
stddev=5e-2,
wd=0.0)
conv1_ = tf.nn.conv3d(cubes_aug, kernel1, [1, 1, 1, 1, 1], padding='SAME')
biases1 = _variable_initializer('biases1', [64], tf.constant_initializer(0.0))
pre_activation1 = tf.nn.bias_add(conv1_, biases1)
conv1 = tf.nn.relu(pre_activation1, name='scope.name1')
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
#with tf.variable_scope('conv2') as scope:
kernel2 = _variable_with_weight_decay('weights2',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv2_ = tf.nn.conv2d(norm1, kernel2, [1, 1, 1, 1], padding='SAME')
biases2 = _variable_initializer('biases2', [64], tf.constant_initializer(0.1))
pre_activation2 = tf.nn.bias_add(conv2_, biases2)
conv2 = tf.nn.relu(pre_activation2, name='scope.name2')
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
#with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
pool2_flatten = tf.reshape(pool2, [BATCH_SIZE, -1])
dim = 4096
weights = _variable_with_weight_decay('weights3', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_initializer('biases3', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(pool2_flatten, weights) + biases, name='scope.name3')
_activation_summary(local3)
# local4
#with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights4', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_initializer('biases4', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='scope.name4')
_activation_summary(local4)
# linear layer(WX + b),
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
#with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights5', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_initializer('biases5', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='scope.name')
mal_, lob_, spic_ = tf.split(softmax_linear,3,axis=1)
_activation_summary(softmax_linear)
#return softmax_linear
##########################################################################
##########################################################################
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
mal_fl32 = tf.cast(mal,tf.float32)
lob_fl32 = tf.cast(lob,tf.float32)
spic_fl32 = tf.cast(spic,tf.float32)
mal_cost = tf.pow(mal_ - mal_fl32, 2)
lob_cost = tf.pow(lob_ - lob_fl32, 2)
spic_cost = tf.pow(spic_ - spic_fl32, 2)
cost_function = tf.reduce_sum(mal_cost + lob_cost + spic_cost)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
#return tf.add_n(tf.get_collection('losses'), name='total_loss')
##########################################################################
##########################################################################
lr = 0.00001
optimizer_ = tf.train.GradientDescentOptimizer(lr)
grads = optimizer_.compute_gradients(cross_entropy_mean)
#grads = optimizer_.compute_gradients(cost_function)
# Apply gradients.
apply_gradient_op = optimizer_.apply_gradients(grads, global_step=global_step)
train_op = apply_gradient_op
sess = tf.InteractiveSession()
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess.run(init)
src_dir_train = "/media/derek/disk1/kaggle_ndsb2017/resources/_tfrecords/train/"
src_dir_test = "/media/derek/disk1/kaggle_ndsb2017/resources/_tfrecords/test/"
filenames_train = os.listdir(src_dir_train)
filenames_test = os.listdir(src_dir_test)
training_filenames = [src_dir_train + f for f in filenames_train]
testing_filenames = [src_dir_test + f for f in filenames_test]
f_train = open("train_rms_values_rand_" + str(lr) + ".txt","a")
f_test = open("test_rms_values_rand_" + str(lr) + ".txt","a")
transpose_possiblities = np.array([[0,1,2],[0,2,1],[1,0,2],[1,2,0],[2,0,1],[2,1,0]])
#sess.run(train_op, feed_dict={transpose_index: transpose_possiblities[np.random.randint(0,6),:], k_value: np.random.randint(0,4)})
for index in range(10000):
sess.run(iterator.initializer, feed_dict={filenames: training_filenames})
for i in range(100):
sess.run(train_op, feed_dict={transpose_index: transpose_possiblities[np.random.randint(0,6),:], k_value: np.random.randint(0,4)})
#train_results = sess.run(cost_function,feed_dict={transpose_index: [0,1,2], k_value: 0})
train_results = sess.run(accuracy,feed_dict={transpose_index: [0,1,2], k_value: 0})
print(train_results)
f_train.write(str(train_results) + "\n")
sess.run(iterator.initializer, feed_dict={filenames: testing_filenames})
#test_results = sess.run(cost_function,feed_dict={transpose_index: [0,1,2], k_value: 0})
test_results = sess.run(accuracy,feed_dict={transpose_index: [0,1,2], k_value: 0})
f_test.write(str(test_results) + "\n")
f_train.flush()
f_test.flush()
if np.mod(index,9)==0:
ave_path = saver.save(sess, "/media/derek/disk1/kaggle_ndsb2017/saved_models/model.ckpt")
|
dereknewman/cancer_detection
|
train_rms_cubes_3d.py
|
Python
|
mit
| 13,192
|
[
"Gaussian"
] |
3beb7474e7abef251e1298788c4d7feaa9ffe24693bb44276157b685bffb6542
|
import pytest
import warnings
import numpy as np
import astropy.units as u
# from astropy.modeling import models, fitting
from ..analysis_utilities import stack_spectra, fourier_shift
from .utilities import generate_gaussian_cube, gaussian
from ..utils import BadVelocitiesWarning
def test_shift():
    """Integer Fourier shifts must match np.roll, with and without NaN masking."""
    peak_amp = 1
    center = 0 * u.m / u.s
    width = 8
    vel_axis = np.arange(-50, 51) * u.m / u.s

    model = gaussian(vel_axis.value, peak_amp, center.value, width)

    # An integer shift is exactly a circular roll of the samples.
    expected = np.roll(model, 10)
    np.testing.assert_allclose(fourier_shift(model, 10), expected, rtol=1e-4)

    # Repeat with the outer channels blanked out.
    blanked = model.copy()
    keep = np.abs(vel_axis.value) <= 30
    blanked[~keep] = np.nan

    expected_masked = expected.copy()
    expected_masked[~np.roll(keep, 10)] = np.nan

    np.testing.assert_allclose(fourier_shift(blanked, 10), expected_masked,
                               rtol=1e-4)
def test_stacking():
    """Stacking identical, randomly offset Gaussian spectra recovers the profile."""
    amp, sigma = 1., 8.
    v0 = 0. * u.km / u.s

    cube, vel_map = generate_gaussian_cube(amp=amp, sigma=sigma, noise=None,
                                           shape=(100, 25, 25))

    model = gaussian(cube.spectral_axis.value, amp, v0.value, sigma)

    # Shift-and-average every spectrum in the cube.
    stacked = stack_spectra(cube, vel_map, v0=v0,
                            stack_function=np.nanmean,
                            xy_posns=None, num_cores=1, chunk_size=-1,
                            progressbar=False, pad_edges=False)

    # The mean stacked profile should match the model very closely.
    assert np.std(np.abs(stacked.value - model)) <= 1e-3

    # Without padding, the spectral axis must be unchanged.
    np.testing.assert_allclose(stacked.spectral_axis.value,
                               cube.spectral_axis.value)
def test_stacking_badvels():
    """Regression test for #493: out-of-range velocities must be skipped."""
    amp, sigma = 1., 8.
    v0 = 0. * u.km / u.s

    cube, vel_map = generate_gaussian_cube(amp=amp, sigma=sigma, noise=None,
                                           shape=(100, 25, 25))

    model = gaussian(cube.spectral_axis.value, amp, v0.value, sigma)

    # Poison one pixel with a velocity far outside the spectral range.
    vel_map[12, 11] = 500 * u.km / u.s

    with pytest.warns(BadVelocitiesWarning,
                      match='Some velocities are outside the allowed range and will be'):
        stacked = stack_spectra(cube, vel_map, v0=v0,
                                stack_function=np.nanmean,
                                xy_posns=None, num_cores=1, chunk_size=-1,
                                progressbar=False, pad_edges=False)

    # The single bad pixel must not degrade the stacked profile.
    assert np.std(np.abs(stacked.value - model)) <= 1e-3
def test_stacking_reversed_specaxis():
    """Stacking also works when the spectral axis is descending."""
    amp, sigma = 1., 8.
    v0 = 0. * u.km / u.s

    cube, vel_map = generate_gaussian_cube(amp=amp, sigma=sigma, noise=None,
                                           shape=(100, 25, 25),
                                           spec_scale=-1. * u.km / u.s)

    model = gaussian(cube.spectral_axis.value, amp, v0.value, sigma)

    stacked = stack_spectra(cube, vel_map, v0=v0,
                            stack_function=np.nanmean,
                            xy_posns=None, num_cores=1, chunk_size=-1,
                            progressbar=False, pad_edges=False)

    assert np.std(np.abs(stacked.value - model)) <= 1e-3

    # The (reversed) spectral axis must be preserved.
    np.testing.assert_allclose(stacked.spectral_axis.value,
                               cube.spectral_axis.value)
def test_stacking_wpadding():
    """With pad_edges=True the stacked spectral axis grows by ~50 per cent."""
    amp, sigma = 1., 8.
    v0 = 0. * u.km / u.s
    shape = (100, 25, 25)

    cube, vel_map = generate_gaussian_cube(shape=shape, amp=amp, sigma=sigma,
                                           noise=None)

    stacked = stack_spectra(cube, vel_map, v0=v0,
                            stack_function=np.nanmean,
                            xy_posns=None, num_cores=1, chunk_size=-1,
                            progressbar=False, pad_edges=True)

    model = gaussian(stacked.spectral_axis.value, amp, v0.value, sigma)

    assert np.std(np.abs(stacked.value - model)) <= 1e-3

    # Padding adds ~25% per side; rounding allows +/- 1 channel.
    expected_size = int(cube.shape[0] * 1.5)
    assert stacked.size in (expected_size - 1, expected_size, expected_size + 1)
def test_padding_direction():
    """Padding must extend toward negative velocities for negative shifts."""
    amp, sigma = 1., 8.
    v0 = 0. * u.km / u.s
    vel_surface = np.array([[0, 5], [5, 10]])

    cube, vel_map = generate_gaussian_cube(shape=(100, 2, 2), amp=amp,
                                           sigma=sigma, noise=None,
                                           vel_surface=vel_surface)

    stacked = stack_spectra(cube, vel_map, v0=v0,
                            stack_function=np.nanmean,
                            xy_posns=None, num_cores=1, chunk_size=-1,
                            progressbar=False, pad_edges=True)

    model = gaussian(stacked.spectral_axis.value, amp, v0.value, sigma)

    # All shifts are negative, so the axis should extend down to -60 km/s.
    assert stacked.spectral_axis.min() == -60 * u.km / u.s
    assert stacked.spectral_axis.max() == 49 * u.km / u.s

    assert np.std(np.abs(stacked.value - model)) <= 1e-3
def test_stacking_woffset():
    """An absolute velocity offset must not affect the stacked result."""
    amp, sigma = 1., 8.
    v0 = 100. * u.km / u.s

    cube, vel_map = generate_gaussian_cube(shape=(100, 25, 25), amp=amp,
                                           sigma=sigma, noise=None,
                                           v0=v0.value)

    stacked = stack_spectra(cube, vel_map, v0=v0,
                            stack_function=np.nanmean,
                            xy_posns=None, num_cores=1, chunk_size=-1,
                            progressbar=False, pad_edges=True)

    model = gaussian(stacked.spectral_axis.value, amp, v0.value, sigma)

    assert np.std(np.abs(stacked.value - model)) <= 1e-3

    # Padded axis is ~1.5x the original, to within one channel.
    expected_size = int(cube.shape[0] * 1.5)
    assert stacked.size in (expected_size - 1, expected_size, expected_size + 1)
def test_stacking_shape_failure():
    """Regression test for #466: bad velocity surfaces must raise ValueError."""
    amp, sigma = 1., 8.
    v0 = 0. * u.km / u.s
    shape = (100, 25, 25)

    cube, vel_map = generate_gaussian_cube(amp=amp, sigma=sigma, noise=None,
                                           shape=shape)

    # Velocity surface with the wrong spatial shape.
    bad_vels = vel_map[:-1, :-1]
    with pytest.raises(ValueError) as exc:
        stack_spectra(cube, bad_vels, v0=v0,
                      stack_function=np.nanmean,
                      xy_posns=None, num_cores=1, chunk_size=-1,
                      progressbar=False, pad_edges=False)
    assert 'Velocity surface map does not match' in exc.value.args[0]

    # Velocity surface that is entirely NaN.
    nan_vels = np.ones(shape[1:], dtype='float') + np.nan
    with pytest.raises(ValueError) as exc:
        stack_spectra(cube, nan_vels, v0=v0,
                      stack_function=np.nanmean,
                      xy_posns=None, num_cores=1, chunk_size=-1,
                      progressbar=False, pad_edges=False)
    assert "velocity_surface contains no finite values" in exc.value.args[0]
def test_stacking_noisy():
    """Stack at S/N ~0.2; averaging over pixels should suppress the noise.

    This cheats slightly since the true peak velocities are known, but it is
    a good check that the stacking machinery itself behaves.
    """
    amp, sigma = 1., 8.
    v0 = 0 * u.km / u.s
    noise = 5.0
    shape = (100, 25, 25)

    cube, vel_map = generate_gaussian_cube(amp=amp, sigma=sigma, noise=noise,
                                           shape=shape)

    stacked = stack_spectra(cube, vel_map, v0=v0,
                            stack_function=np.nanmean,
                            xy_posns=None, num_cores=1, chunk_size=-1,
                            progressbar=False,
                            pad_edges=True)

    model = gaussian(stacked.spectral_axis.value, amp, v0.value, sigma)

    # Residual scatter should drop roughly by sqrt(N_pixels).
    assert np.std(np.abs(stacked.value - model)) <= noise / np.sqrt(shape[1] * shape[2])
# def fit_gaussian(vels, data):
# g_init = models.Gaussian1D()
# fit_g = fitting.LevMarLSQFitter()
# g_fit = fit_g(g_init, vels, data)
# cov = fit_g.fit_info['param_cov']
# if cov is None:
# cov = np.zeros((3, 3)) * np.NaN
# parvals = g_fit.parameters
# parerrs = np.sqrt(np.diag(cov))
# return parvals, parerrs
|
jzuhone/spectral-cube
|
spectral_cube/tests/test_analysis_functions.py
|
Python
|
bsd-3-clause
| 11,793
|
[
"Gaussian"
] |
96536632b458d6aa09810787f88f2e4f9d369da396594cad8e62497622332ed3
|
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
from distutils.core import setup
from distutils.extension import Extension
# from Pyrex.Distutils import build_ext
from Cython.Distutils import build_ext
# Both Pyrex/Cython extensions link against libdirac and compile against the
# Dirac headers installed under /usr/include/dirac.
_dirac_include = ["/usr/include/dirac"]

_dirac_extensions = [
    Extension("dirac_parser",
              ["dirac_parser.pyx"],
              libraries=["dirac_decoder"],
              include_dirs=_dirac_include),
    Extension("dirac_encoder",
              ["dirac_encoder.pyx"],
              libraries=["dirac_encoder"],
              include_dirs=_dirac_include),
]

setup(
    name='Dirac',
    version="0.0.2",
    description="Dirac 0.6.0 bindings for python",
    author="Michael",
    author_email="ms_@users.sourceforge.net",
    url="http://kamaelia.sourceforge.net/",
    ext_modules=_dirac_extensions,
    cmdclass={'build_ext': build_ext},
    long_description="""Initial set of python bindings for Dirac 0.6.0 release.
This API is subject to change. Requires Pyrex, Dirac, and Dirac
headers are expected to live in /usr/local/include/dirac
For information on dirac, see http://dirac.sf.net/
"""
)
|
bbc/kamaelia
|
Code/Python/Bindings/Dirac-0.6.0/setup.py
|
Python
|
apache-2.0
| 1,931
|
[
"DIRAC"
] |
d3e499fec2f80e715f010077017f82922ea4439d1e2c0f20d7573cc504a685c5
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import xml.etree.ElementTree
from xml.etree.cElementTree import ElementTree, fromstring
from .Database import Database
class XMLConfigLoader:
    """Builds an FS-UAE launcher config dict from a game description XML.

    Usage: call load_file(path) or load_data(xml_string), then read the
    result with get_config().
    """

    def __init__(self):
        # Final key/value config handed to the emulator.
        self.config = {}
        # Intermediate options collected from <options> nodes; merged into
        # config at the end of load_tree.
        self.options = {}
        # Viewport specs accumulated by load_option; joined in load_tree.
        self.viewport = []

    def get_config(self):
        """Return a copy of the accumulated config dict."""
        return self.config.copy()

    def load_file(self, path):
        """Parse the XML file at *path* and load its configuration."""
        tree = ElementTree()
        tree.parse(path)
        self.load_tree(tree, path)

    def load_data(self, data):
        """Parse *data* (an XML string) and load its configuration."""
        root = fromstring(data)
        tree = ElementTree(root)
        self.load_tree(tree)

    def load_tree(self, tree, path=""):
        """Populate self.config from a parsed XML tree.

        *path* is the directory context used to resolve relative media
        file names (empty when loading from a string).
        """
        self.tree = tree
        self.path = path
        self.root = self.tree.getroot()

        # Pick the Amiga model from the game's platform.  CD32/CDTV are
        # CD-based and therefore load CD-ROM images instead of floppies.
        cd_based = False
        amiga_model = "A500"
        platform = self.root.find("game").find("platform").text.strip()
        if platform == "CD32":
            amiga_model = "CD32"
            cd_based = True
            self.options["joystick_port_1_mode"] = "cd32 gamepad"
        elif platform == "CDTV":
            amiga_model = "CDTV"
            cd_based = True
        self.config["amiga_model"] = amiga_model

        self.viewport = []
        game_node = self.root.find("game")
        if game_node is not None:
            game_uuid = game_node.get("uuid", "")
            self.config["x_game_uuid"] = game_uuid
            self.load_game_info(game_uuid)
        self.load_options_from_tree(self.tree)

        if self.viewport:
            self.options["viewport"] = ", ".join(self.viewport)

        if cd_based:
            self.load_cdroms()
        else:
            self.load_floppies()
        self.load_hard_drives()

        # for now, just copy all options to config without checking
        # (items() instead of the Python 2-only iteritems())
        for key, value in self.options.items():
            self.config[key] = value
        self.set_name_and_uuid()

    def set_name_and_uuid(self):
        """Derive __config_name in the form 'Game (Platform, Variant)'."""
        game_name = ""
        platform_name = ""
        variant_name = ""
        name_node = self.root.find("name")
        if name_node is not None:
            variant_name = name_node.text.strip()
        game_node = self.root.find("game")
        if game_node is not None:
            game_name_node = game_node.find("name")
            if game_name_node is not None:
                game_name = game_name_node.text.strip()
            game_platform_node = game_node.find("platform")
            if game_platform_node is not None:
                platform_name = game_platform_node.text.strip()
        parts = []
        if platform_name:
            parts.append(platform_name)
        if variant_name:
            parts.append(variant_name)
        # Only set a display name when both a game name and a variant exist.
        if game_name and variant_name:
            config_name = u"{0} ({1})".format(game_name, u", ".join(parts))
            self.config["__config_name"] = config_name

    def load_cdroms(self):
        """Map found CD images onto cdrom_drive_0 / cdrom_image_N keys."""
        print("\n\n\nload_cdroms\n\n\n")
        media_list = self.build_media_list(cds=True)
        for i, values in enumerate(media_list):
            path, sha1 = values
            # Only the first image goes into the (single) CD drive; all
            # images are listed as swappable cdrom_image_N entries.
            if i < 1:
                self.config["cdrom_drive_{0}".format(i)] = path
                self.config["x_cdrom_drive_{0}_sha1".format(i)] = sha1
            self.config["cdrom_image_{0}".format(i)] = path
            self.config["x_cdrom_image_{0}_sha1".format(i)] = sha1

    def load_options_from_tree(self, tree):
        """Feed every child of the <options> node through load_option."""
        root = tree.getroot()
        if root.find("options"):
            for node in root.find("options"):
                print(node.tag)
                print(node.text)
                # XML uses dashes; config keys use underscores.
                key = node.tag.replace("-", "_")
                value = node.text or ""
                self.load_option(key, value)

    def load_option(self, key, value):
        """Handle one option key/value, with special cases for a few keys."""
        if key == "viewport":
            # Normalize the viewport spec to the "src => dst" form.
            if "=" in value:
                value = value.replace("=", "=>")
                value = value.replace("==>", "=>")
            else:
                value = "* * * * => " + value
            self.viewport.append(value)
        elif key == "whdload_args":
            self.options["x_whdload_args"] = value
        elif key == "kickstart":
            if value == "2.0+":
                # Kickstart 2.0+ implies at least an A600.
                self.options["amiga_model"] = "A600"
            elif value == "AROS":
                self.options["kickstart_file"] = "internal"
            else:
                # FIXME: print warning
                pass
        elif key == "cracktro":
            # FIXME: handle
            pass
        else:
            self.options[key] = value

    def load_game_info(self, uuid):
        """Locate the game's own XML by UUID and merge its options."""
        print("load_game_info", uuid)
        path = Database.get_instance().find_game(uuid=uuid)
        if not path:
            print("game xml file not found")
            return
        tree = ElementTree()
        tree.parse(path)
        self.config["x_game_xml_path"] = path
        self.load_options_from_tree(tree)

    def load_floppies(self):
        """Assign floppy images to drives (max floppy_drive_count) and to
        the swappable floppy_image_N list."""
        media_list = self.build_media_list(floppies=True)
        floppy_drive_count = 4
        if "floppy_drive_count" in self.options:
            try:
                floppy_drive_count = int(self.options["floppy_drive_count"])
            except ValueError:
                floppy_drive_count = 1
            # Clamp a user-supplied count to the valid range 0..4.
            floppy_drive_count = max(0, min(4, floppy_drive_count))
        for i, values in enumerate(media_list):
            path, sha1 = values
            if i < floppy_drive_count:
                self.config[u"floppy_drive_{0}".format(i)] = path
                self.config[u"x_floppy_drive_{0}_sha1".format(i)] = sha1
            self.config[u"floppy_image_{0}".format(i)] = path
            self.config[u"x_floppy_image_{0}_sha1".format(i)] = sha1
        if floppy_drive_count < 4:
            self.config["floppy_drive_count"] = floppy_drive_count

    def build_media_list(self, floppies=False, cds=False, hds=False):
        """Collect (path, sha1) pairs for the requested media category.

        Filters the <file> nodes by extension, resolves each to a local
        path via the file database (by sha1, then by name), falling back
        to the file's URL or a path relative to self.path.
        """
        media_list = []
        added = set()
        file_nodes = self.root.findall("file")
        for file_node in file_nodes:
            print(file_node)
            sha1 = ""
            sha1_node = file_node.find("sha1")
            if sha1_node is not None:
                sha1 = sha1_node.text.strip()
            print("sha1", sha1)
            name = file_node.find("name").text.strip()
            if name.startswith("HardDrive/"):
                if hds:
                    p = os.path.join(self.path, "HardDrive")
                    if p in added:
                        # already added
                        continue
                    added.add(p)
                    # FIXME: hack for now
                    sha1 = "01234567-89ab-cdef-0123-456789abcdef"
                    media_list.append((p, sha1))
                else:
                    continue
            base, ext = os.path.splitext(name)
            if hds:
                if ext not in [".zip"]:
                    continue
            elif cds:
                if ext not in [".cue", ".iso"]:
                    continue
                if "(Track" in base:
                    # probably part of a split multi-track cue
                    continue
            elif floppies:
                if ext not in [".adf", ".adz", ".dms", ".ipf"]:
                    continue
            url_node = file_node.find("url")
            if url_node is not None:
                url = url_node.text.strip()
            else:
                url = ""
            # Resolution order: local file by sha1, then the URL, then a
            # local file by name, then a path relative to the XML file.
            path = ""
            found_sha1 = ""
            if sha1:
                print(sha1)
                path = Database.get_instance().find_file(sha1=sha1)
                if path:
                    found_sha1 = sha1
            if url and not path:
                path = url
                found_sha1 = sha1
            if not path:
                path = Database.get_instance().find_file(name=name)
            if not path:
                if self.path:
                    # loaded from an external XML file:
                    path = os.path.join(self.path, name)
            if path:
                media_list.append((path, found_sha1))
            else:
                # FIXME: handle it with a visible error message
                pass
        return media_list

    def load_hard_drives(self):
        """Assign hard drive images/directories to hard_drive_N keys."""
        print("load_hard_drives")
        media_list = self.build_media_list(hds=True)
        print(media_list)
        for i, values in enumerate(media_list):
            path, sha1 = values
            self.config[u"hard_drive_{0}".format(i)] = path
            self.config[u"x_hard_drive_{0}_sha1".format(i)] = sha1
|
cnvogelg/fs-uae-gles
|
launcher/fs_uae_launcher/XMLConfigLoader.py
|
Python
|
gpl-2.0
| 10,017
|
[
"ADF"
] |
737978590c29897f6ed9ab072a86edcd51c02a4c624587d8727956bcfcdb8d25
|
import os
import json
import configparser
import better_exceptions
from datetime import datetime
from operator import itemgetter
from jinja2 import Environment, FileSystemLoader
# Show full values in better-exceptions tracebacks (no truncation).
better_exceptions.MAX_LENGTH = None
# Absolute directory of this script; all file paths are resolved against it.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
def atom_date(timestamp):
    """Render a Unix *timestamp* as an ATOM-style UTC datetime string."""
    moment = datetime.utcfromtimestamp(int(timestamp))
    return moment.isoformat('T') + 'Z'
def rss_date(timestamp):
    """Render a Unix *timestamp* as an RFC822-style UTC datetime string."""
    moment = datetime.utcfromtimestamp(int(timestamp))
    # %e (space-padded day) is a POSIX strftime extension.
    return moment.strftime('%a, %e %b %Y %H:%M:%S') + 'Z'
def get_id(episode, dltype=''):
    """Build the anchor id used for linking: the episode code stripped of
    '_' and 'x', plus an optional download-type suffix."""
    cleaned = episode.replace('_', '').replace('x', '')
    return cleaned + dltype
def get_cat(folder):
    """Map a comics folder name onto its display category name."""
    categories = {
        'comic': 'Main Comics (2014-15)',
        '2017comic': 'Main Comics (2017)',
        'book': 'Books',
        'special': 'Specials',
        'sucg': 'Steven and the Crystal Gems',
        'harmony': 'Harmony',
    }
    return categories.get(folder, 'Others')
def format_epnumber(episode):
    """Human-readable season/episode label for an episode code."""
    # Season code 99 marks the movie.
    if episode.startswith('99'):
        return 'Movie'
    return episode.replace('_', ' & ').replace('00x', 'Special ')
def split_season_episode(episode):
    """Split 'SSxEE' into [season_label, episode_number].

    Special season codes are renamed: 06 -> Future, 99 -> Movie,
    00 -> Special.
    """
    parts = episode.split('x')
    season = parts[0].replace('06', 'Future').replace('99', 'Movie').replace('00', 'Special')
    return [season, parts[1]]
def gen_dl_page():
    """Read config/dl.ini and render the downloads HTML page plus the
    ATOM and RSS feeds into the public/ directory.

    Each ini section (preair, sufuture, itunes, individual, comics,
    music) holds comma-separated fields per entry; the exact field order
    per section is defined by the parsing below.
    """
    # Main function
    # Loading our config
    config = configparser.ConfigParser()
    config.read(os.path.join(THIS_DIR, 'config', 'dl.ini'))
    # Initialize some variables
    preair = []
    sufuture = []
    itunes = []
    individual = []
    comics = []
    musics = []
    # Load every "preair" episodes
    for episode in dict(config.items('preair')):
        # Parse episode data
        # fields: title, filename, torrent flag, ctoon id, dailymotion id, date
        data = config['preair'][episode].split(',')
        se = split_season_episode(episode)
        # Add it to the list
        preair.append({
            'id': get_id(episode, 'tmp'),
            'code': format_epnumber(episode),
            'season': se[0],
            'episode': se[1],
            'title': data[0],
            'filename': data[1],
            'torrent': data[2],
            'ctoon': data[3],
            'daily': data[4],
            'date': data[5]
        })
    # Load every "future" episodes
    for episode in dict(config.items('sufuture')):
        # Parse episode data
        # fields: title, filename, ctoon id, date
        data = config.get('sufuture', episode).split(',')
        # Add it to the list
        sufuture.append({
            'id': get_id(episode, 'f'),
            'code': episode,
            'season': 'Future',
            'episode': episode,
            'title': data[0],
            'filename': data[1],
            'ctoon': data[2],
            'date': data[3]
        })
    # Load every "itunes" episodes
    for episode in dict(config.items('itunes')):
        # Parse episode data
        # fields: title, filename, torrent flag, ctoon id, date
        data = config.get('itunes', episode).split(',')
        se = split_season_episode(episode)
        # Add it to the list
        itunes.append({
            'id': get_id(episode, 'i'),
            'code': format_epnumber(episode),
            'season': se[0],
            'episode': se[1],
            'title': data[0],
            'filename': data[1],
            'torrent': data[2],
            'ctoon': data[3],
            'date': data[4]
        })
    # Load every "individual" episodes
    for episode in dict(config.items('individual')):
        # Parse episode data
        # fields: title, filename, ctoon id, date
        data = config.get('individual', episode).split(',')
        se = split_season_episode(episode)
        # Add it to the list
        individual.append({
            'id': get_id(episode, 'm'),
            'code': format_epnumber(episode),
            'season': se[0],
            'episode': se[1],
            'title': data[0],
            'filename': data[1],
            'ctoon': data[2],
            'date': data[3]
        })
    # Load comics and books
    for comic in dict(config.items('comics')):
        # Parse episode data
        # fields: title, folder, cbz/cbr/pdf/epub availability flags, date
        data = config.get('comics', comic).split(',')
        # Add it to the list
        comics.append({
            'id': comic.upper(),
            'title': data[0],
            'folder': data[1],
            'category': get_cat(data[1]),
            'cbz': data[2],
            'cbr': data[3],
            'pdf': data[4],
            'epub': data[5],
            'date': data[6]
        })
    # Load music
    for music in dict(config.items('music')):
        # Parse episode data
        # fields: title, mp3 path, flac path, date
        data = config.get('music', music).split(',')
        # Add it to the list
        musics.append({
            'id': music.upper(),
            'title': data[0],
            'mp3': data[1],
            'flac': data[2],
            'date': data[3]
        })
    # Sort episodes
    preair = sorted(preair, key=itemgetter('code'))
    sufuture = sorted(sufuture, key=itemgetter('code'))
    itunes = sorted(itunes, key=itemgetter('code'))
    individual = sorted(individual, key=itemgetter('code'))
    comics = sorted(comics, key=itemgetter('category'))
    musics = sorted(musics, key=itemgetter('id'))
    # Get current date
    dategen = datetime.utcnow().strftime('%B %d %Y at %H:%M:%S')
    atomnow = datetime.utcnow().isoformat('T') + 'Z'
    # Load the environment for the templates
    j2_html = Environment(loader=FileSystemLoader(os.path.join(THIS_DIR, 'templates', 'html')), trim_blocks=True)
    j2_xml = Environment(loader=FileSystemLoader(os.path.join(THIS_DIR, 'templates', 'xml')), trim_blocks=True)
    j2_xml.filters['atomdate'] = atom_date
    j2_xml.filters['rssdate'] = rss_date
    # Generate html
    j2_html.get_template('dl.html').stream(
        pagetype='page-dl', pagename='Downloads', pagedesc='Direct links or torrents to episodes', dategen=dategen,
        # Add episodes/comics/musics lists
        preair=preair, sufuture=sufuture, itunes=itunes, individual=individual, comics=comics, musics=musics,
        # If there's nothing, we'll not display the "preair" list
        lenpa=len(preair))\
        .dump(os.path.join(THIS_DIR, 'public', 'dl.html'))
    # Concatenate every list and sort by date for the feeds
    allep = preair + sufuture + itunes + individual
    allep = sorted(allep, key=itemgetter('date'), reverse=True)[:15]
    # Generate feeds
    j2_xml.get_template('dl-atom.xml').stream(episodes=allep, lastupdate=atomnow)\
        .dump(os.path.join(THIS_DIR, 'public', 'dl.xml'))
    j2_xml.get_template('dl-rss.xml').stream(episodes=allep)\
        .dump(os.path.join(THIS_DIR, 'public', 'dl.rss'))
def gen_dl_api():
    """Read config/dl.ini and write the JSON API endpoint api/dl.json.

    Parses the same ini sections as gen_dl_page but emits absolute URLs
    and integer ids/dates suitable for machine consumption.
    """
    # Generate API endpoint
    # Loading our config
    config = configparser.ConfigParser()
    config.read(os.path.join(THIS_DIR, 'config', 'dl.ini'))
    # Initialize some variables
    preair = []
    sufuture = []
    itunes = []
    individual = []
    comics = []
    # Load every "preair" episodes
    for episode in dict(config.items('preair')):
        # Parse episode data
        data = config['preair'][episode].split(',')
        se_ep = split_season_episode(episode)
        # data[2] is a '1'/'0' flag for torrent availability.
        if data[2] == '1':
            torrent = 'https://cadl.sug.rocks/preair/' + data[1] + '.torrent'
        else:
            torrent = None
        # Add it to the list
        preair.append({
            'id': int(get_id(episode)),
            'season': se_ep[0],
            'episode': se_ep[1],
            'title': data[0],
            'url': 'https://cadl.sug.rocks/preair/' + data[1],
            'ctoon': 'https://ctoon.party/show/sun/watch/' + data[3],
            'dailymotion': 'www.dailymotion.com/video/' + data[4],
            'torrent': torrent,
            'date': int(data[5])
        })
    # Load every "sufuture" episodes
    for episode in dict(config.items('sufuture')):
        # Parse episode data
        data = config['sufuture'][episode].split(',')
        torrent = None
        # Add it to the list
        sufuture.append({
            'id': int(get_id(episode)),
            'season': 'Future',
            'episode': episode,
            'title': data[0],
            'url': 'https://cadl.sug.rocks/' + data[1],
            'ctoon': 'https://ctoon.party/show/sun/watch/' + data[2],
            'dailymotion': None,
            'torrent': None,
            'date': int(data[3])
        })
    # Load every "itunes" episodes
    for episode in dict(config.items('itunes')):
        # Parse episode data
        data = config.get('itunes', episode).split(',')
        se_ep = split_season_episode(episode)
        # data[2] is a '1'/'0' flag for torrent availability.
        if data[2] == '1':
            torrent = 'https://cadl.sug.rocks/torrents/' + data[1] + '.torrent'
        else:
            torrent = None
        # Add it to the list
        itunes.append({
            'id': int(get_id(episode)),
            'season': se_ep[0],
            'episode': se_ep[1],
            'title': data[0],
            'url': 'https://cadl.sug.rocks/' + data[1],
            'ctoon': 'https://ctoon.party/show/sun/watch/' + data[3],
            'dailymotion': None,
            'torrent': torrent,
            'date': int(data[4])
        })
    # Load every "individual" episodes
    for episode in dict(config.items('individual')):
        # Parse episode data
        data = config.get('individual', episode).split(',')
        se_ep = split_season_episode(episode)
        # Add it to the list
        individual.append({
            'id': int(get_id(episode)),
            'season': se_ep[0],
            'episode': se_ep[1],
            'title': data[0],
            'url': 'https://cadl.sug.rocks/mega/' + data[1],
            'ctoon': 'https://ctoon.party/show/sun/watch/' + data[2],
            'dailymotion': None,
            'torrent': None,
            'date': int(data[3])
        })
    # Load comics and books
    for comic in dict(config.items('comics')):
        # Parse episode data
        data = config.get('comics', comic).split(',')
        # Add it to the list
        # data[2..5] are '1'/'0' flags for cbz/cbr/pdf/epub availability.
        if data[2] == '1':
            cbz = 'https://cadl.sug.rocks/comics/' + data[1] + '/SUG-CBZ-' + comic.upper() + '.cbz'
        else:
            cbz = None
        if data[3] == '1':
            cbr = 'https://cadl.sug.rocks/comics/' + data[1] + '/SUG-CBR-' + comic.upper() + '.cbr'
        else:
            cbr = None
        if data[4] == '1':
            pdf = 'https://cadl.sug.rocks/comics/' + data[1] + '/SUG-PDF-' + comic.upper() + '.pdf'
        else:
            pdf = None
        if data[5] == '1':
            epub = 'https://cadl.sug.rocks/comics/' + data[1] + '/SUG-EPUB-' + comic.upper() + '.epub'
        else:
            epub = None
        comics.append({
            'id': comic.upper(),
            'title': data[0],
            'category': get_cat(data[1]),
            'cbz': cbz,
            'cbr': cbr,
            'pdf': pdf,
            'epub': epub,
            'date': int(data[6])
        })
    # Sort episodes
    preair = sorted(preair, key=itemgetter('id'))
    sufuture = sorted(sufuture, key=itemgetter('id'))
    itunes = sorted(itunes, key=itemgetter('id'))
    individual = sorted(individual, key=itemgetter('id'))
    comics = sorted(comics, key=itemgetter('category'))
    # Make our json object
    api = {}
    api['_'] = {"generated": int(datetime.utcnow().timestamp())}
    api['preair'] = preair
    api['sufuture'] = sufuture
    api['itunes'] = itunes
    api['individual'] = individual
    api['comics'] = comics
    # Save it
    with open(os.path.join(THIS_DIR, 'api', 'dl.json'), 'w') as f:
        f.write(json.dumps(api, indent=2, sort_keys=True))
if __name__ == '__main__':
    # Regenerate the downloads page, the feeds, and the JSON API.
    gen_dl_page()
    gen_dl_api()
|
sugrocks/website
|
dllist.py
|
Python
|
mit
| 11,902
|
[
"CRYSTAL"
] |
3027659ef55ae55511d5403c26d803d2c178351fc387c19e8c186809b2a1fdc6
|
#!/usr/bin/env python
# Author : Pierre Schnizer
"""
Wrapper for the ode solver of gsl. This solver wraps all features as described
in Chapter 25 of the gsl documentation.
The _odeiv file provides the low level wrapper. Direct usage at your special
own risk.
Here is the pythonic version of the example from the gsl documentation.
import odeiv
mu = 10.0
def func(t, y):
f = Numeric.zeros((2,), Numeric.Float) * 1.0
f[0] = y[1]
f[1] = -y[0] - mu * y[1] * (y[0] ** 2 -1);
return f
def jac(t, y):
dfdy = Numeric.zeros((2,2), Numeric.Float)
dfdy[0, 0] = 0.0
dfdy[0, 1] = 1.0
dfdy[1, 0] = -2.0 * mu * y[0] * y[1] - 1.0
dfdy[1, 1] = -mu * (y[0]**2 - 1.0)
dfdt = Numeric.zeros((2,))
return dfdy, dfdt
dimension = 2
step = odeiv.step_gear1(dimension, func, jac)
control = odeiv.control_y_new(step, 1e-6, 1e-6)
evolve = odeiv.evolve(step, control, dimension)
h = 1
t = 0.0
t1 = 100.0
y = (1.0, 0.0)
while t<t1:
t, h, y = evolve.apply(t, t1, h, y)
print t, y[0], y[1]
"""
#
# Copyright (c) 2002 by Pierre Schnizer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
##
# author: Pierre Schnizer
# created: December 2002
# file: pygsl/src/odeiv/odeiv.py
import _callback
class __step:
    """Abstract base class wrapping a GSL ODE stepping function.

    The lowest level components are the stepping functions which advance a
    solution from time t to t+h for a fixed step-size h and estimate the
    resulting local error.

    Pure virtual class; use one of the derived objects instead:

        step_rk2, step_rk4, step_rkf45, step_rkck, step_rk8pd,
        step_rk2imp, step_rk4imp, step_bsimp, step_gear1, step_gear2

    Subclasses must provide the class attributes ``type`` (the low-level
    GSL stepper type) and ``need_jacobian`` (non-zero when the algorithm
    requires a Jacobian callback).
    """
    def __init__(self, dims, func, jac=None, args=None):
        """
        dims ... the dimension of the system
        func ... the function describing the system: func(t, y) -> dy/dt
        jac  ... the Jacobian callback returning (dfdy, dfdt); optional,
                 but mandatory for steppers with need_jacobian >= 1
        args ... additional arguments passed on to func/jac; optional
        """
        # Initialise ptr first so __del__ stays safe if allocation fails.
        self.ptr = None
        if not hasattr(self, 'type'):
            # Only subclasses define 'type'; refuse direct instantiation.
            # Fixed: parenthesised raise form is valid in Python 2 *and* 3,
            # unlike the old "raise TypeError, msg" statement syntax.
            raise TypeError("""You can not use step directly. You should use
            one of the derived classes!""")
        self.ptr = _callback.gsl_odeiv_step_alloc(self.type, dims)
        self.func = func
        if jac is None:  # fixed: identity test instead of "== None"
            if self.need_jacobian >= 1:
                raise ValueError("""This step object must use an jacobian
                matrix!""")
            self.jac = None
        else:
            self.jac = jac
        self.args = args
    def __del__(self):
        # hasattr guard: __del__ can run even when __init__ raised early.
        if hasattr(self, 'ptr'):
            if self.ptr is not None:
                _callback.gsl_odeiv_step_free(self.ptr)
    def reset(self):
        """Reset the stepper state.

        Use whenever the next step will not be a continuation of the
        previous one.
        """
        _callback.gsl_odeiv_step_reset(self.ptr)
    def apply(self, t, h, y_in, dydt):
        """Advance the system one fixed step of size h.

        Input: t, h, y_in, dydt
            t    ... start time t
            h    ... step size
            y_in ... state vector at time t
            dydt ... derivatives at t, or None to compute them internally
                     (passing them allows reuse of existing derivative
                     information)
        Output: (y_out, yerr, dydt)
            y_out ... state vector at t+h
            yerr  ... estimate of the absolute error in each component
            dydt  ... derivatives of the system at t+h
        """
        return _callback.gsl_odeiv_step_apply(self.ptr, t, h, y_in, dydt,
                                              self.func, self.jac, self.args)
    def order(self):
        """Return the order of the stepping function on the previous step.

        This order can vary if the stepping function itself is adaptive.
        """
        return _callback.gsl_odeiv_step_order(self.ptr)
    def name(self):
        """Return the name of the stepping function."""
        return _callback.gsl_odeiv_step_name(self.ptr)
    # Internal accessors used by the control and evolve wrappers.
    def _get_ptr(self):
        return self.ptr
    def _get_func(self):
        return self.func
    def _get_jac(self):
        return self.jac
    def _get_args(self):
        return self.args
# ---------------------------------------------------------------------------
# Concrete stepper types.  Each subclass only supplies two class attributes:
#   type          ... the GSL stepper type handle from the low-level wrapper
#   need_jacobian ... non-zero if the algorithm requires a Jacobian callback
# ---------------------------------------------------------------------------
class step_rk2(__step):
    """
    Embedded 2nd order Runge-Kutta with 3rd order error estimate.
    """
    type = _callback.cvar.gsl_odeiv_step_rk2
    need_jacobian = 0
class step_rk4(__step):
    """
    4th order (classical) Runge-Kutta.
    """
    type = _callback.cvar.gsl_odeiv_step_rk4
    need_jacobian = 0
class step_rkf45(__step):
    """
    Embedded 4th order Runge-Kutta-Fehlberg method with 5th order error
    estimate. This method is a good general-purpose integrator.
    """
    type = _callback.cvar.gsl_odeiv_step_rkf45
    need_jacobian = 0
class step_rkck(__step):
    """
    Embedded 4th order Runge-Kutta Cash-Karp method with 5th order error
    estimate.
    """
    type = _callback.cvar.gsl_odeiv_step_rkck
    need_jacobian = 0
class step_rk8pd(__step):
    """
    Embedded 8th order Runge-Kutta Prince-Dormand method with 9th order error
    estimate.
    """
    type = _callback.cvar.gsl_odeiv_step_rk8pd
    need_jacobian = 0
class step_rk2imp(__step):
    """
    Implicit 2nd order Runge-Kutta at Gaussian points
    """
    type = _callback.cvar.gsl_odeiv_step_rk2imp
    need_jacobian = 0
class step_rk4imp(__step):
    """
    Implicit 4th order Runge-Kutta at Gaussian points
    """
    type = _callback.cvar.gsl_odeiv_step_rk4imp
    need_jacobian = 0
class step_bsimp(__step):
    """
    Implicit Bulirsch-Stoer method of Bader and Deuflhard. This algorithm
    requires the Jacobian.
    """
    type = _callback.cvar.gsl_odeiv_step_bsimp
    # The only stepper here that refuses construction without a jac callback.
    need_jacobian = 1
class step_gear1(__step):
    """
    M=1 implicit Gear method
    """
    type = _callback.cvar.gsl_odeiv_step_gear1
    need_jacobian = 0
class step_gear2(__step):
    """
    M=2 implicit Gear method
    """
    type = _callback.cvar.gsl_odeiv_step_gear2
    need_jacobian = 0
# Return codes of control.hadjust(): the step size was decreased,
# increased, or left unchanged respectively.
HADJ_DEC = _callback.gsl_odeiv_hadj_dec
HADJ_INC = _callback.gsl_odeiv_hadj_inc
HADJ_NIL = _callback.gsl_odeiv_hadj_nil
class __control:
    """
    The control function examines the proposed change to the solution and its
    error estimate produced by a stepping function and attempts to determine
    the optimal step-size for a user-specified level of error.
    Pure virtual class for the control.
    Use either control_standard_new or control_y_new or control_yp_new
    """
    def __del__(self):
        # 'ptr' may be missing or None if a subclass __init__ failed early.
        if hasattr(self, 'ptr'):
            if self.ptr != None:
                _callback.gsl_odeiv_control_free(self.ptr)
    def hadjust(self, y, yerr, dydt, h):
        """
        input: y, yerr, dydt
        y ...
        yerr ... the error estimate
        dydt ...
        h ... last step size
        output: h, msg
        h ... new step size
        msg ... HADJ_DEC or HADJ_INC or HADJ_NIL. See text.
        This method adjusts the step-size h using the current values of y,
        yerr and dydt. If the error in the y-values yerr is found to be too
        large then the step-size h is reduced and the function returns
        HADJ_DEC. If the error is sufficiently small then h may be increased
        and HADJ_INC is returned. The function returns HADJ_NIL if the
        step-size is unchanged. The goal of the function is to estimate the
        largest step-size which satisfies the user-specified accuracy
        requirements for the current point.
        """
        # self.step is set by the concrete subclass' __init__.
        step = self.step._get_ptr()
        h, msg = _callback.gsl_odeiv_control_hadjust(self.ptr, step, y, yerr,
                                                     dydt, h)
        return h, msg
    def name(self):
        """Return the name of this control function."""
        return _callback.gsl_odeiv_control_name(self.ptr)
    def _get_ptr(self):
        # Raw GSL pointer; consumed by the evolve wrapper.
        return self.ptr
class control_standard_new(__control):
    """
    The standard control object is a four parameter
    heuristic based on absolute and relative errors eps_abs and eps_rel, and
    scaling factors a_y and a_dydt for the system state y(t) and derivatives
    y'(t) respectively.
    The step-size adjustment procedure for this method begins by computing the
    desired error level D_i for each component,
    D_i = eps_abs + eps_rel * (a_y |y_i| + a_dydt h |y'_i|)
    and comparing it with the observed error E_i = |yerr_i|. If the observed
    error E exceeds the desired error level D by more than 10% for any
    component then the method reduces the step-size by an appropriate factor,
    h_new = h_old * S * (D/E)^(1/q)
    where q is the consistency order of method (e.g. q=4 for 4(5) embedded
    RK), and S is a safety factor of 0.9. The ratio D/E is taken to be the
    maximum of the ratios D_i/E_i.
    If the observed error E is less than 50% of the desired error level D for
    the maximum ratio D_i/E_i then the algorithm takes the opportunity to
    increase the step-size to bring the error in line with the desired level,
    h_new = h_old * S * (E/D)^(1/(q+1))
    This encompasses all the standard error scaling methods.
    """
    def __init__(self, step, eps_abs, eps_rel, a_y, a_dydt):
        """
        input : step, eps_abs, eps_rel, a_y, a_dydt
        step ... the step object; stored so hadjust() can reach its pointer.
        See the docstring of this class for the meaning of the other input.
        """
        self.step = step
        # Set ptr to None before allocating so __del__ is always safe.
        self.ptr = None
        self.ptr = _callback.gsl_odeiv_control_standard_new(eps_abs, eps_rel,
                                                            a_y, a_dydt)
class control_y_new(__control):
    """
    Creates a new control object which will keep the local error on each step
    within an absolute error of eps_abs and relative error of eps_rel with
    respect to the solution y_i(t). This is equivalent to the standard control
    object with a_y=1 and a_dydt=0.
    See also the documentation of the control_standard_new class
    """
    def __init__(self, step, eps_abs, eps_rel):
        """
        input : step, eps_abs, eps_rel
        step ... the step object; stored so hadjust() can reach its pointer.
        See the docstring of this class for the meaning of the other input.
        """
        self.step = step
        # Set ptr to None before allocating so __del__ is always safe.
        self.ptr = None
        self.ptr = _callback.gsl_odeiv_control_y_new(eps_abs, eps_rel)
class control_yp_new(__control):
    """
    This function creates a new control object which will keep the local error
    on each step within an absolute error of eps_abs and relative error of
    eps_rel with respect to the derivatives of the solution y'_i(t) . This is
    equivalent to the standard control object with a_y=0 and a_dydt=1.
    """
    def __init__ (self, step, eps_abs, eps_rel):
        """
        input : step, eps_abs, eps_rel
        step ... the step object; stored so hadjust() can reach its pointer.
        See the docstring of this class for the meaning of the other input.
        """
        self.step = step
        # Set ptr to None before allocating so __del__ is always safe.
        self.ptr = None
        self.ptr = _callback.gsl_odeiv_control_yp_new(eps_abs, eps_rel)
class evolve:
    """Highest-level driver combining a stepper and a control object.

    The evolution function combines the results of a stepping function
    and a control function to reliably advance the solution forward over
    an interval (t_0, t_1).  If the control function signals that the
    step-size should be decreased, the evolution function backs out of
    the current step and tries the proposed smaller step-size; this is
    repeated until an acceptable step-size is found.
    """
    def __init__(self, step, control, dimension):
        """
        input: step, control, dimension
            step      ... a step object
            control   ... a control object
            dimension ... dimension of the problem
        """
        # Keep references to the wrapper objects so the underlying GSL
        # pointers remain valid for the lifetime of this object.
        self.step = step
        self.control = control
        self.ptr = None  # set before alloc so __del__ is always safe
        self.ptr = _callback.gsl_odeiv_evolve_alloc(dimension)
        self.func = self.step._get_func()
        self.jac = self.step._get_jac()
        self.args = self.step._get_args()
        # Bundle the three raw pointers once; passed to every apply call.
        tmp = self.step._get_ptr(), self.control._get_ptr(), self.ptr
        self._solvers_tuple = tuple(tmp)
    def __del__(self):
        # hasattr guard: __del__ can run even if __init__ raised early.
        if hasattr(self, 'ptr'):
            if self.ptr is not None:  # fixed: identity test, not "!= None"
                _callback.gsl_odeiv_evolve_free(self.ptr)
    def reset(self):
        """
        No input. No output
        This method resets the evolution. It should be used whenever
        the next use will not be a continuation of a previous step.
        """
        _callback.gsl_odeiv_evolve_reset(self.ptr)
    def apply(self, t, t1, h, y):
        """
        input : t, t1, h, y
            t  ... start time
            t1 ... end time
            h  ... initial step size
            y  ... start vector
        output : (t, h, y)
            t ... reached time in the calculation
            h ... reached step size
            y ... end vector

        This method advances the system from time t and position y using
        the stepping function.  The initial step-size is taken as h, but
        this will be modified to achieve the appropriate error bound if
        necessary; the routine may make several calls to the step object
        in order to determine the optimum step-size.  The maximum time
        t1 is guaranteed not to be exceeded; on the final time-step the
        value of t will be set to t1 exactly.
        """
        tmp = _callback.gsl_odeiv_evolve_apply(self._solvers_tuple,
                                               self.func, self.jac, t, t1, h,
                                               y, self.args)
        return tmp
    def apply_vector(self, t, t1, h, y, nsteps=1, hmax=None):
        """Like apply(), but may take up to nsteps steps in one call.

        hmax ... presumably an upper bound on the step size when given
                 -- TODO confirm against the low-level _callback wrapper.
        """
        extra = (nsteps,)
        if hmax is not None:  # fixed: identity test, not "!= None"
            extra = nsteps, hmax
        tmp = _callback.gsl_odeiv_evolve_apply_vector(self._solvers_tuple,
                                                      self.func, self.jac,
                                                      t, t1, h,
                                                      y, self.args, *extra)
        return tmp
|
juhnowski/FishingRod
|
production/pygsl-0.9.5/pygsl/odeiv.py
|
Python
|
mit
| 15,809
|
[
"Gaussian"
] |
23965fe2731b6c252c0950629dd51116b00f74f9bbb3df801783623c1cbb4fb2
|
"""This module contains all of the important meta-information for
Hypatia such as the author's name, the copyright and license, status,
and so on.
"""
__author__ = "Lillian Lemmer"
__copyright__ = "Copyright 2015 Lillian Lemmer"
__credits__ = ["Lillian Lemmer"]
__license__ = "MIT"
__maintainer__ = __author__
__site__ = "http://lillian-lemmer.github.io/hypatia/"
__email__ = "lillian.lynn.lemmer@gmail.com"
__status__ = "Development"
# specifically code contributors
__contributors__ = [
"Lillian Lemmer",
"Eric James Michael Ritz",
"Brian Houston Morrow",
"William D. Jones",
]
__version__ = '0.2.29'
|
brechin/hypatia
|
hypatia/__init__.py
|
Python
|
mit
| 706
|
[
"Brian"
] |
3263f1471a341935fa18a5b5e5b7d6cbcc4ccd484e99af0f11adefdc60e10acf
|
"""
Visualization of ARGs (Ancestral Recombination Graphs)
"""
# python imports
from itertools import chain, izip
import random
from math import *
# rasmus imports
from rasmus import util, treelib, stats, sets
# compbio imports
from compbio import arglib
# summon imports
import summon
from summon import sumtree
from summon.shapes import box
from summon.core import *
#=============================================================================
# visualization
def minlog(x, default=10):
    """Return log(x) with x clamped from below at *default*.

    Guards against taking the logarithm of zero, negative, or very
    small values.
    """
    clamped = x if x > default else default
    return log(clamped)
def layout_arg_leaves(arg):
    """Layout the leaves of an ARG.

    Returns a dict mapping each leaf node of *arg* to an integer
    x-coordinate.  The order comes from a plain tree built out of the
    ARG's coalescent structure, so related leaves end up adjacent.
    """
    basetree = treelib.Tree()
    # Process nodes youngest-first so children are handled before parents.
    nodes = list(arg.postorder())
    nodes.sort(key=lambda x: x.age)
    lookup = {}
    for node in nodes:
        if node.is_leaf():
            lookup[node] = basetree.new_node(node.name)
        else:
            # Map each ARG child to the root of its current base subtree.
            basechildren = []
            for child in node.children:
                basechild = lookup[child]
                while basechild.parent:
                    basechild = basechild.parent
                basechildren.append(basechild)
            basechildren = util.unique(basechildren)
            if len(basechildren) > 1:
                # Multiple subtrees meet here: join them under a new node.
                lookup[node] = basenode = basetree.new_node(node.name)
                for basechild in basechildren:
                    basetree.add_child(basenode, basechild)
            else:
                # Pass-through node: reuse the single existing subtree.
                lookup[node] = basechildren[0]
    basetree.root = lookup[nodes[-1]]
    # assign layout based on basetree layout
    # layout leaves
    return dict((arg[name], i) for i, name in enumerate(basetree.leaf_names()))
def layout_arg(arg, leaves=None, yfunc=lambda x: x):
    """Layout the nodes of an ARG.

    leaves ... optional explicit leaf order (left to right)
    yfunc  ... transform applied to node ages to give y-coordinates

    Returns a dict: node -> [x, y].  Internal nodes are centered over
    the mean x of their children; y is the (transformed) node age.
    """
    layout = {}
    # layout leaves
    if leaves is None:
        leafx = layout_arg_leaves(arg)
    else:
        leafx = util.list2lookup(leaves)
    for node in arg.postorder():
        if node.is_leaf():
            layout[node] = [leafx[node], yfunc(node.age)]
        else:
            layout[node] = [
                stats.mean(layout[child][0] for child in node.children),
                yfunc(node.age)]
    return layout
def map_layout(layout, xfunc=lambda x: x, yfunc=lambda x: x):
    """Transform every coordinate pair in *layout* in place.

    xfunc/yfunc are applied to the x and y components respectively.
    Returns the same (mutated) dict.
    """
    for key in list(layout):
        x, y = layout[key]
        layout[key] = [xfunc(x), yfunc(y)]
    return layout
def get_branch_layout(layout, node, parent, side=0, recomb_width=.4):
"""Layout the branches of an ARG"""
nx, ny = layout[node]
px, py = layout[parent]
if node.event == "recomb":
if len(node.parents) == 2 and node.parents[0] == node.parents[1]:
step = recomb_width * [-1, 1][side]
else:
step = recomb_width * [-1, 1][node.parents.index(parent)]
return [nx+step, ny, nx+step, py]
else:
return [nx, ny, nx, py]
def show_arg(arg, layout=None, leaves=None, mut=None, recomb_width=.4,
             win=None):
    """Visualize an ARG.

    layout ... precomputed node layout (computed from *arg* if None)
    leaves ... leaf order forwarded to layout_arg() when layout is None
    mut    ... optional mutations as (node, parent, pos, time) tuples
    win    ... summon window to reuse (a new one is opened if None)

    Returns the summon window containing the drawing.
    """
    if win is None:
        win = summon.Window()
    else:
        win.clear_groups()
    # ensure layout
    if layout is None:
        layout = layout_arg(arg, leaves)
    # callbacks
    def branch_click(node, parent):
        print node.name, parent.name
    # draw ARG
    win.add_group(draw_arg(arg, layout, recomb_width=recomb_width,
                           branch_click=branch_click))
    # draw mutations
    if mut:
        g = group()
        for node, parent, pos, t in mut:
            x1, y1, x2, y2 = get_branch_layout(layout, node, parent)
            g.append(group(draw_mark(x1, t, col=(0,0,1)), color(1,1,1)))
        win.add_group(g)
    return win
def draw_arg(arg, layout, recomb_width=.4, branch_click=None):
    """Build the summon drawing group for an ARG.

    branch_click, if given, is invoked as branch_click(child, parent)
    when the corresponding branch is clicked.
    """
    def branch_hotspot(node, parent, x, y, y2):
        # Clickable strip of width 1 covering the vertical branch segment.
        def func():
            branch_click(node, parent)
        return hotspot("click", x-.5, y, x+.5, y2, func)
    # draw branches
    g = group(color(1,1,1))
    for node in layout:
        if not node.is_leaf():
            x, y = layout[node]
            for i, child in enumerate(node.children):
                cx, cy = layout[child]
                x1, y1, x2, y2 = get_branch_layout(
                    layout, child, node, i, recomb_width=recomb_width)
                g.append(line_strip(x, y, x2, y2, x1, y1, cx, cy))
                if branch_click:
                    g.append(branch_hotspot(child, node, x1, y1, y2))
    # draw recomb
    for node in layout:
        if node.event == "recomb":
            x, y = layout[node]
            g.append(draw_mark(x, y, col=(1, 0, 0)))
    return g
def show_marginal_trees(arg, mut=None):
win = summon.Window()
x = 0
step = 2
treewidth = len(list(arg.leaves())) + step
def trans_camera(win, x, y):
v = win.get_visible()
win.set_visible(v[0]+x, v[1]+y, v[2]+x, v[3]+y, "exact")
win.set_binding(input_key("]"), lambda : trans_camera(win, treewidth, 0))
win.set_binding(input_key("["), lambda : trans_camera(win, -treewidth, 0))
blocks = arglib.iter_recomb_blocks(arg)
for tree, block in izip(arglib.iter_marginal_trees(arg), blocks):
pos = block[0]
print pos
leaves = sorted((x for x in tree.leaves()), key=lambda x: x.name)
layout = layout_arg(tree, leaves)
win.add_group(
translate(x, 0, color(1,1,1),
draw_tree(tree, layout),
text_clip(
"%d-%d" % (block[0], block[1]),
treewidth*.05, 0,
treewidth*.95, -max(l[1] for l in layout.values()),
4, 20,
"center", "top")))
# mark responsible recomb node
for node in tree:
if pos != 0.0 and node.pos == pos:
nx, ny = layout[node]
win.add_group(draw_mark(x + nx, ny))
# draw mut
if mut:
for node, parent, mpos, t in mut:
if (node.name in tree and node.name != tree.root.name and
block[0] < mpos < block[1]):
nx, ny = layout[tree[node.name]]
win.add_group(draw_mark(x + nx, t, col=(0,0,1)))
if node.name in tree and tree[node.name].parents:
nx, ny = layout[tree[node.name]]
py = layout[tree[node.name].parents[0]][1]
start = arg[node.name].data["ancestral"][0][0]
win.add_group(lines(color(0,1,0),
x+nx, ny, x+nx, py,
color(1,1,1)))
x += treewidth
win.set_visible(* win.get_root().get_bounding() + ("exact",))
return win
def show_tree_track(tree_track, mut=None, show_labels=False,
use_blocks=False, branch_click=None):
"""
tree_track = [((start, end), tree), ...]
"""
def draw_labels(tree, layout):
return group(*
[text_clip(leaf.name, layout[leaf][0], layout[leaf][1],
1, layout[leaf][1] + 1e4, 4, 20, "middle", "left")
for leaf in tree.leaves()])
def branch_hotspot(node, parent, x, y, y2):
def func():
branch_click(node, parent)
return hotspot("click", x-.5, y, x+.5, y2, func)
def print_branch(node, parent):
print "node", node.name
tree_track = iter(tree_track)
if mut:
mut = util.PushIter(mut)
block, tree = tree_track.next()
if branch_click is True:
branch_click = print_branch
win = summon.Window()
treex = 0
step = 2
treewidth = len(list(tree.leaves())) + step
def trans_camera(win, x, y):
v = win.get_visible()
win.set_visible(v[0]+x, v[1]+y, v[2]+x, v[3]+y, "exact")
win.set_binding(input_key("]"), lambda : trans_camera(win, treewidth, 0))
win.set_binding(input_key("["), lambda : trans_camera(win, -treewidth, 0))
for block, tree in chain([(block, tree)], tree_track):
pos = block[0]
print pos
layout = treelib.layout_tree(tree, xscale=1, yscale=1)
treelib.layout_tree_vertical(layout, leaves=0)
g = win.add_group(
translate(treex, 0, color(1,1,1),
sumtree.draw_tree(tree, layout,
vertical=True),
(draw_labels(tree, layout) if show_labels else group()),
text_clip(
"%d-%d" % (block[0], block[1]),
treewidth*.05, 0,
treewidth*.95, -max(l[1] for l in layout.values()),
4, 20,
"center", "top")))
clicking = group()
g.append(clicking)
# hotspots
if branch_click:
for node in tree:
if node.parent:
x, y = layout[node]
x2, y2 = layout[node.parent]
clicking.append(branch_hotspot(node, node.parent, x, y, y2))
#win.add_group(clicking)
# draw mut
if mut:
for mpos, age, chroms in mut:
if block[0] < mpos < block[1]:
node = arglib.split_to_tree_branch(tree, chroms)
parent = node.parent
if node and parent:
t = random.uniform(layout[node][1], layout[parent][1])
nx, ny = layout[node]
win.add_group(draw_mark(treex + nx, t, col=(0,0,1)))
elif mpos > block[1]:
mut.push((mpos, age, chroms))
break
treex += treewidth
#win.set_visible(* win.get_root().get_bounding() + ("exact",))
win.home("exact")
return win
def show_coal_track(tree_track):
win = summon.Window()
bgcolor = (1, 1, 1, .1)
cmap = util.rainbow_color_map(low=0.0, high=1.0)
maxage = 0
for (start, end), tree in tree_track:
print start
l = []
times = treelib.get_tree_timestamps(tree)
nleaves = len(tree.leaves())
maxage2 = 0
for node in tree:
if len(node.children) > 1:
age = times[node]
freq = len(node.leaves()) / float(nleaves)
l.extend([color(*cmap.get(freq)), start, age, end, age])
if age > maxage2:
maxage2 = age
win.add_group(group(lines(*l), color(*bgcolor),
box(start, 0, end, maxage2, fill=True)))
if maxage2 > maxage:
maxage = maxage2
# hotspot
def func():
x, y = win.get_mouse_pos()
print "pos=%s age=%f" % (util.int2pretty(int(x)), y)
win.add_group(hotspot("click", 0, 0, end, maxage,
func))
win.home("exact")
return win
def show_smc(smc, mut=None, show_labels=False, branch_click=None,
use_names=False):
"""
"""
def draw_labels(tree, layout):
return group(*
[text_clip(names[leaf.name],
layout[leaf][0] - .4, layout[leaf][1],
layout[leaf][0] + .4, layout[leaf][1] - 1e4,
4, 20, "top", "center")
for leaf in tree.leaves()])
def branch_hotspot(node, parent, x, y, y2):
def func():
branch_click(node, parent)
return hotspot("click", x-.5, y, x+.5, y2, func)
def print_branch(node, parent):
print "node", node.name
def trans_camera(win, x, y):
v = win.get_visible()
win.set_visible(v[0]+x, v[1]+y, v[2]+x, v[3]+y, "exact")
def on_scroll_window(win):
region = win.get_visible()
print region
def on_resize_window(win):
region = win.get_visible()
print region
branch_color = (1, 1, 1)
spr_color = (1, 0, 0, .5)
recomb_color = (1, 0, 0)
# create window
win = summon.Window()
win.set_binding(input_key("]"), lambda : trans_camera(win, treewidth, 0))
win.set_binding(input_key("["), lambda : trans_camera(win, -treewidth, 0))
win.add_view_change_listener(lambda : on_scroll_window(win))
#win.remove_resize_listener(lambda : on_resize_window(win))
treex = 0
step = 2
names = []
seq_range = [0, 0]
treewidth = 10
tree = None
layout = None
for item in smc:
if item["tag"] == "NAMES":
names = item["names"]
if not use_names:
names = map(str, range(len(names)))
treewidth = len(names)
elif item["tag"] == "RANGE":
seq_range = [item["start"], item["end"]]
elif item["tag"] == "TREE":
tree = item["tree"]
layout = treelib.layout_tree(tree, xscale=1, yscale=1)
treelib.layout_tree_vertical(layout, leaves=0)
#map_layout(layout, yfunc=minlog)
region_text = text_clip("%d-%d" % (item["start"], item["end"]),
treewidth*.05, 0,
treewidth*.95, -max(l[1] for l in layout.values()),
4, 20,
"center", "top")
g = win.add_group(
translate(treex, 0, color(1,1,1),
sumtree.draw_tree(tree, layout,
vertical=True),
(draw_labels(tree, layout)
if show_labels else group()),
zoom_clamp(translate(0, -20, region_text),
axis=(treewidth, 0),
miny=1.0, maxy=1.0)
))
clicking = group()
g.append(clicking)
elif item["tag"] == "SPR":
rx, ry = layout[tree[item["recomb_node"]]]
ry = item["recomb_time"]
cx, cy = layout[tree[item["coal_node"]]]
cy = item["coal_time"]
g.append(
group(
lines(color(*spr_color), rx, ry, cx, cy),
mark_tree(tree, layout,
item["recomb_node"], time=item["recomb_time"],
col=recomb_color)))
treex += treewidth + step
'''
tree_track = iter(tree_track)
if mut:
mut = util.PushIter(mut)
block, tree = tree_track.next()
if branch_click is True:
branch_click = print_branch
win = summon.Window()
treex = 0
step = 2
treewidth = len(list(tree.leaves())) + step
def trans_camera(win, x, y):
v = win.get_visible()
win.set_visible(v[0]+x, v[1]+y, v[2]+x, v[3]+y, "exact")
win.set_binding(input_key("]"), lambda : trans_camera(win, treewidth, 0))
win.set_binding(input_key("["), lambda : trans_camera(win, -treewidth, 0))
for block, tree in chain([(block, tree)], tree_track):
pos = block[0]
print pos
layout = treelib.layout_tree(tree, xscale=1, yscale=1)
treelib.layout_tree_vertical(layout, leaves=0)
g = win.add_group(
translate(treex, 0, color(1,1,1),
sumtree.draw_tree(tree, layout,
vertical=True),
(draw_labels(tree, layout) if show_labels else group()),
text_clip(
"%d-%d" % (block[0], block[1]),
treewidth*.05, 0,
treewidth*.95, -max(l[1] for l in layout.values()),
4, 20,
"center", "top")))
clicking = group()
g.append(clicking)
# hotspots
if branch_click:
for node in tree:
if node.parent:
x, y = layout[node]
x2, y2 = layout[node.parent]
clicking.append(branch_hotspot(node, node.parent, x, y, y2))
#win.add_group(clicking)
# draw mut
if mut:
for mpos, age, chroms in mut:
if block[0] < mpos < block[1]:
node = arglib.split_to_tree_branch(tree, chroms)
parent = node.parent
if node and parent:
t = random.uniform(layout[node][1], layout[parent][1])
nx, ny = layout[node]
win.add_group(draw_mark(treex + nx, t, col=(0,0,1)))
elif mpos > block[1]:
mut.push((mpos, age, chroms))
break
treex += treewidth
'''
win.home("exact")
return win
def show_coal_track3(tree_track):
win = summon.Window()
bgcolor = (1, 1, 1, .1)
cmap = util.rainbow_color_map(low=0.5, high=1.0)
maxage = 0
for (start, end), tree in tree_track:
print start
l = []
times = treelib.get_tree_timestamps(tree)
nleaves = len(tree.leaves())
maxage2 = 0
for node in tree:
if len(node.children) > 1:
age = times[node]
sizes = [len(x.leaves()) for x in node.children]
bias = max(sizes) / float(sum(sizes))
l.extend([color(*cmap.get(bias)), start, age, end, age])
if age > maxage2:
maxage2 = age
win.add_group(group(lines(*l), color(*bgcolor),
box(start, 0, end, maxage2, fill=True)))
if maxage2 > maxage:
maxage = maxage2
def func():
x, y = win.get_mouse_pos()
print "pos=%s age=%f" % (util.int2pretty(int(x)), y)
win.add_group(hotspot("click", 0, 0, end, maxage,
func))
win.home("exact")
return win
def show_coal_track2(tree_track):
    """Visualize coalescence times of a tree track, colored by leaf frequency.

    NOTE(review): this definition is shadowed by a second
    'def show_coal_track2' later in this module; only the later one is
    reachable at runtime.  Consider renaming or deleting one of them.
    """
    win = summon.Window()
    bgcolor = (1, 1, 1, .1)
    cmap = util.rainbow_color_map(low=0.0, high=1.0)
    # NOTE(review): 'tracks' is assigned but never used below.
    tracks = {}
    maxage = 0
    for (start, end), tree in tree_track:
        print start
        l = []
        times = treelib.get_tree_timestamps(tree)
        nleaves = len(tree.leaves())
        maxage2 = 0
        for node in tree:
            if len(node.children) > 1:
                age = times[node]
                # Color by the fraction of leaves below this coalescence.
                freq = len(node.leaves()) / float(nleaves)
                #sizes = [len(x.leaves()) for x in node.children]
                #m = max(sizes)
                #n = sum(sizes)
                #pval = 2 * (n - m) / float(n - 1)
                l.extend([color(*cmap.get(freq)), start, age, end, age])
                if age > maxage2:
                    maxage2 = age
        win.add_group(group(lines(*l), color(*bgcolor),
                            box(start, 0, end, maxage2, fill=True)))
        if maxage2 > maxage:
            maxage = maxage2
    # Click anywhere on the track to print the position and age.
    def func():
        x, y = win.get_mouse_pos()
        print "pos=%s age=%f" % (util.int2pretty(int(x)), y)
    win.add_group(hotspot("click", 0, 0, end, maxage,
                          func))
    win.home("exact")
    return win
def show_coal_track2(tree_track):
win = summon.Window()
bgcolor = (1, 1, 1, .1)
cmap = util.rainbow_color_map(low=0.0, high=1.0)
maxage = 0
for (start, end), tree in tree_track:
print start
l = []
times = treelib.get_tree_timestamps(tree)
nleaves = len(tree.leaves())
maxage2 = 0
for node in tree:
if len(node.children) > 1:
age = times[node]
sizes = [len(x.leaves()) for x in node.children]
m = max(sizes)
n = sum(sizes)
pval = 2 * (n - m) / float(n - 1)
freq = len(node.leaves()) / float(nleaves)
l.extend([color(*cmap.get(freq)), start, age, end, age])
if age > maxage2:
maxage2 = age
win.add_group(group(lines(*l), color(*bgcolor),
box(start, 0, end, maxage2, fill=True)))
if maxage2 > maxage:
maxage = maxage2
def func():
x, y = win.get_mouse_pos()
print "pos=%s age=%f" % (util.int2pretty(int(x)), y)
win.add_group(hotspot("click", 0, 0, end, maxage,
func))
win.home("exact")
return win
def draw_tree(tree, layout, orient="vertical"):
    """Draw a tree with right-angled (bent) branches.

    orient ... "vertical" bends each branch at the parent's y value;
               any other value bends at the parent's x value.
    """
    vis = group()
    bends = {}
    for node in tree.postorder():
        # get node coordinates
        nx, ny = layout[node]
        px, py = layout[node.parents[0]] if node.parents else (nx, ny)
        # determine bend point
        if orient == "vertical":
            bends[node] = (nx, py)
        else:
            bends[node] = (px, ny)
        # draw branch
        vis.append(lines(nx, ny, bends[node][0], bends[node][1]))
        # draw cross bar
        if len(node.children) > 0:
            a = bends[node.children[-1]]
            b = bends[node.children[0]]
            vis.append(lines(a[0], a[1], b[0], b[1]))
    return vis
def draw_mark(x, y, col=(1,0,0), size=.5, func=None):
    """Draw a mark at (x, y)

    col  ... mark color
    size ... half-width of the mark in world coordinates
    func ... optional click callback attached to the mark

    The mark is wrapped in a zoom_clamp so its on-screen size stays
    between the minx/miny and maxx/maxy bounds regardless of zoom.
    """
    if func:
        h = hotspot("click", x-size, y-size, x+size, y+size, func)
    else:
        h = group()
    return zoom_clamp(
        color(*col),
        box(x-size, y-size, x+size, y+size, fill=True),
        h,
        color(1,1,1),
        origin=(x, y),
        minx=10.0, miny=10.0, maxx=20.0, maxy=20.0,
        link=True)
def mark_tree(tree, layout, name, y=None, time=None,
              col=(1, 0, 0), yfunc=lambda y: y, size=.5):
    """Draw a mark on the node called *name* in *tree*.

    y    ... vertical offset relative to the node's layout position
    time ... absolute y value, used only when *y* is None
    """
    nx, ny = layout[tree[name]]
    if y is not None:
        y += ny
    else:
        y = time
    return draw_mark(nx, yfunc(y), col=col, size=size)
def draw_branch_mark(arg, layout, node=None, parent=None, pos=None,
                     chroms=None, age=None, col=(0,0,1)):
    """Draw a mark on a branch of an ARG

    The branch may be given directly (node, parent) or located from a
    site position and chromosome set (pos, chroms).  When *age* is None
    the mark is placed at a random height along the branch.
    """
    if node is None:
        node = arglib.split_to_arg_branch(arg, pos, chroms)
    if parent is None:
        assert pos is not None
        parent = arg.get_local_parent(node, pos)
    if node and parent:
        if age is None:
            t = random.uniform(layout[node][1], layout[parent][1])
        else:
            t = layout[node][1] + (age - node.age)
        nx, ny = layout[node]
        return draw_mark(nx, t, col=col)
    else:
        # Branch could not be resolved; draw nothing.
        return group()
def draw_branch(arg, layout, node=None, parent=None, chroms=None,
                pos=None, col=None):
    """Draw a branch of an ARG as a line.

    (Docstring corrected: this draws the branch itself, not a mark.)
    The branch may be given directly (node, parent) or located from a
    site position and chromosome set (pos, chroms), like
    draw_branch_mark().
    """
    if node is None:
        node = arglib.split_to_arg_branch(arg, pos, chroms)
    if parent is None:
        assert pos is not None
        parent = arg.get_local_parent(node, pos)
    if node and parent:
        x1, y1, x2, y2 = get_branch_layout(layout, node, parent)
        if col is None:
            return lines(x1, y1, x2, y2)
        else:
            return lines(color(*col), x1, y1, x2, y2)
    else:
        # Branch could not be resolved; draw nothing.
        return group()
#=============================================================================
# haplotype visualization
def inorder_tree(tree):
    """Yield the nodes of binary *tree* in inorder (left, node, right).

    Only the first two children of an internal node are traversed,
    matching the original explicit-stack implementation.
    """
    def _walk(node):
        if node.is_leaf():
            yield node
        else:
            for sub in _walk(node.children[0]):
                yield sub
            yield node
            for sub in _walk(node.children[1]):
                yield sub
    for out in _walk(tree.root):
        yield out
def layout_tree_leaves_even(tree):
    """Assign evenly spaced integer y-coordinates to leaf names.

    Each internal node met during the inorder traversal advances the
    coordinate by one, so adjacent leaves differ by exactly 1.
    """
    positions = {}
    coord = 0
    for node in inorder_tree(tree):
        if node.is_leaf():
            positions[node.name] = coord
        else:
            coord += 1
    return positions
def layout_tree_leaves(tree):
    """Assign y-coordinates to the leaf names of *tree*.

    Leaves are spaced in inorder sequence; each internal node advances
    the coordinate by an amount that grows with its age, so older
    splits get wider gaps.  The result is centered around 0.
    """
    layout = {}
    y = 0
    for node in inorder_tree(tree):
        if node.is_leaf():
            layout[node.name] = y
        else:
            # Gap grows with node age; alternatives kept from tuning:
            #y += 1
            y += (node.age / 1e3) + 1
            #y += exp(node.age / 5e2) + 1
            #y += log(node.age + 1) ** 3
    # center the layout around zero
    vals = layout.values()
    mid = (max(vals) + min(vals)) / 2.0
    for k, v in layout.items():
        layout[k] = (v - mid)
    return layout
def layout_chroms(arg, start=None, end=None):
    """Layout chromosome lineages of an ARG along the sequence.

    Walks the SPR operations of *arg* from *start* to *end*, keeping a
    marginal tree up to date, and records a leaf layout for every
    recombination block.

    Returns (blocks, leaf_layout): blocks is a list of
    [block_start, block_end] and leaf_layout the matching list of
    leaf-name -> y dicts.
    """
    if start is None:
        start = arg.start
    if end is None:
        end = arg.end
    tree = arg.get_marginal_tree(start)
    arglib.remove_single_lineages(tree)
    last_pos = start
    blocks = []
    leaf_layout = []
    layout_func = layout_tree_leaves
    #layout_func = layout_tree_leaves_even
    for spr in arglib.iter_arg_sprs(arg, start=start, end=end, use_leaves=True):
        print "layout", spr[0]
        blocks.append([last_pos, spr[0]])
        leaf_layout.append(layout_func(tree))
        # Remember the inorder position of every node before the SPR.
        inorder = dict((n, i) for i, n in enumerate(inorder_tree(tree)))
        # determine SPR nodes
        rnode = arglib.arg_lca(tree, spr[1][0], spr[0])
        cnode = arglib.arg_lca(tree, spr[2][0], spr[0])
        # determine best side for adding new sister
        left = (inorder[rnode] < inorder[cnode])
        # apply spr
        arglib.apply_spr(tree, rnode, spr[1][1], cnode, spr[2][1], spr[0])
        # adjust sister
        rindex = rnode.parents[0].children.index(rnode)
        if left and rindex != 0:
            rnode.parents[0].children.reverse()
        last_pos = spr[0]
    # final block runs to the end of the region
    blocks.append([last_pos, end])
    leaf_layout.append(layout_func(tree))
    return blocks, leaf_layout
def layout_tree_block(tree, names):
    """Compute a block layout for a binary *tree*.

    Returns a dict mapping node name -> (x, low, high, age).  Leaves sit
    at their index in *names* with a half-width of .25; each internal
    node is centered between the inner edges of its two children's
    spans and covers their combined extent.
    """
    result = {}
    def _place(node):
        # Returns (low, high): the horizontal extent of node's subtree.
        if node.is_leaf():
            center = names.index(node.name)
            lo, hi = center - .25, center + .25
            result[node.name] = (center, lo, hi, node.age)
            return lo, hi
        assert len(node.children) == 2
        left_lo, left_hi = _place(node.children[0])
        right_lo, right_hi = _place(node.children[1])
        center = (min(left_hi, right_hi) + max(left_lo, right_lo)) / 2.0
        lo = min(left_lo, right_lo)
        hi = max(left_hi, right_hi)
        result[node.name] = (center, lo, hi, node.age)
        return lo, hi
    _place(tree.root)
    return result
def mouse_click(win):
    """Debug callback: print the mouse position in world coordinates."""
    print win.get_mouse_pos("world")
def chrom_click(win, chrom, block):
    """Return a click callback reporting chromosome, block, and x position."""
    def func():
        # win may be None when no window is attached; then do nothing.
        if win:
            print chrom, block, win.get_mouse_pos("world")[0]
    return func
def draw_arg_threads(arg, blocks, layout, sites=None,
                     chrom_colors=None, chrom_color=[.2,.2,.8,.8],
                     snp_colors={"compat": [1, 0, 0],
                                 "noncompat": [0, 1, 0]},
                     get_snp_color=None,
                     spr_alpha=1,
                     spr_trim=10,
                     compat=False,
                     draw_group=None,
                     win=None):
    """Draw ARG leaf threads (one horizontal line per chromosome per block),
    the SPR connector segments between adjacent blocks, click hotspots, and
    optional SNP tick marks.

    NOTE(review): chrom_color and snp_colors are mutable default arguments;
    they are only read, never mutated, so this is benign — but callers
    should not modify them in place.

    arg          -- ARG with leaf_names(), iteration over nodes, and
                    get_marginal_tree()
    blocks       -- list of (start, end) coordinate pairs, one per block
    layout       -- per-block dicts mapping leaf name -> y coordinate
    sites        -- optional sites object with iter_region()/get_minor()/get()
    compat       -- if True, color SNPs by compatibility with the local tree
    get_snp_color-- optional callback (chrom, pos, allele) -> color list
    spr_trim     -- horizontal gap reserved at block edges for SPR segments
    draw_group   -- summon group to append to (a new one if None)
    win          -- window used by click hotspots (may be None)

    Returns the summon draw group containing all primitives.
    """
    leaf_names = set(arg.leaf_names())
    # TEST:
    rnodes = dict((r.pos, r) for r in arg if r.event == "recomb")
    if draw_group is None:
        draw_group = group()
    # set chromosome color (same color for every leaf unless given explicitly)
    if chrom_colors is None:
        chrom_colors = {}
        for name in leaf_names:
            chrom_colors[name] = chrom_color
    # SPR connector colors: copy of chromosome color with alpha scaled
    spr_colors = {}
    for name in leaf_names:
        spr_colors[name] = list(chrom_colors[name])
        if len(spr_colors[name]) < 4:
            spr_colors[name].append(1.0)
        spr_colors[name][3] *= spr_alpha
    trims = []
    for k, (x1, x2) in enumerate(blocks):
        # calc trims; never trim more than half the block length
        length = x2 - x1
        minlen = 0
        spr_trim2 = min(spr_trim, (length - minlen) / 2.0)
        trims.append((x1 + spr_trim2, x2 - spr_trim2))
        trim = trims[-1]
        # horizontal lines: one per leaf across the trimmed block
        l = []
        for name in leaf_names:
            c = chrom_colors[name]
            y = layout[k][name]
            l.extend([color(*c), trim[0], y, trim[1], y])
        draw_group.append(lines(*l))
        # SPRs: diagonal connectors from the previous block's layout to this one
        if k > 0:
            l = []
            # TEST:
            #rnode = rnodes.get(x1, None)
            #young = (rnode is not None and rnode.age < 500)
            for name in leaf_names:
                #c = [1,0,0] if young else spr_colors[name]
                c = spr_colors[name]
                y1 = layout[k-1][name]
                y2 = layout[k][name]
                l.extend([color(*c), trims[k-1][1], y1, trims[k][0], y2])
            draw_group.append(lines(*l))
        # hotspots: clickable band (+-0.4) around each leaf line
        g = group()
        for name in leaf_names:
            y = layout[k][name]
            g.append(hotspot("click", x1+spr_trim, y+.4, x2-spr_trim, y-.4,
                             chrom_click(win, name, (x1, x2))))
        draw_group.append(g)
        # SNPs: vertical ticks on every leaf carrying the derived allele
        tree = None
        if sites:
            l = []
            for pos, col in sites.iter_region(x1, x2):
                split = set(sites.get_minor(pos)) & leaf_names
                if len(split) == 0:
                    continue
                if compat:
                    # lazily build the marginal tree for this block once
                    if tree is None:
                        tree = arg.get_marginal_tree((x1+x2)/2.0)
                        arglib.remove_single_lineages(tree)
                    node = arglib.split_to_arg_branch(tree, pos-.5, split)
                    if node is not None:
                        # split is compatible: color by compat, mark whole clade
                        derived = list(tree.leaf_names(node))
                        c = color(*snp_colors["compat"])
                    else:
                        c = color(*snp_colors["noncompat"])
                        derived = split
                else:
                    c = color(*snp_colors["compat"])
                    derived = split
                if get_snp_color and derived:
                    # caller-supplied color overrides the compat coloring
                    allele = sites.get(pos, next(iter(derived)))
                    c = color(*get_snp_color(arg.chrom, pos, allele))
                for d in derived:
                    if d in layout[k]:
                        y = layout[k][d]
                        l.extend([c, pos, y+.4, pos, y-.4])
            draw_group.append(lines(*l))
    return draw_group
|
mdrasmus/compbio
|
compbio/vis/argvis.py
|
Python
|
mit
| 29,840
|
[
"VisIt"
] |
38ee40830c968a44a7269b414f67185caf6877cfd603d87f36b48744be6105a1
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
import numpy as np
import pytest
import hyperspy.api as hs
from hyperspy.decorators import lazifyTestClass
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.misc.test_utils import ignore_warning
from hyperspy.misc.utils import slugify
class TestModelJacobians:
    """Tests for Model1D._jacobian, with and without low-loss convolution.

    A single Gaussian with sigma twinned to centre is used, so the twinned
    parameter's gradients must be summed into one jacobian row.
    """
    def setup_method(self, method):
        s = hs.signals.Signal1D(np.zeros(1))
        m = s.create_model()
        self.low_loss = 7.0
        self.weights = 0.3
        m.axis.axis = np.array([1, 0])
        m.channel_switches = np.array([0, 1], dtype=bool)
        m.append(hs.model.components1D.Gaussian())
        m[0].A.value = 1
        m[0].centre.value = 2.0
        # twin sigma to centre: they share a single free parameter slot
        m[0].sigma.twin = m[0].centre
        m._low_loss = mock.MagicMock()
        m.low_loss.return_value = self.low_loss
        self.model = m
        m.convolution_axis = np.zeros(2)
    def test_jacobian_not_convolved(self):
        m = self.model
        m.convolved = False
        jac = m._jacobian((1, 2, 3), None, weights=self.weights)
        np.testing.assert_array_almost_equal(
            jac.squeeze(),
            self.weights
            * np.array([m[0].A.grad(0), m[0].sigma.grad(0) + m[0].centre.grad(0)]),
        )
        assert m[0].A.value == 1
        assert m[0].centre.value == 2
        assert m[0].sigma.value == 2
    def test_jacobian_convolved(self):
        m = self.model
        m.convolved = True
        m.append(hs.model.components1D.Gaussian())
        m[0].convolved = False
        m[1].convolved = True
        jac = m._jacobian((1, 2, 3, 4, 5), None, weights=self.weights)
        np.testing.assert_array_almost_equal(
            jac.squeeze(),
            self.weights
            * np.array(
                [
                    m[0].A.grad(0),
                    m[0].sigma.grad(0) + m[0].centre.grad(0),
                    m[1].A.grad(0) * self.low_loss,
                    m[1].centre.grad(0) * self.low_loss,
                    m[1].sigma.grad(0) * self.low_loss,
                ]
            ),
        )
        assert m[0].A.value == 1
        assert m[0].centre.value == 2
        assert m[0].sigma.value == 2
        assert m[1].A.value == 3
        assert m[1].centre.value == 4
        assert m[1].sigma.value == 5
class TestModelCallMethod:
    """Tests for Model1D.__call__ (onlyactive / convolution / binning)."""
    def setup_method(self, method):
        s = hs.signals.Signal1D(np.empty(1))
        m = s.create_model()
        m.append(hs.model.components1D.Gaussian())
        m.append(hs.model.components1D.Gaussian())
        self.model = m
    def test_call_method_no_convolutions(self):
        m = self.model
        m.convolved = False
        m[1].active = False
        r1 = m()
        r2 = m(onlyactive=True)
        np.testing.assert_allclose(m[0].function(0) * 2, r1)
        np.testing.assert_allclose(m[0].function(0), r2)
        m.convolved = True
        r1 = m(non_convolved=True)
        r2 = m(non_convolved=True, onlyactive=True)
        np.testing.assert_allclose(m[0].function(0) * 2, r1)
        np.testing.assert_allclose(m[0].function(0), r2)
    def test_call_method_with_convolutions(self):
        m = self.model
        m._low_loss = mock.MagicMock()
        m.low_loss.return_value = 0.3
        m.convolved = True
        m.append(hs.model.components1D.Gaussian())
        m[1].active = False
        m[0].convolved = True
        m[1].convolved = False
        m[2].convolved = False
        m.convolution_axis = np.array([0.0])
        r1 = m()
        r2 = m(onlyactive=True)
        np.testing.assert_allclose(m[0].function(0) * 2.3, r1)
        np.testing.assert_allclose(m[0].function(0) * 1.3, r2)
    def test_call_method_binned(self):
        m = self.model
        m.convolved = False
        m.remove(1)
        # with a binned signal the model is scaled by the axis step
        m.signal.metadata.Signal.binned = True
        m.signal.axes_manager[-1].scale = 0.3
        r1 = m()
        np.testing.assert_allclose(m[0].function(0) * 0.3, r1)
class TestModelPlotCall:
    """Tests for Model1D._model2plot with own vs foreign axes_manager."""
    def setup_method(self, method):
        s = hs.signals.Signal1D(np.empty(1))
        m = s.create_model()
        m.__call__ = mock.MagicMock()
        m.__call__.return_value = np.array([0.5, 0.25])
        m.axis = mock.MagicMock()
        m.fetch_stored_values = mock.MagicMock()
        # only channels 1 and 2 are inside the fitting range
        m.channel_switches = np.array([0, 1, 1, 0, 0], dtype=bool)
        self.model = m
    def test_model2plot_own_am(self):
        m = self.model
        m.axis.axis.shape = (5,)
        res = m._model2plot(m.axes_manager)
        np.testing.assert_array_equal(
            res, np.array([np.nan, 0.5, 0.25, np.nan, np.nan])
        )
        assert m.__call__.called
        assert m.__call__.call_args[1] == {"non_convolved": False, "onlyactive": True}
        assert not m.fetch_stored_values.called
    def test_model2plot_other_am(self):
        m = self.model
        res = m._model2plot(m.axes_manager.deepcopy(), out_of_range2nans=False)
        np.testing.assert_array_equal(res, np.array([0.5, 0.25]))
        assert m.__call__.called
        assert m.__call__.call_args[1] == {"non_convolved": False, "onlyactive": True}
        assert 2 == m.fetch_stored_values.call_count
class TestModelSettingPZero:
    """Tests for the p0 parameter vector: building, fetching and bounds."""
    def setup_method(self, method):
        s = hs.signals.Signal1D(np.empty(1))
        m = s.create_model()
        m.append(hs.model.components1D.Gaussian())
        m[0].A.value = 1.1
        # centre holds two elements to exercise multi-element parameters
        m[0].centre._number_of_elements = 2
        m[0].centre.value = (2.2, 3.3)
        m[0].sigma.value = 4.4
        m[0].sigma.free = False
        m[0].A._bounds = (0.1, 0.11)
        m[0].centre._bounds = ((0.2, 0.21), (0.3, 0.31))
        m[0].sigma._bounds = (0.4, 0.41)
        self.model = m
    def test_setting_p0(self):
        m = self.model
        m.append(hs.model.components1D.Gaussian())
        m[-1].active = False
        m.p0 = None
        m._set_p0()
        # only free parameters of active components contribute
        assert m.p0 == (1.1, 2.2, 3.3)
    def test_fetching_from_p0(self):
        m = self.model
        m.append(hs.model.components1D.Gaussian())
        m[-1].active = False
        m[-1].A.value = 100
        m[-1].sigma.value = 200
        m[-1].centre.value = 300
        m.p0 = (1.2, 2.3, 3.4, 5.6, 6.7, 7.8)
        m._fetch_values_from_p0()
        assert m[0].A.value == 1.2
        assert m[0].centre.value == (2.3, 3.4)
        assert m[0].sigma.value == 4.4
        assert m[1].A.value == 100
        assert m[1].sigma.value == 200
        assert m[1].centre.value == 300
    def test_setting_boundaries(self):
        m = self.model
        m.append(hs.model.components1D.Gaussian())
        m[-1].active = False
        with pytest.warns(
            VisibleDeprecationWarning,
            match=r".* has been deprecated and will be made private",
        ):
            m.set_boundaries()
        assert m.free_parameters_boundaries == [(0.1, 0.11), (0.2, 0.21), (0.3, 0.31)]
    def test_setting_mpfit_parameters_info(self):
        m = self.model
        m[0].A.bmax = None
        m[0].centre.bmin = None
        m[0].centre.bmax = 0.31
        m.append(hs.model.components1D.Gaussian())
        m[-1].active = False
        with pytest.warns(
            VisibleDeprecationWarning,
            match=r".* has been deprecated and will be made private",
        ):
            m.set_mpfit_parameters_info()
        assert m.mpfit_parinfo == [
            {"limited": [True, False], "limits": [0.1, 0]},
            {"limited": [False, True], "limits": [0, 0.31]},
            {"limited": [False, True], "limits": [0, 0.31]},
        ]
class TestModel1D:
    """Core Model1D tests: error/gradient functions, component management
    (append/remove/access by name, index or object), the `components`
    attribute namespace, and snapping parameter values into their bounds.
    """
    def setup_method(self, method):
        s = hs.signals.Signal1D(np.empty(1))
        m = s.create_model()
        self.model = m
    def test_errfunc(self):
        m = self.model
        m._model_function = mock.MagicMock()
        m._model_function.return_value = 3.0
        np.testing.assert_equal(m._errfunc(None, 1.0, None), 2.0)
        np.testing.assert_equal(m._errfunc(None, 1.0, 0.3), 0.6)
    def test_errfunc_sq(self):
        m = self.model
        m._model_function = mock.MagicMock()
        m._model_function.return_value = 3.0 * np.ones(2)
        np.testing.assert_equal(m._errfunc_sq(None, np.ones(2), None), 8.0)
        np.testing.assert_equal(m._errfunc_sq(None, np.ones(2), 0.3), 0.72)
    def test_gradient_ls(self):
        m = self.model
        m._errfunc = mock.MagicMock()
        m._errfunc.return_value = 0.1
        m._jacobian = mock.MagicMock()
        m._jacobian.return_value = np.ones((1, 2)) * 7.0
        np.testing.assert_allclose(m._gradient_ls(None, None), 2.8)
    def test_gradient_ml(self):
        m = self.model
        m._model_function = mock.MagicMock()
        m._model_function.return_value = 3.0 * np.ones(2)
        m._jacobian = mock.MagicMock()
        m._jacobian.return_value = np.ones((1, 2)) * 7.0
        np.testing.assert_allclose(m._gradient_ml(None, 1.2), 8.4)
    def test_gradient_huber(self):
        m = self.model
        m._errfunc = mock.MagicMock()
        m._errfunc.return_value = 0.1
        m._jacobian = mock.MagicMock()
        m._jacobian.return_value = np.ones((1, 2)) * 7.0
        np.testing.assert_allclose(m._gradient_huber(None, None), 1.4)
    def test_model_function(self):
        m = self.model
        m.append(hs.model.components1D.Gaussian())
        m[0].A.value = 1.3
        m[0].centre.value = 0.003
        m[0].sigma.value = 0.1
        param = (100, 0.1, 0.2)
        np.testing.assert_array_almost_equal(176.03266338, m._model_function(param))
        assert m[0].A.value == 100
        assert m[0].centre.value == 0.1
        assert m[0].sigma.value == 0.2
    def test_append_existing_component(self):
        g = hs.model.components1D.Gaussian()
        m = self.model
        m.append(g)
        with pytest.raises(ValueError, match="Component already in model"):
            m.append(g)
    def test_append_component(self):
        g = hs.model.components1D.Gaussian()
        m = self.model
        m.append(g)
        assert g in m
        assert g.model is m
        assert g._axes_manager is m.axes_manager
        assert all([hasattr(p, "map") for p in g.parameters])
    def test_calculating_convolution_axis(self):
        m = self.model
        # setup
        m.axis.offset = 10
        m.axis.size = 10
        ll_axis = mock.MagicMock()
        ll_axis.size = 7
        ll_axis.value2index.return_value = 3
        m._low_loss = mock.MagicMock()
        m.low_loss.axes_manager.signal_axes = [
            ll_axis,
        ]
        # calculation
        m.set_convolution_axis()
        # tests
        np.testing.assert_array_equal(m.convolution_axis, np.arange(7, 23))
        np.testing.assert_equal(ll_axis.value2index.call_args[0][0], 0)
    def test_access_component_by_name(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        assert m["test"] is g2
    def test_access_component_by_index(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        assert m[1] is g2
    def test_component_name_when_append(self):
        m = self.model
        gs = [
            hs.model.components1D.Gaussian(),
            hs.model.components1D.Gaussian(),
            hs.model.components1D.Gaussian(),
        ]
        m.extend(gs)
        # duplicate names are disambiguated with numeric suffixes
        assert m["Gaussian"] is gs[0]
        assert m["Gaussian_0"] is gs[1]
        assert m["Gaussian_1"] is gs[2]
    def test_several_component_with_same_name(self):
        m = self.model
        gs = [
            hs.model.components1D.Gaussian(),
            hs.model.components1D.Gaussian(),
            hs.model.components1D.Gaussian(),
        ]
        m.extend(gs)
        m[0]._name = "hs.model.components1D.Gaussian"
        m[1]._name = "hs.model.components1D.Gaussian"
        m[2]._name = "hs.model.components1D.Gaussian"
        with pytest.raises(ValueError, match=r"Component name .* not found in model"):
            m["Gaussian"]
    def test_no_component_with_that_name(self):
        m = self.model
        with pytest.raises(ValueError, match=r"Component name .* not found in model"):
            m["Voigt"]
    def test_component_already_in_model(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        with pytest.raises(ValueError, match="Component already in model"):
            m.extend((g1, g1))
    def test_remove_component(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        m.remove(g1)
        assert len(m) == 0
    def test_remove_component_by_index(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        m.remove(0)
        assert len(m) == 0
    def test_remove_component_by_name(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        m.remove(g1.name)
        assert len(m) == 0
    def test_delete_component_by_index(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        del m[0]
        assert g1 not in m
    def test_delete_component_by_name(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        del m[g1.name]
        assert g1 not in m
    def test_delete_slice(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g3 = hs.model.components1D.Gaussian()
        g3.A.twin = g1.A
        g1.sigma.twin = g2.sigma
        m.extend([g1, g2, g3])
        del m[:2]
        assert g1 not in m
        assert g2 not in m
        assert g3 in m
        # twin relations to removed components must be broken
        assert not g1.sigma.twin
        assert not g1.A._twins
    def test_get_component_by_name(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        assert m._get_component("test") is g2
    def test_get_component_by_index(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        assert m._get_component(1) is g2
    def test_get_component_by_component(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        assert m._get_component(g2) is g2
    def test_get_component_wrong(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        g2 = hs.model.components1D.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        with pytest.raises(ValueError, match="Not a component or component id"):
            m._get_component(1.2)
    def test_components_class_default(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        assert getattr(m.components, g1.name) is g1
    def test_components_class_change_name(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        g1.name = "test"
        assert getattr(m.components, g1.name) is g1
    def test_components_class_change_name_del_default(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        g1.name = "test"
        with pytest.raises(AttributeError, match="object has no attribute 'Gaussian'"):
            getattr(m.components, "Gaussian")
    def test_components_class_change_invalid_name(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        g1.name = "1, Test This!"
        # invalid identifiers are slugified before becoming attributes
        assert getattr(m.components, slugify(g1.name, valid_variable_name=True)) is g1
    def test_components_class_change_name_del_default2(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        invalid_name = "1, Test This!"
        g1.name = invalid_name
        g1.name = "test"
        with pytest.raises(AttributeError, match=r"object has no attribute .*"):
            getattr(m.components, slugify(invalid_name))
    def test_snap_parameter_bounds(self):
        m = self.model
        g1 = hs.model.components1D.Gaussian()
        m.append(g1)
        g2 = hs.model.components1D.Gaussian()
        m.append(g2)
        g3 = hs.model.components1D.Gaussian()
        m.append(g3)
        g4 = hs.model.components1D.Gaussian()
        m.append(g4)
        p = hs.model.components1D.Polynomial(3, legacy=False)
        m.append(p)
        g1.A.value = 3.0
        g1.centre.bmin = 300.0
        g1.centre.value = 1.0
        g1.sigma.bmax = 15.0
        g1.sigma.value = 30
        g2.A.value = 1
        g2.A.bmin = 0.0
        g2.A.bmax = 3.0
        g2.centre.value = 0
        g2.centre.bmin = 1
        g2.centre.bmax = 3.0
        g2.sigma.value = 4
        g2.sigma.bmin = 1
        g2.sigma.bmax = 3.0
        # g3: fixed parameters must NOT be snapped
        g3.A.bmin = 0
        g3.A.value = -3
        g3.A.free = False
        g3.centre.value = 15
        g3.centre.bmax = 10
        g3.centre.free = False
        g3.sigma.value = 1
        g3.sigma.bmin = 0
        g3.sigma.bmax = 0
        # g4: inactive components must NOT be snapped
        g4.active = False
        g4.A.value = 300
        g4.A.bmin = 500
        g4.centre.value = 0
        g4.centre.bmax = -1
        g4.sigma.value = 1
        g4.sigma.bmin = 10
        p.a0.value = 1
        p.a1.value = 2
        p.a2.value = 3
        p.a3.value = 4
        p.a0.bmin = 2
        p.a1.bmin = 2
        p.a2.bmin = 2
        p.a3.bmin = 2
        p.a0.bmax = 3
        p.a1.bmax = 3
        p.a2.bmax = 3
        p.a3.bmax = 3
        m.ensure_parameters_in_bounds()
        np.testing.assert_allclose(g1.A.value, 3.0)
        np.testing.assert_allclose(g2.A.value, 1.0)
        np.testing.assert_allclose(g3.A.value, -3.0)
        np.testing.assert_allclose(g4.A.value, 300.0)
        np.testing.assert_allclose(g1.centre.value, 300.0)
        np.testing.assert_allclose(g2.centre.value, 1.0)
        np.testing.assert_allclose(g3.centre.value, 15.0)
        np.testing.assert_allclose(g4.centre.value, 0)
        np.testing.assert_allclose(g1.sigma.value, 15.0)
        np.testing.assert_allclose(g2.sigma.value, 3.0)
        np.testing.assert_allclose(g3.sigma.value, 0.0)
        np.testing.assert_allclose(g4.sigma.value, 1)
        np.testing.assert_almost_equal(p.a0.value, 2)
        np.testing.assert_almost_equal(p.a1.value, 2)
        np.testing.assert_almost_equal(p.a2.value, 3)
        np.testing.assert_almost_equal(p.a3.value, 3)
class TestModelPrintCurrentValues:
    """Smoke tests: print_current_values must not raise for any option combo."""
    def setup_method(self, method):
        np.random.seed(1)
        s = hs.signals.Signal1D(np.arange(10, 100, 0.1))
        s.axes_manager[0].scale = 0.1
        s.axes_manager[0].offset = 10
        m = s.create_model()
        with ignore_warning(message="The API of the `Polynomial` component"):
            m.append(hs.model.components1D.Polynomial(1))
        m.append(hs.model.components1D.Offset())
        self.s = s
        self.m = m
    @pytest.mark.parametrize("only_free", [True, False])
    @pytest.mark.parametrize("skip_multi", [True, False])
    def test_print_current_values(self, only_free, skip_multi):
        self.m.print_current_values(only_free, skip_multi)
    def test_print_current_values_component_list(self):
        self.m.print_current_values(component_list=list(self.m))
class TestStoreCurrentValues:
    """store_current_values writes to the parameter map only for active components."""
    def setup_method(self, method):
        self.m = hs.signals.Signal1D(np.arange(10)).create_model()
        self.o = hs.model.components1D.Offset()
        self.m.append(self.o)
    def test_active(self):
        self.o.offset.value = 2
        self.o.offset.std = 3
        self.m.store_current_values()
        assert self.o.offset.map["values"][0] == 2
        assert self.o.offset.map["is_set"][0]
    def test_not_active(self):
        self.o.active = False
        self.o.offset.value = 2
        self.o.offset.std = 3
        self.m.store_current_values()
        assert self.o.offset.map["values"][0] != 2
class TestSetCurrentValuesTo:
    """assign_current_values_to_all broadcasts values to every navigation index,
    for all components or only for an explicit component list."""
    def setup_method(self, method):
        self.m = hs.signals.Signal1D(np.arange(10).reshape(2, 5)).create_model()
        self.comps = [hs.model.components1D.Offset(), hs.model.components1D.Offset()]
        self.m.extend(self.comps)
    def test_set_all(self):
        for c in self.comps:
            c.offset.value = 2
        self.m.assign_current_values_to_all()
        assert (self.comps[0].offset.map["values"] == 2).all()
        assert (self.comps[1].offset.map["values"] == 2).all()
    def test_set_1(self):
        self.comps[1].offset.value = 2
        self.m.assign_current_values_to_all([self.comps[1]])
        assert (self.comps[0].offset.map["values"] != 2).all()
        assert (self.comps[1].offset.map["values"] == 2).all()
def test_fetch_values_from_array():
    """fetch_values_from_array must map values/stds onto free parameters.

    NOTE(review): the values are zipped against the free parameters sorted
    by name (A, centre, sigma) — the array order must match that sort.
    """
    m = hs.signals.Signal1D(np.arange(10)).create_model()
    gaus = hs.model.components1D.Gaussian(A=100, sigma=10, centre=3)
    m.append(gaus)
    values = np.array([1.2, 3.4, 5.6])
    stds = values - 1
    m.fetch_values_from_array(values, array_std=stds)
    parameters = sorted(gaus.free_parameters, key=lambda x: x.name)
    for v, s, p in zip(values, stds, parameters):
        assert p.value == v
        assert p.std == s
class TestAsSignal:
    """Tests for Model.as_signal: parallel/serial equivalence, component
    selection, and per-index component activity (active_is_multidimensional)."""
    def setup_method(self, method):
        self.m = hs.signals.Signal1D(np.arange(20).reshape(2, 2, 5)).create_model()
        self.comps = [hs.model.components1D.Offset(), hs.model.components1D.Offset()]
        self.m.extend(self.comps)
        for c in self.comps:
            c.offset.value = 2
        self.m.assign_current_values_to_all()
    @pytest.mark.parallel
    def test_threaded_identical(self):
        # all components
        s = self.m.as_signal(parallel=True)
        s1 = self.m.as_signal(parallel=False)
        np.testing.assert_allclose(s1.data, s.data)
        # more complicated
        self.m[0].active_is_multidimensional = True
        self.m[0]._active_array[0] = False
        for component in [0, 1]:
            s = self.m.as_signal(component_list=[component], parallel=True)
            s1 = self.m.as_signal(component_list=[component], parallel=False)
            np.testing.assert_allclose(s1.data, s.data)
    @pytest.mark.parametrize("parallel", [True, False])
    def test_all_components_simple(self, parallel):
        s = self.m.as_signal(parallel=parallel)
        assert np.all(s.data == 4.0)
    @pytest.mark.parametrize("parallel", [True, False])
    def test_one_component_simple(self, parallel):
        s = self.m.as_signal(component_list=[0], parallel=parallel)
        assert np.all(s.data == 2.0)
        assert self.m[1].active
    @pytest.mark.parametrize("parallel", [True, False])
    def test_all_components_multidim(self, parallel):
        self.m[0].active_is_multidimensional = True
        s = self.m.as_signal(parallel=parallel)
        assert np.all(s.data == 4.0)
        self.m[0]._active_array[0] = False
        s = self.m.as_signal(parallel=parallel)
        np.testing.assert_array_equal(
            s.data, np.array([np.ones((2, 5)) * 2, np.ones((2, 5)) * 4])
        )
        assert self.m[0].active_is_multidimensional
    @pytest.mark.parametrize("parallel", [True, False])
    def test_one_component_multidim(self, parallel):
        self.m[0].active_is_multidimensional = True
        s = self.m.as_signal(component_list=[0], parallel=parallel)
        assert np.all(s.data == 2.0)
        assert self.m[1].active
        assert not self.m[1].active_is_multidimensional
        s = self.m.as_signal(component_list=[1], parallel=parallel)
        np.testing.assert_equal(s.data, 2.0)
        assert self.m[0].active_is_multidimensional
        self.m[0]._active_array[0] = False
        s = self.m.as_signal(component_list=[1], parallel=parallel)
        assert np.all(s.data == 2.0)
        s = self.m.as_signal(component_list=[0], parallel=parallel)
        np.testing.assert_array_equal(
            s.data, np.array([np.zeros((2, 5)), np.ones((2, 5)) * 2])
        )
def test_as_signal_parallel():
    """Check the parallel as_signal path against the serial one.

    Bug fix: both calls previously used ``parallel=True``, so the test
    compared the parallel path with itself and could never detect a
    parallel/serial divergence.  The second call now uses
    ``parallel=False``, mirroring ``TestAsSignal.test_threaded_identical``.
    """
    np.random.seed(1)
    s = hs.signals.Signal1D(np.random.random((50, 10)))
    m = s.create_model()
    m.append(hs.model.components1D.PowerLaw())
    m.set_signal_range(2, 5)
    # HyperSpy 2.0: remove setting iterpath='serpentine'
    m.multifit(iterpath="serpentine")
    s1 = m.as_signal(out_of_range_to_nan=True, parallel=True)
    s2 = m.as_signal(out_of_range_to_nan=True, parallel=False)
    np.testing.assert_allclose(s1, s2)
@lazifyTestClass
class TestCreateModel:
    """create_model must dispatch to Model1D/Model2D by signal dimensionality."""
    def setup_method(self, method):
        self.s = hs.signals.Signal1D(np.asarray([0]))
        self.im = hs.signals.Signal2D(np.ones([1, 1]))
    def test_create_model(self):
        from hyperspy.models.model1d import Model1D
        from hyperspy.models.model2d import Model2D
        assert isinstance(self.s.create_model(), Model1D)
        assert isinstance(self.im.create_model(), Model2D)
class TestAdjustPosition:
    """Tests for the enable/disable_adjust_position widget bookkeeping."""
    def setup_method(self, method):
        self.s = hs.signals.Signal1D(np.random.rand(10, 10, 20))
        self.m = self.s.create_model()
    def test_enable_adjust_position(self):
        self.m.append(hs.model.components1D.Gaussian())
        self.m.enable_adjust_position()
        assert len(self.m._position_widgets) == 1
        # Check that both line and label was added
        assert len(list(self.m._position_widgets.values())[0]) == 2
    def test_disable_adjust_position(self):
        self.m.append(hs.model.components1D.Gaussian())
        self.m.enable_adjust_position()
        self.m.disable_adjust_position()
        assert len(self.m._position_widgets) == 0
    def test_enable_all(self):
        self.m.append(hs.model.components1D.Gaussian())
        self.m.enable_adjust_position()
        # components appended after enabling must get widgets too
        self.m.append(hs.model.components1D.Gaussian())
        assert len(self.m._position_widgets) == 2
    def test_enable_all_zero_start(self):
        self.m.enable_adjust_position()
        self.m.append(hs.model.components1D.Gaussian())
        assert len(self.m._position_widgets) == 1
    def test_manual_close(self):
        self.m.append(hs.model.components1D.Gaussian())
        self.m.append(hs.model.components1D.Gaussian())
        self.m.enable_adjust_position()
        # closing one widget of a pair leaves the other registered
        list(self.m._position_widgets.values())[0][0].close()
        assert len(self.m._position_widgets) == 2
        assert len(list(self.m._position_widgets.values())[0]) == 1
        list(self.m._position_widgets.values())[0][0].close()
        assert len(self.m._position_widgets) == 1
        assert len(list(self.m._position_widgets.values())[0]) == 2
        self.m.disable_adjust_position()
        assert len(self.m._position_widgets) == 0
def test_deprecated_private_functions():
    """The public set_boundaries / set_mpfit_parameters_info wrappers must
    emit a VisibleDeprecationWarning when called."""
    model = hs.signals.Signal1D(np.zeros(1)).create_model()
    for deprecated_call in (model.set_boundaries, model.set_mpfit_parameters_info):
        with pytest.warns(VisibleDeprecationWarning, match=r".* has been deprecated"):
            deprecated_call()
|
dnjohnstone/hyperspy
|
hyperspy/tests/model/test_model.py
|
Python
|
gpl-3.0
| 28,010
|
[
"Gaussian"
] |
5c436a3c2279a70a83d734ac222c870ef55b0ab67d265c15f668c3c5ef7e42b6
|
# disable missing docstring
# pylint: disable=missing-docstring
import json
from lettuce import world, step
from nose.tools import assert_equal, assert_true # pylint: disable=no-name-in-module
from common import type_in_codemirror, open_new_course
from advanced_settings import change_value, ADVANCED_MODULES_KEY
from course_import import import_file
# Labels of the entries in the component "Settings" editor, as displayed in
# the Studio UI; used to look up fields in the settings verification steps.
DISPLAY_NAME = "Display Name"
MAXIMUM_ATTEMPTS = "Maximum Attempts"
PROBLEM_WEIGHT = "Problem Weight"
RANDOMIZATION = 'Randomization'
SHOW_ANSWER = "Show Answer"
SHOW_RESET_BUTTON = "Show Reset Button"
TIMER_BETWEEN_ATTEMPTS = "Timer Between Attempts"
MATLAB_API_KEY = "Matlab API key"
# --- Component-creation steps -------------------------------------------
@step('I have created a Blank Common Problem$')
def i_created_blank_common_problem(step):
    """Create a fresh unit and add a Blank Common Problem to it."""
    step.given('I am in Studio editing a new unit')
    step.given("I have created another Blank Common Problem")
@step('I have created a unit with advanced module "(.*)"$')
def i_created_unit_with_advanced_module(step, advanced_module):
    """Create a unit after enabling *advanced_module* via Advanced Settings."""
    step.given('I am in Studio editing a new unit')
    # remember the unit URL so we can return after editing course settings
    url = world.browser.url
    step.given("I select the Advanced Settings")
    change_value(step, ADVANCED_MODULES_KEY, '["{}"]'.format(advanced_module))
    world.visit(url)
    world.wait_for_xmodule()
@step('I have created an advanced component "(.*)" of type "(.*)"')
def i_create_new_advanced_component(step, component_type, advanced_component):
    """Add an advanced component of the given type to the current unit."""
    world.create_component_instance(
        step=step,
        category='advanced',
        component_type=component_type,
        advanced_component=advanced_component
    )
@step('I have created another Blank Common Problem$')
def i_create_new_common_problem(step):
    """Add a Blank Common Problem component to the current unit."""
    world.create_component_instance(
        step=step,
        category='problem',
        component_type='Blank Common Problem'
    )
# --- Annotation and advanced-problem steps ------------------------------
@step('when I mouseover on "(.*)"')
def i_mouseover_on_html_component(step, element_class):
    """Fire a mouseover event on the element with the given CSS class."""
    action_css = '.{}'.format(element_class)
    world.trigger_event(action_css, event='mouseover')
@step(u'I can see Reply to Annotation link$')
def i_see_reply_to_annotation_link(_step):
    """Wait until the annotation reply link becomes visible."""
    css_selector = 'a.annotatable-reply'
    world.wait_for_visible(css_selector)
@step(u'I see that page has scrolled "(.*)" when I click on "(.*)" link$')
def i_see_annotation_problem_page_scrolls(_step, scroll_direction, link_css):
    """Click *link_css* and assert the page scrolled up or down.

    NOTE(review): a scroll_direction other than "up"/"down" silently
    asserts nothing — feature files must only use these two values.
    """
    scroll_js = "$(window).scrollTop();"
    scroll_height_before = world.browser.evaluate_script(scroll_js)
    world.css_click("a.{}".format(link_css))
    scroll_height_after = world.browser.evaluate_script(scroll_js)
    if scroll_direction == "up":
        assert scroll_height_after < scroll_height_before
    elif scroll_direction == "down":
        assert scroll_height_after > scroll_height_before
@step('I have created an advanced problem of type "(.*)"$')
def i_create_new_advanced_problem(step, component_type):
    """Add an advanced problem component of the given type."""
    world.create_component_instance(
        step=step,
        category='problem',
        component_type=component_type,
        is_advanced=True
    )
# --- Settings editor and display-name steps -----------------------------
@step('I edit and select Settings$')
def i_edit_and_select_settings(_step):
    """Open the component editor and switch to the Settings tab."""
    world.edit_component_and_select_settings()
@step('I see the advanced settings and their expected values$')
def i_see_advanced_settings_with_values(step):
    """Verify all settings rows: [label, value, explicitly-set flag]."""
    world.verify_all_setting_entries(
        [
            [DISPLAY_NAME, "Blank Common Problem", True],
            [MATLAB_API_KEY, "", False],
            [MAXIMUM_ATTEMPTS, "", False],
            [PROBLEM_WEIGHT, "", False],
            [RANDOMIZATION, "Never", False],
            [SHOW_ANSWER, "Finished", False],
            [SHOW_RESET_BUTTON, "False", False],
            [TIMER_BETWEEN_ATTEMPTS, "0", False],
        ])
@step('I can modify the display name')
def i_can_modify_the_display_name(_step):
    # Verifying that the display name can be a string containing a floating point value
    # (to confirm that we don't throw an error because it is of the wrong type).
    index = world.get_setting_entry_index(DISPLAY_NAME)
    world.set_field_value(index, '3.4')
    verify_modified_display_name()
@step('my display name change is persisted on save')
def my_display_name_change_is_persisted_on_save(step):
    """Save, reopen, and check the modified display name survived."""
    world.save_component_and_reopen(step)
    verify_modified_display_name()
@step('the problem display name is "(.*)"$')
def verify_problem_display_name(step, name):
    """Assert the rendered problem header equals *name* (upper-cased by CSS)."""
    assert_equal(name.upper(), world.browser.find_by_css('.problem-header').text)
# --- Display-name edge cases: special characters, HTML, unset -----------
@step('I can specify special characters in the display name')
def i_can_modify_the_display_name_with_special_chars(_step):
    """Set a display name containing quote and ampersand characters."""
    index = world.get_setting_entry_index(DISPLAY_NAME)
    world.set_field_value(index, "updated ' \" &")
    verify_modified_display_name_with_special_chars()
@step('I can specify html in the display name and save')
def i_can_modify_the_display_name_with_html(_step):
    """
    If alert appear on save then UnexpectedAlertPresentException
    will occur and test will fail.
    """
    index = world.get_setting_entry_index(DISPLAY_NAME)
    world.set_field_value(index, "<script>alert('test')</script>")
    verify_modified_display_name_with_html()
    world.save_component()
@step('my special characters and persisted on save')
def special_chars_persisted_on_save(step):
    """Save, reopen, and check the special characters survived."""
    world.save_component_and_reopen(step)
    verify_modified_display_name_with_special_chars()
@step('I can revert the display name to unset')
def can_revert_display_name_to_unset(_step):
    """Clear the display-name override back to the inherited default."""
    world.revert_setting_entry(DISPLAY_NAME)
    verify_unset_display_name()
@step('my display name is unset on save')
def my_display_name_is_persisted_on_save(step):
    """Save, reopen, and check the display name stayed unset."""
    world.save_component_and_reopen(step)
    verify_unset_display_name()
# --- Randomization, weight and max-attempts steps ------------------------
@step('I can select Per Student for Randomization')
def i_can_select_per_student_for_randomization(_step):
    """Choose "Per Student" in the Randomization dropdown."""
    world.browser.select(RANDOMIZATION, "Per Student")
    verify_modified_randomization()
@step('my change to randomization is persisted')
def my_change_to_randomization_is_persisted(step):
    """Save, reopen, and check the randomization choice survived."""
    world.save_component_and_reopen(step)
    verify_modified_randomization()
@step('I can revert to the default value for randomization')
def i_can_revert_to_default_for_randomization(step):
    """Revert randomization and check it reads the default "Never"."""
    world.revert_setting_entry(RANDOMIZATION)
    world.save_component_and_reopen(step)
    world.verify_setting_entry(world.get_setting_entry(RANDOMIZATION), RANDOMIZATION, "Never", False)
@step('I can set the weight to "(.*)"?')
def i_can_set_weight(_step, weight):
    """Type *weight* into the Problem Weight field."""
    set_weight(weight)
    verify_modified_weight()
@step('my change to weight is persisted')
def my_change_to_weight_is_persisted(step):
    """Save, reopen, and check the weight survived."""
    world.save_component_and_reopen(step)
    verify_modified_weight()
@step('I can revert to the default value of unset for weight')
def i_can_revert_to_default_for_unset_weight(step):
    """Revert weight and check it goes back to the empty (unset) value."""
    world.revert_setting_entry(PROBLEM_WEIGHT)
    world.save_component_and_reopen(step)
    world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", False)
@step('if I set the weight to "(.*)", it remains unset')
def set_the_weight_to_abc(step, bad_weight):
    """An invalid weight must never reach the model."""
    set_weight(bad_weight)
    # We show the clear button immediately on type, hence the "True" here.
    world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", True)
    world.save_component_and_reopen(step)
    # But no change was actually ever sent to the model, so on reopen, explicitly_set is False
    world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", False)
@step('if I set the max attempts to "(.*)", it will persist as a valid integer$')
def set_the_max_attempts(step, max_attempts_set):
    """Any typed max-attempts value must persist as a non-negative integer."""
    # on firefox with selenium, the behavior is different.
    # eg 2.34 displays as 2.34 and is persisted as 2
    index = world.get_setting_entry_index(MAXIMUM_ATTEMPTS)
    world.set_field_value(index, max_attempts_set)
    world.save_component_and_reopen(step)
    value = world.css_value('input.setting-input', index=index)
    assert value != "", "max attempts is blank"
    assert int(value) >= 0
@step('Edit High Level Source is not visible')
def edit_high_level_source_not_visible(step):
verify_high_level_source_links(step, False)
@step('Edit High Level Source is visible')
def edit_high_level_source_links_visible(step):
verify_high_level_source_links(step, True)
@step('If I press Cancel my changes are not persisted')
def cancel_does_not_save_changes(step):
    """Cancel the editor, then re-run earlier steps to confirm settings are unchanged."""
    world.cancel_component(step)
    step.given("I edit and select Settings")
    step.given("I see the advanced settings and their expected values")
@step('I have enabled latex compiler')
def enable_latex_compiler(step):
    """Turn on the course's LaTeX compiler via Advanced Settings, then return to the original page."""
    url = world.browser.url
    step.given("I select the Advanced Settings")
    change_value(step, 'Enable LaTeX Compiler', 'true')
    # Navigate back to where the scenario left off before the settings detour.
    world.visit(url)
    world.wait_for_xmodule()
@step('I have created a LaTeX Problem')
def create_latex_problem(step):
    """Create a fresh unit with the LaTeX compiler enabled and add a LaTeX problem component."""
    step.given('I am in Studio editing a new unit')
    step.given('I have enabled latex compiler')
    world.create_component_instance(
        step=step,
        category='problem',
        component_type='Problem Written in LaTeX',
        is_advanced=True
    )
@step('I edit and compile the High Level Source')
def edit_latex_source(_step):
    """Open the LaTeX source editor, type into it, and trigger a compile."""
    open_high_level_source()
    type_in_codemirror(1, "hi")
    world.css_click('.hls-compile')
@step('my change to the High Level Source is persisted')
def high_level_source_persisted(_step):
    """Wait (up to 10s) for the compiled problem text to reflect the edit."""
    def verify_text(driver):
        # `driver` is required by the wait_for callback signature but unused here.
        css_sel = '.problem div>span'
        return world.css_text(css_sel) == 'hi'
    world.wait_for(verify_text, timeout=10)
@step('I view the High Level Source I see my changes')
def high_level_source_in_editor(_step):
    """Reopen the LaTeX source editor and confirm the edited text is shown."""
    open_high_level_source()
    assert_equal('hi', world.css_value('.source-edit-box'))
@step(u'I have an empty course')
def i_have_empty_course(step):
    """Create a brand-new, empty course for the scenario."""
    open_new_course()
@step(u'I import the file "([^"]*)"$')
def i_import_the_file(_step, filename):
    """Import the given course export file into the current course."""
    import_file(filename)
@step(u'I go to the vertical "([^"]*)"$')
def i_go_to_vertical(_step, vertical):
    """Click the outline link whose text matches *vertical*."""
    world.css_click("span:contains('{0}')".format(vertical))
@step(u'I go to the unit "([^"]*)"$')
def i_go_to_unit(_step, unit):
    """Navigate to the unit page by following the link wrapping the matching span."""
    # Done via JS because the clickable anchor wraps the span, not vice versa.
    loc = "window.location = $(\"span:contains('{0}')\").closest('a').attr('href')".format(unit)
    world.browser.execute_script(loc)
@step(u'I see a message that says "([^"]*)"$')
def i_can_see_message(_step, msg):
    """Assert the page title element contains *msg*."""
    msg = json.dumps(msg)  # escape quotes
    world.css_has_text("h2.title", msg)
@step(u'I can edit the problem$')
def i_can_edit_problem(_step):
    """Open the component editor for the problem."""
    world.edit_component()
@step(u'I edit first blank advanced problem for annotation response$')
def i_edit_blank_problem_for_annotation_response(_step):
    """Replace the second component's XML with an annotationresponse problem and save."""
    world.edit_component(1)
    text = """
        <problem>
            <annotationresponse>
                <annotationinput><text>Text of annotation</text></annotationinput>
            </annotationresponse>
        </problem>"""
    type_in_codemirror(0, text)
    world.save_component()
@step(u'I can see cheatsheet$')
def verify_cheat_sheet_displaying(_step):
    """Toggle the simple-editor cheatsheet and wait for it to become visible."""
    world.css_click("a.cheatsheet-toggle")
    css_selector = 'article.simple-editor-cheatsheet'
    world.wait_for_visible(css_selector)
def verify_high_level_source_links(step, visible):
    """Assert presence/absence of the LaTeX compiler button, then close the editor.

    visible -- True to require the button, False to require its absence.
    """
    if visible:
        assert_true(world.is_css_present('.launch-latex-compiler'),
                    msg="Expected to find the latex button but it is not present.")
    else:
        assert_true(world.is_css_not_present('.launch-latex-compiler'),
                    msg="Expected not to find the latex button but it is present.")
    world.cancel_component(step)
def verify_modified_weight():
    """Weight setting must show '3.5' and be explicitly set."""
    world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "3.5", True)
def verify_modified_randomization():
    """Randomization setting must show 'Per Student' and be explicitly set."""
    world.verify_setting_entry(world.get_setting_entry(RANDOMIZATION), RANDOMIZATION, "Per Student", True)
def verify_modified_display_name():
    """Display name must show '3.4' and be explicitly set."""
    world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, '3.4', True)
def verify_modified_display_name_with_special_chars():
    """Display name containing quotes/ampersand must round-trip unescaped."""
    world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, "updated ' \" &", True)
def verify_modified_display_name_with_html():
    """A display name containing HTML must be shown as literal text, not executed."""
    world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, "<script>alert('test')</script>", True)
def verify_unset_display_name():
    """Display name must fall back to its default and read as not explicitly set."""
    world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, 'Blank Advanced Problem', False)
def set_weight(weight):
    """Type *weight* into the problem-weight settings field (no save)."""
    index = world.get_setting_entry_index(PROBLEM_WEIGHT)
    world.set_field_value(index, weight)
def open_high_level_source():
    """Open the component editor and launch the LaTeX high-level source view."""
    world.edit_component()
    world.css_click('.launch-latex-compiler > a')
|
ahmadiga/min_edx
|
cms/djangoapps/contentstore/features/problem-editor.py
|
Python
|
agpl-3.0
| 12,757
|
[
"VisIt"
] |
b74447e2c36dfd36dea980def5d93cb3694833b183998e776783692d9cc61e78
|
from typing import (
AbstractSet,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import django.db.utils
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.files import File
from django.db import IntegrityError, connection, transaction
from django.db.models import Count, Exists, F, Max, OuterRef, Q, Sum
from django.db.models.query import QuerySet
from django.utils.html import escape
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from psycopg2.extras import execute_values
from psycopg2.sql import SQL
from typing_extensions import TypedDict
from analytics.lib.counts import COUNT_STATS, RealmCount, do_increment_logging_stat
from analytics.models import StreamCount
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
confirmation_url,
create_confirmation_link,
generate_key,
)
from zerver.decorator import statsd_increment
from zerver.lib import bugdown
from zerver.lib.addressee import Addressee
from zerver.lib.alert_words import (
add_user_alert_words,
get_alert_word_automaton,
remove_user_alert_words,
)
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config
from zerver.lib.bugdown import version as bugdown_version
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.cache import (
bot_dict_fields,
cache_delete,
cache_delete_many,
cache_set,
cache_set_many,
cache_with_key,
delete_user_profile_caches,
display_recipient_cache_key,
flush_user_profile,
to_dict_cache_key_id,
user_profile_by_api_key_cache_key,
user_profile_by_email_cache_key,
)
from zerver.lib.context_managers import lockfile
from zerver.lib.create_user import create_user, get_display_email_address
from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.email_validation import (
email_reserved_for_system_bots_error,
get_existing_user_errors,
get_realm_email_validator,
validate_email_is_valid,
)
from zerver.lib.emoji import emoji_name_to_emoji_code, get_emoji_file_name
from zerver.lib.exceptions import (
BugdownRenderingException,
ErrorCode,
JsonableError,
StreamDoesNotExistError,
StreamWithIDDoesNotExistError,
)
from zerver.lib.export import get_realm_exports_serialized
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.i18n import get_language_name
from zerver.lib.message import (
MessageDict,
access_message,
render_markdown,
truncate_body,
truncate_topic,
update_first_visible_message_id,
)
from zerver.lib.pysa import mark_sanitized
from zerver.lib.queue import queue_json_publish
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_data
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import (
FromAddress,
clear_scheduled_emails,
clear_scheduled_invitation_emails,
send_email,
send_email_to_admins,
)
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.storage import static_path
from zerver.lib.stream_recipient import StreamRecipientMap
from zerver.lib.stream_subscription import (
get_active_subscriptions_for_stream_id,
get_active_subscriptions_for_stream_ids,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_stream_subscriptions_for_users,
get_subscribed_stream_ids_for_user,
num_subscribers_for_stream_id,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.streams import (
access_stream_for_send_message,
check_stream_name,
create_stream_if_needed,
get_default_value_for_history_public_to_subscribers,
render_stream_description,
send_stream_creation_event,
subscribed_to_stream,
)
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.topic import (
LEGACY_PREV_TOPIC,
ORIG_TOPIC,
TOPIC_LINKS,
TOPIC_NAME,
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
save_message_for_edit_use_case,
update_messages_for_topic_edit,
)
from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute
from zerver.lib.types import ProfileFieldData
from zerver.lib.upload import (
claim_attachment,
delete_avatar_image,
delete_export_tarball,
delete_message_image,
upload_emoji_image,
)
from zerver.lib.user_groups import access_user_group_by_id, create_user_group
from zerver.lib.user_status import update_user_status
from zerver.lib.users import (
check_bot_name_available,
check_full_name,
format_user_row,
get_api_key,
user_profile_to_user_row,
)
from zerver.lib.utils import generate_api_key, log_statsd_event
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions
from zerver.models import (
MAX_MESSAGE_LENGTH,
Attachment,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
DefaultStreamGroup,
EmailChangeStatus,
Message,
MultiuseInvite,
PreregistrationUser,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
Recipient,
ScheduledEmail,
ScheduledMessage,
Service,
Stream,
SubMessage,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
UserStatus,
active_non_guest_user_ids,
active_user_ids,
custom_profile_fields_for_realm,
email_to_username,
filter_to_valid_prereg_users,
get_active_streams,
get_bot_dicts_in_realm,
get_bot_services,
get_client,
get_default_stream_groups,
get_huddle_recipient,
get_huddle_user_ids,
get_old_unclaimed_attachments,
get_stream,
get_stream_by_id_in_realm,
get_stream_cache_key,
get_system_bot,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
get_user_profile_by_id,
is_cross_realm_bot_email,
query_for_ids,
realm_filters_for_realm,
stream_name_in_use,
validate_attachment_request,
)
from zerver.tornado.event_queue import send_event
if settings.BILLING_ENABLED:
from corporate.lib.stripe import update_license_ledger_if_needed, downgrade_now
import datetime
import itertools
import logging
import os
import platform
import time
from collections import defaultdict
from operator import itemgetter
import ujson
# This will be used to type annotate parameters in a function if the function
# works on both str and unicode in python 2 but in python 3 it only works on str.
SizedTextIterable = Union[Sequence[str], AbstractSet[str]]
# How many recent public-stream messages are copied into a brand-new user's
# history, and how many of the newest of those stay marked unread.
ONBOARDING_TOTAL_MESSAGES = 1000
ONBOARDING_UNREAD_MESSAGES = 20
# Hex color palette used when auto-assigning colors to stream subscriptions.
STREAM_ASSIGNMENT_COLORS = [
    "#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
    "#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
    "#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
    "#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
    "#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
    "#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]
def subscriber_info(user_id: int) -> Dict[str, Any]:
    """Build the per-subscriber payload: the user's id plus a flags list
    marking the message as already read."""
    payload: Dict[str, Any] = {'id': user_id}
    payload['flags'] = ['read']
    return payload
# Store an event in the log for re-importing messages
def log_event(event: MutableMapping[str, Any]) -> None:
    """Append *event* as one JSON line to today's per-host event log file.

    No-op when EVENT_LOG_DIR is unset.  A "timestamp" field is added if
    the event lacks one.  Writes are serialized through a lock file so
    concurrent processes don't interleave lines.
    """
    if settings.EVENT_LOG_DIR is None:
        return
    if "timestamp" not in event:
        event["timestamp"] = time.time()
    # makedirs(exist_ok=True) avoids the check-then-create race that the
    # previous os.path.exists()/os.mkdir() pair had when multiple
    # processes logged their first event simultaneously.
    os.makedirs(settings.EVENT_LOG_DIR, exist_ok=True)
    template = os.path.join(settings.EVENT_LOG_DIR,
                            '%s.' + platform.node() +
                            timezone_now().strftime('.%Y-%m-%d'))
    with lockfile(template % ('lock',)):
        with open(template % ('events',), 'a') as log:
            log.write(ujson.dumps(event) + '\n')
def can_access_stream_user_ids(stream: Stream) -> Set[int]:
    # return user ids of users who can access the attributes of
    # a stream, such as its name/description.
    if stream.is_public():
        # For a public stream, this is everyone in the realm
        # except unsubscribed guest users
        return public_stream_user_ids(stream)
    else:
        # for a private stream, it's subscribers plus realm admins.
        return private_stream_user_ids(
            stream.id) | {user.id for user in stream.realm.get_admin_users_and_bots()}
def private_stream_user_ids(stream_id: int) -> Set[int]:
    """Return the ids of users with an active subscription to this stream."""
    # TODO: Find similar queries elsewhere and de-duplicate this code.
    subscriptions = get_active_subscriptions_for_stream_id(stream_id)
    return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')}
def public_stream_user_ids(stream: Stream) -> Set[int]:
    """Return ids of all non-guest realm users plus guests subscribed to *stream*."""
    guest_subscriptions = get_active_subscriptions_for_stream_id(
        stream.id).filter(user_profile__role=UserProfile.ROLE_GUEST)
    guest_subscriptions = {sub['user_profile_id'] for sub in guest_subscriptions.values('user_profile_id')}
    return set(active_non_guest_user_ids(stream.realm_id)) | guest_subscriptions
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
    """Return the user ids who should receive events about this bot.

    For a "private" bot (either default stream is invite-only), only the
    owner; otherwise the owner plus all human realm admins.
    """
    # NOTE: relies on `and` binding tighter than `or`:
    # (sending_stream and its invite_only) or (register_stream and its invite_only)
    is_private_bot = (
        user_profile.default_sending_stream and
        user_profile.default_sending_stream.invite_only or
        user_profile.default_events_register_stream and
        user_profile.default_events_register_stream.invite_only)
    if is_private_bot:
        return {user_profile.bot_owner_id}
    else:
        users = {user.id for user in user_profile.realm.get_human_admin_users()}
        users.add(user_profile.bot_owner_id)
        return users
def realm_user_count(realm: Realm) -> int:
    """Count active human (non-bot) users in *realm*."""
    return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count()
def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]:
    """Return active-human counts per role plus a total active-bot count,
    keyed for storage in RealmAuditLog.extra_data."""
    human_counts = {UserProfile.ROLE_REALM_ADMINISTRATOR: 0,
                    UserProfile.ROLE_REALM_OWNER: 0,
                    UserProfile.ROLE_MEMBER: 0,
                    UserProfile.ROLE_GUEST: 0}
    # Single GROUP BY query; roles with no users keep their 0 default.
    # (Removed a redundant list() around the queryset -- iterating the
    # queryset directly produces the same rows without an extra copy.)
    for value_dict in UserProfile.objects.filter(
            realm=realm, is_bot=False, is_active=True).values('role').annotate(Count('role')):
        human_counts[value_dict['role']] = value_dict['role__count']
    bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count()
    return {
        RealmAuditLog.ROLE_COUNT_HUMANS: human_counts,
        RealmAuditLog.ROLE_COUNT_BOTS: bot_count,
    }
def get_signups_stream(realm: Realm) -> Stream:
    # This one-liner helps us work around a lint rule.
    return get_stream("signups", realm)
def notify_new_user(user_profile: UserProfile) -> None:
    """Announce a signup on the realm's signup-notifications stream and on
    the "signups" stream in the notification bot's (admin) realm."""
    sender_email = settings.NOTIFICATION_BOT
    sender = get_system_bot(sender_email)
    user_count = realm_user_count(user_profile.realm)
    signup_notifications_stream = user_profile.realm.get_signup_notifications_stream()
    # Send notification to realm signup notifications stream if it exists
    # Don't send notification for the first user in a realm
    if signup_notifications_stream is not None and user_count > 1:
        internal_send_stream_message(
            user_profile.realm,
            sender,
            signup_notifications_stream,
            "signups",
            f"@_**{user_profile.full_name}|{user_profile.id}** just signed up for Zulip. (total: {user_count})",
        )
    # We also send a notification to the Zulip administrative realm
    admin_realm = sender.realm
    try:
        # Check whether the stream exists
        signups_stream = get_signups_stream(admin_realm)
        internal_send_stream_message(
            admin_realm,
            sender,
            signups_stream,
            user_profile.realm.display_subdomain,
            f"{user_profile.full_name} <`{user_profile.email}`> just signed up for Zulip! (total: **{user_count}**)",
        )
    except Stream.DoesNotExist:
        # If the signups stream hasn't been created in the admin
        # realm, don't auto-create it to send to it; just do nothing.
        pass
def notify_invites_changed(user_profile: UserProfile) -> None:
    """Send an invites_changed event to all realm admins (and admin bots)."""
    event = dict(type="invites_changed")
    admin_ids = [user.id for user in
                 user_profile.realm.get_admin_users_and_bots()]
    send_event(user_profile.realm, event, admin_ids)
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
    """Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public
    streams, so you have something to look at in your home view once
    you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES
    are marked unread.
    """
    one_week_ago = timezone_now() - datetime.timedelta(weeks=1)
    # Only public (non-invite-only) streams contribute history.
    recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only]
    recent_messages = Message.objects.filter(recipient_id__in=recipient_ids,
                                             date_sent__gt=one_week_ago).order_by("-id")
    message_ids_to_use = list(reversed(recent_messages.values_list(
        'id', flat=True)[0:ONBOARDING_TOTAL_MESSAGES]))
    if len(message_ids_to_use) == 0:
        return
    # Handle the race condition where a message arrives between
    # bulk_add_subscriptions above and the Message query just above
    already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use,
                                                 user_profile=user_profile).values_list("message_id",
                                                                                        flat=True))
    # Mark the newest ONBOARDING_UNREAD_MESSAGES as unread.
    marked_unread = 0
    ums_to_create = []
    # Iterate newest-first so the unread quota is spent on recent messages.
    for message_id in reversed(message_ids_to_use):
        if message_id in already_ids:
            continue
        um = UserMessage(user_profile=user_profile, message_id=message_id)
        if marked_unread < ONBOARDING_UNREAD_MESSAGES:
            marked_unread += 1
        else:
            um.flags = UserMessage.flags.read
        ums_to_create.append(um)
    # Re-reverse so rows are created oldest-first.
    UserMessage.objects.bulk_create(reversed(ums_to_create))
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
# * subscribe the user to newsletter if newsletter_data is specified
def process_new_human_user(user_profile: UserProfile,
                           prereg_user: Optional[PreregistrationUser]=None,
                           newsletter_data: Optional[Mapping[str, str]]=None,
                           default_stream_groups: Sequence[DefaultStreamGroup]=[],
                           realm_creation: bool=False) -> None:
    """Finish account setup for a new human user: subscribe to streams,
    seed message history, notify others, retire PreregistrationUser rows,
    send welcome PMs/emails, and optionally sign up for the newsletter."""
    mit_beta_user = user_profile.realm.is_zephyr_mirror_realm
    if prereg_user is not None:
        prereg_user.status = confirmation_settings.STATUS_ACTIVE
        prereg_user.save(update_fields=['status'])
        streams = prereg_user.streams.all()
        acting_user: Optional[UserProfile] = prereg_user.referred_by
    else:
        streams = []
        acting_user = None
    # If the user's invitation didn't explicitly list some streams, we
    # add the default streams
    if len(streams) == 0:
        streams = get_default_subs(user_profile)
    for default_stream_group in default_stream_groups:
        default_stream_group_streams = default_stream_group.streams.all()
        for stream in default_stream_group_streams:
            if stream not in streams:
                streams.append(stream)
    bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user)
    add_new_user_history(user_profile, streams)
    # mit_beta_users don't have a referred_by field
    if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None:
        # This is a cross-realm private message.
        internal_send_private_message(
            user_profile.realm,
            get_system_bot(settings.NOTIFICATION_BOT),
            prereg_user.referred_by,
            f"{user_profile.full_name} <`{user_profile.email}`> accepted your invitation to join Zulip!",
        )
    # Mark any other PreregistrationUsers that are STATUS_ACTIVE as
    # inactive so we can keep track of the PreregistrationUser we
    # actually used for analytics
    if prereg_user is not None:
        PreregistrationUser.objects.filter(
            email__iexact=user_profile.delivery_email).exclude(id=prereg_user.id)\
                                   .update(status=confirmation_settings.STATUS_REVOKED)
        if prereg_user.referred_by is not None:
            notify_invites_changed(user_profile)
    else:
        PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email)\
                                   .update(status=confirmation_settings.STATUS_REVOKED)
    notify_new_user(user_profile)
    # Clear any scheduled invitation emails to prevent them
    # from being sent after the user is created.
    clear_scheduled_invitation_emails(user_profile.delivery_email)
    if user_profile.realm.send_welcome_emails:
        enqueue_welcome_emails(user_profile, realm_creation)
    # We have an import loop here; it's intentional, because we want
    # to keep all the onboarding code in zerver/lib/onboarding.py.
    from zerver.lib.onboarding import send_initial_pms
    send_initial_pms(user_profile)
    if newsletter_data is not None:
        # If the user was created automatically via the API, we may
        # not want to register them for the newsletter
        queue_json_publish(
            "signups",
            {
                'email_address': user_profile.delivery_email,
                'user_id': user_profile.id,
                'merge_fields': {
                    'NAME': user_profile.full_name,
                    'REALM_ID': user_profile.realm_id,
                    'OPTIN_IP': newsletter_data["IP"],
                    'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)),
                },
            },
            lambda event: None)
def notify_created_user(user_profile: UserProfile) -> None:
    """Broadcast a realm_user/add event for a freshly created user to all
    active users in the realm."""
    user_row = user_profile_to_user_row(user_profile)
    person = format_user_row(user_profile.realm, user_profile, user_row,
                             # Since we don't know what the client
                             # supports at this point in the code, we
                             # just assume client_gravatar=False :(
                             client_gravatar=False,
                             # We assume there's no custom profile
                             # field data for a new user; initial
                             # values are expected to be added in a
                             # later event.
                             custom_profile_field_data={})
    event: Dict[str, Any] = dict(type="realm_user", op="add", person=person)
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
    """Build the realm_bot/add event payload describing a newly created bot."""
    def stream_name(stream: Optional[Stream]) -> Optional[str]:
        # Small helper: None-safe access to a stream's name.
        if not stream:
            return None
        return stream.name
    default_sending_stream_name = stream_name(user_profile.default_sending_stream)
    default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)
    bot = dict(email=user_profile.email,
               user_id=user_profile.id,
               full_name=user_profile.full_name,
               bot_type=user_profile.bot_type,
               is_active=user_profile.is_active,
               api_key=get_api_key(user_profile),
               default_sending_stream=default_sending_stream_name,
               default_events_register_stream=default_events_register_stream_name,
               default_all_public_streams=user_profile.default_all_public_streams,
               avatar_url=avatar_url(user_profile),
               services = get_service_dicts_for_bot(user_profile.id),
               )
    # Set the owner key only when the bot has an owner.
    # The default bots don't have an owner. So don't
    # set the owner key while reactivating them.
    if user_profile.bot_owner is not None:
        bot['owner_id'] = user_profile.bot_owner.id
    return dict(type="realm_bot", op="add", bot=bot)
def notify_created_bot(user_profile: UserProfile) -> None:
    """Send the realm_bot/add event to the bot's owner and realm admins."""
    event = created_bot_event(user_profile)
    send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int]=None) -> None:
    """Bulk-create users from (full_name, email) pairs; short name is the
    local part of the email.  All are active; bot_type applies to all."""
    user_set = set()
    for full_name, email in name_list:
        short_name = email_to_username(email)
        user_set.add((email, full_name, short_name, True))
    bulk_create_users(realm, user_set, bot_type)
def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str,
                   short_name: str, bot_type: Optional[int]=None, role: Optional[int]=None,
                   bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None,
                   timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR,
                   default_sending_stream: Optional[Stream]=None,
                   default_events_register_stream: Optional[Stream]=None,
                   default_all_public_streams: Optional[bool]=None,
                   prereg_user: Optional[PreregistrationUser]=None,
                   newsletter_data: Optional[Dict[str, str]]=None,
                   default_stream_groups: Sequence[DefaultStreamGroup]=[],
                   source_profile: Optional[UserProfile]=None,
                   realm_creation: bool=False) -> UserProfile:
    """Create a user, write the audit log + usage stats, notify clients, and
    (for humans) run the full onboarding flow.  Returns the new UserProfile."""
    user_profile = create_user(email=email, password=password, realm=realm,
                               full_name=full_name, short_name=short_name,
                               role=role, bot_type=bot_type, bot_owner=bot_owner,
                               tos_version=tos_version, timezone=timezone, avatar_source=avatar_source,
                               default_sending_stream=default_sending_stream,
                               default_events_register_stream=default_events_register_stream,
                               default_all_public_streams=default_all_public_streams,
                               source_profile=source_profile)
    event_time = user_profile.date_joined
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile,
        event_type=RealmAuditLog.USER_CREATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)
    # Note that for bots, the caller will send an additional event
    # with bot-specific info like services.
    notify_created_user(user_profile)
    if bot_type is None:
        process_new_human_user(user_profile, prereg_user=prereg_user,
                               newsletter_data=newsletter_data,
                               default_stream_groups=default_stream_groups,
                               realm_creation=realm_creation)
    return user_profile
def do_activate_user(user_profile: UserProfile) -> None:
    """Activate a never-active (e.g. mirror-dummy) account: reset join date,
    password and ToS version, log the activation, and notify clients."""
    user_profile.is_active = True
    user_profile.is_mirror_dummy = False
    user_profile.set_unusable_password()
    user_profile.date_joined = timezone_now()
    user_profile.tos_version = settings.TOS_VERSION
    user_profile.save(update_fields=["is_active", "date_joined", "password",
                                     "is_mirror_dummy", "tos_version"])
    event_time = user_profile.date_joined
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile,
        event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)
    notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
    # Unlike do_activate_user, this is meant for re-activating existing users,
    # so it doesn't reset their password, etc.
    user_profile.is_active = True
    user_profile.save(update_fields=["is_active"])
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)
    notify_created_user(user_profile)
    # Bots additionally need their realm_bot/add event re-sent.
    if user_profile.is_bot:
        notify_created_bot(user_profile)
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
    """Queryset of active, non-bot users in *realm* (lazily evaluated)."""
    return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
def do_set_realm_property(realm: Realm, name: str, value: Any) -> None:
    """Takes in a realm object, the name of an attribute to update, and the
    value to update.
    """
    property_type = Realm.property_types[name]
    # NOTE(review): `assert` is stripped under `python -O`; this is a
    # programming-error guard, not input validation.
    assert isinstance(value, property_type), (
        f'Cannot update {name}: {value} is not an instance of {property_type}')
    old_value = getattr(realm, name)
    setattr(realm, name, value)
    realm.save(update_fields=[name])
    event = dict(
        type='realm',
        op='update',
        property=name,
        value=value,
    )
    send_event(realm, event, active_user_ids(realm.id))
    if name == "email_address_visibility":
        if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]:
            # We use real email addresses on UserProfile.email only if
            # EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so
            # changes between values that will not require changing
            # that field, so we can save work and return here.
            return
        user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False)
        for user_profile in user_profiles:
            user_profile.email = get_display_email_address(user_profile, realm)
            # TODO: Design a bulk event for this or force-reload all clients
            send_user_email_update_event(user_profile)
        UserProfile.objects.bulk_update(user_profiles, ['email'])
        # bulk_update skips signals, so flush each profile's cache by hand.
        for user_profile in user_profiles:
            flush_user_profile(sender=UserProfile, instance=user_profile)
def do_set_realm_authentication_methods(realm: Realm,
                                        authentication_methods: Dict[str, bool]) -> None:
    """Update the realm's auth-method bitfield from a {name: enabled} map and
    broadcast the new configuration."""
    for key, value in list(authentication_methods.items()):
        index = getattr(realm.authentication_methods, key).number
        realm.authentication_methods.set_bit(index, int(value))
    realm.save(update_fields=['authentication_methods'])
    event = dict(
        type="realm",
        op="update_dict",
        property='default',
        data=dict(authentication_methods=realm.authentication_methods_dict()),
    )
    send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_editing(realm: Realm,
                                 allow_message_editing: bool,
                                 message_content_edit_limit_seconds: int,
                                 allow_community_topic_editing: bool) -> None:
    """Update the realm's three message-editing settings together and
    broadcast them in a single update_dict event."""
    realm.allow_message_editing = allow_message_editing
    realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
    realm.allow_community_topic_editing = allow_community_topic_editing
    realm.save(update_fields=['allow_message_editing',
                              'allow_community_topic_editing',
                              'message_content_edit_limit_seconds',
                              ],
               )
    event = dict(
        type="realm",
        op="update_dict",
        property="default",
        data=dict(allow_message_editing=allow_message_editing,
                  message_content_edit_limit_seconds=message_content_edit_limit_seconds,
                  allow_community_topic_editing=allow_community_topic_editing),
    )
    send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_deleting(realm: Realm,
                                  message_content_delete_limit_seconds: int) -> None:
    """Update the realm's message-deletion time limit and broadcast it."""
    realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds
    realm.save(update_fields=['message_content_delete_limit_seconds'])
    event = dict(
        type="realm",
        op="update_dict",
        property="default",
        data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds),
    )
    send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_notifications_stream(realm: Realm, stream: Stream, stream_id: int) -> None:
    """Set the realm's notifications stream and broadcast the new stream id."""
    realm.notifications_stream = stream
    realm.save(update_fields=['notifications_stream'])
    event = dict(
        type="realm",
        op="update",
        property="notifications_stream_id",
        value=stream_id,
    )
    send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_signup_notifications_stream(realm: Realm, stream: Stream,
                                             stream_id: int) -> None:
    """Set the realm's signup-notifications stream and broadcast the new id."""
    realm.signup_notifications_stream = stream
    realm.save(update_fields=['signup_notifications_stream'])
    event = dict(
        type="realm",
        op="update",
        property="signup_notifications_stream_id",
        value=stream_id,
    )
    send_event(realm, event, active_user_ids(realm.id))
def do_deactivate_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
    """
    Deactivate this realm. Do NOT deactivate the users -- we need to be able to
    tell the difference between users that were intentionally deactivated,
    e.g. by a realm admin, and users who can't currently use Zulip because their
    realm has been deactivated.
    """
    if realm.deactivated:
        return
    realm.deactivated = True
    realm.save(update_fields=["deactivated"])
    if settings.BILLING_ENABLED:
        downgrade_now(realm)
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, event_time=event_time,
        acting_user=acting_user, extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
        }))
    # Cancel any queued outgoing email for this realm.
    ScheduledEmail.objects.filter(realm=realm).delete()
    for user in active_humans_in_realm(realm):
        # Don't deactivate the users, but do delete their sessions so they get
        # bumped to the login screen, where they'll get a realm deactivation
        # notice when they try to log in.
        delete_user_sessions(user)
    event = dict(type="realm", op="deactivated",
                 realm_id=realm.id)
    send_event(realm, event, active_user_ids(realm.id))
def do_reactivate_realm(realm: Realm) -> None:
    """Re-enable a deactivated realm and record the event in the audit log."""
    realm.deactivated = False
    realm.save(update_fields=["deactivated"])
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
        }))
def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None:
    """Rename the realm's subdomain (string_id); no notifications are sent."""
    realm.string_id = new_subdomain
    realm.save(update_fields=["string_id"])
def do_scrub_realm(realm: Realm) -> None:
    """Irreversibly scrub PII from a realm.

    Deletes each user's messages and avatar, replaces their name/email
    with random placeholders, removes custom profile fields and all
    attachments, then records an audit-log entry.
    """
    for user in UserProfile.objects.filter(realm=realm):
        do_delete_messages_by_sender(user)
        do_delete_avatar_image(user)
        user.full_name = f"Scrubbed {generate_key()[:15]}"
        scrubbed_email = f"scrubbed-{generate_key()[:15]}@{realm.host}"
        user.email = scrubbed_email
        user.delivery_email = scrubbed_email
        user.save(update_fields=["full_name", "email", "delivery_email"])
    do_remove_realm_custom_profile_fields(realm)
    Attachment.objects.filter(realm=realm).delete()
    RealmAuditLog.objects.create(
        realm=realm,
        event_time=timezone_now(),
        event_type=RealmAuditLog.REALM_SCRUBBED,
    )
def do_deactivate_user(user_profile: UserProfile,
                       acting_user: Optional[UserProfile]=None,
                       _cascade: bool=True) -> None:
    """Deactivate a user account.

    Marks the user inactive, deletes their sessions, cancels their
    scheduled emails, records an audit-log row, updates usage stats and
    billing, notifies clients, and (when _cascade is True, the default)
    recursively deactivates the bots this user owns.  _cascade is an
    internal flag: recursive calls pass False to stop the walk.
    """
    # Idempotent: nothing to do for an already-inactive user.
    if not user_profile.is_active:
        return
    if user_profile.realm.is_zephyr_mirror_realm:  # nocoverage
        # For zephyr mirror users, we need to make them a mirror dummy
        # again; otherwise, other users won't get the correct behavior
        # when trying to send messages to this person inside Zulip.
        #
        # Ideally, we need to also ensure their zephyr mirroring bot
        # isn't running, but that's a separate issue.
        user_profile.is_mirror_dummy = True
    user_profile.is_active = False
    user_profile.save(update_fields=["is_active"])
    # Log the user out everywhere and stop pending onboarding/digest emails.
    delete_user_sessions(user_profile)
    clear_scheduled_emails([user_profile.id])
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    # Decrement the active-users analytics counter for this realm.
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time, increment=-1)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)
    event = dict(type="realm_user", op="remove",
                 person=dict(user_id=user_profile.id,
                             full_name=user_profile.full_name))
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
    if user_profile.is_bot:
        # Bot owners additionally get a realm_bot event for their bot lists.
        event = dict(type="realm_bot", op="remove",
                     bot=dict(user_id=user_profile.id,
                              full_name=user_profile.full_name))
        send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
    if _cascade:
        # Deactivate all active bots owned by this user, without recursing
        # further (bots don't own bots).
        bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
                                                  bot_owner=user_profile)
        for profile in bot_profiles:
            do_deactivate_user(profile, acting_user=acting_user, _cascade=False)
def do_deactivate_stream(stream: Stream, log: bool=True) -> None:
    """Deactivate a stream: unsubscribe everyone, rename it to free up the
    old name, make it invite-only, remove it from default-stream data,
    invalidate caches, and send a stream/delete event.

    NOTE(review): the `log` parameter appears unused in this body; it is
    presumably kept for caller compatibility -- confirm before removing.
    """
    # Get the affected user ids *before* we deactivate everybody.
    affected_user_ids = can_access_stream_user_ids(stream)
    get_active_subscriptions_for_stream_id(stream.id).update(active=False)
    was_invite_only = stream.invite_only
    stream.deactivated = True
    # Invite-only prevents the deactivated stream's history being accessible.
    stream.invite_only = True
    # Preserve as much as possible the original stream name while giving it a
    # special prefix that both indicates that the stream is deactivated and
    # frees up the original name for reuse.
    old_name = stream.name
    new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
    for i in range(20):
        if stream_name_in_use(new_name, stream.realm_id):
            # This stream has already been deactivated, keep prepending !s until
            # we have a unique stream name or you've hit a rename limit.
            new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
        else:
            break
    # If you don't have a unique name at this point, this will fail later in the
    # code path.
    stream.name = new_name[:Stream.MAX_NAME_LENGTH]
    stream.save(update_fields=['name', 'deactivated', 'invite_only'])
    # If this is a default stream, remove it, properly sending a
    # notification to browser clients.
    if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
        do_remove_default_stream(stream)
    default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id)
    for group in default_stream_groups_for_stream:
        do_remove_streams_from_default_stream_group(stream.realm, group, [stream])
    # Remove the old stream information from remote cache.
    old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
    cache_delete(old_cache_key)
    # Clients are told the *original* name/privacy so they can match the
    # stream they knew about.
    stream_dict = stream.to_dict()
    stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
    event = dict(type="stream", op="delete",
                 streams=[stream_dict])
    send_event(stream.realm, event, affected_user_ids)
def send_user_email_update_event(user_profile: UserProfile) -> None:
    """Broadcast the user's (public) email address to the whole realm."""
    event = dict(
        type='realm_user',
        op='update',
        person=dict(user_id=user_profile.id,
                    new_email=user_profile.email),
    )
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None:
    """Change the user's delivery email (and, if emails are realm-public,
    their visible email), sending the appropriate client events and
    recording an audit-log entry.
    """
    # Flush stale cached profiles before mutating the row.
    delete_user_profile_caches([user_profile])
    user_profile.delivery_email = new_email
    if user_profile.email_address_is_realm_public():
        # The visible .email mirrors delivery_email when addresses are public.
        user_profile.email = new_email
        user_profile.save(update_fields=["email", "delivery_email"])
    else:
        user_profile.save(update_fields=["delivery_email"])
    # We notify just the target user (and eventually org admins, only
    # when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS)
    # about their new delivery email, since that field is private.
    payload = dict(user_id=user_profile.id,
                   delivery_email=new_email)
    event = dict(type='realm_user', op='update', person=payload)
    send_event(user_profile.realm, event, [user_profile.id])
    if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
        # If the user is using Gravatar to manage their email address,
        # their Gravatar just changed, and we need to notify other
        # clients.
        notify_avatar_url_change(user_profile)
    if user_profile.email_address_is_realm_public():
        # Additionally, if we're also changing the publicly visible
        # email, we send a new_email event as well.
        send_user_email_update_event(user_profile)
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
                                 modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED,
                                 event_time=event_time)
def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
    """Begin an email change: store an EmailChangeStatus row and mail a
    confirmation link to the new address.  The change only takes effect
    once the recipient follows the link.
    """
    old_email = user_profile.delivery_email
    obj = EmailChangeStatus.objects.create(
        new_email=new_email,
        old_email=old_email,
        user_profile=user_profile,
        realm=user_profile.realm,
    )
    activation_url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE)
    # Imported here to avoid an import cycle at module load time.
    from zerver.context_processors import common_context
    context = common_context(user_profile)
    context['old_email'] = old_email
    context['new_email'] = new_email
    context['activate_url'] = activation_url
    language = user_profile.default_language
    send_email(
        'zerver/emails/confirm_new_email',
        to_emails=[new_email],
        from_name=FromAddress.security_email_from_name(language=language),
        from_address=FromAddress.tokenized_no_reply_address(),
        language=language,
        context=context,
    )
def compute_irc_user_fullname(email: str) -> str:
    """Display name for an IRC mirror user: the email's local part with an
    " (IRC)" suffix."""
    local_part, _, _ = email.partition("@")
    return local_part + " (IRC)"
def compute_jabber_user_fullname(email: str) -> str:
    """Display name for a Jabber mirror user: the email's local part with an
    " (XMPP)" suffix."""
    local_part, _, _ = email.partition("@")
    return local_part + " (XMPP)"
# Cached by email so repeated mirror traffic from the same sender doesn't
# hit the database each time.
@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
                timeout=3600*24*7)
def create_mirror_user_if_needed(realm: Realm, email: str,
                                 email_to_fullname: Callable[[str], str]) -> UserProfile:
    """Return the mirror user for `email`, creating an inactive mirror-dummy
    account (named via `email_to_fullname`) if none exists yet.
    """
    try:
        return get_user_by_delivery_email(email, realm)
    except UserProfile.DoesNotExist:
        try:
            # Forge a user for this person
            return create_user(
                email=email,
                password=None,
                realm=realm,
                full_name=email_to_fullname(email),
                short_name=email_to_username(email),
                active=False,
                is_mirror_dummy=True,
            )
        except IntegrityError:
            # A concurrent request created the same user between our lookup
            # and our insert; fetch the row it created.
            return get_user_by_delivery_email(email, realm)
def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None:
    """Have the welcome bot congratulate a new user on their first reply.

    Only fires while the welcome bot has sent fewer than two messages to
    this recipient, so the congratulation happens at most once.
    """
    welcome_bot = get_system_bot(settings.WELCOME_BOT)
    human_recipient_id = message['message'].sender.recipient_id
    prior_count = Message.objects.filter(
        sender=welcome_bot, recipient_id=human_recipient_id).count()
    if prior_count >= 2:
        return
    content = (
        _("Congratulations on your first reply!") +
        " "
        ":tada:"
        "\n"
        "\n" +
        _("Feel free to continue using this space to practice your new messaging "
          "skills. Or, try clicking on some of the stream names to your left!")
    )
    internal_send_private_message(
        message['realm'], welcome_bot, message['message'].sender, content)
def render_incoming_message(message: Message,
                            content: str,
                            user_ids: Set[int],
                            realm: Realm,
                            mention_data: Optional[bugdown.MentionData]=None,
                            email_gateway: bool=False) -> str:
    """Render `content` to HTML via the markdown pipeline, converting any
    rendering failure into a user-facing JsonableError.

    NOTE(review): `user_ids` is not used in this body; it looks like it is
    kept only for caller/signature compatibility -- confirm before removing.
    """
    realm_alert_words_automaton = get_alert_word_automaton(realm)
    try:
        rendered_content = render_markdown(
            message=message,
            content=content,
            realm=realm,
            realm_alert_words_automaton = realm_alert_words_automaton,
            mention_data=mention_data,
            email_gateway=email_gateway,
        )
    except BugdownRenderingException:
        raise JsonableError(_('Unable to render message'))
    return rendered_content
class RecipientInfoResult(TypedDict):
    """Audience buckets computed by get_recipient_info for one message.

    Each set contains user ids drawn from the message's recipients (plus,
    for the bot fields, possibly-mentioned users); see get_recipient_info
    for how each bucket is derived.
    """
    active_user_ids: Set[int]
    push_notify_user_ids: Set[int]
    stream_email_user_ids: Set[int]
    stream_push_user_ids: Set[int]
    wildcard_mention_user_ids: Set[int]
    um_eligible_user_ids: Set[int]
    long_term_idle_user_ids: Set[int]
    default_bot_user_ids: Set[int]
    # (bot user id, bot_type) pairs for service bots (webhook/embedded).
    service_bot_tuples: List[Tuple[int, int]]
def get_recipient_info(recipient: Recipient,
                       sender_id: int,
                       stream_topic: Optional[StreamTopicTarget],
                       possibly_mentioned_user_ids: AbstractSet[int]=set(),
                       possible_wildcard_mention: bool=True) -> RecipientInfoResult:
    """Compute the audience buckets (see RecipientInfoResult) for a message
    being sent to `recipient`.

    stream_topic must be provided for stream messages (it supplies the
    subscription and topic-mute data).  possibly_mentioned_user_ids are
    candidate mentions from pre-rendering; they are folded into the user
    query but filtered back out of recipient-only buckets below.  The
    shared set() default is safe here: the argument is only read, never
    mutated.
    """
    stream_push_user_ids: Set[int] = set()
    stream_email_user_ids: Set[int] = set()
    wildcard_mention_user_ids: Set[int] = set()
    if recipient.type == Recipient.PERSONAL:
        # The sender and recipient may be the same id, so
        # de-duplicate using a set.
        message_to_user_ids = list({recipient.type_id, sender_id})
        assert(len(message_to_user_ids) in [1, 2])
    elif recipient.type == Recipient.STREAM:
        # Anybody calling us w/r/t a stream message needs to supply
        # stream_topic. We may eventually want to have different versions
        # of this function for different message types.
        assert(stream_topic is not None)
        user_ids_muting_topic = stream_topic.user_ids_muting_topic()
        # Pull per-subscription notification settings alongside the
        # subscriber's profile-level defaults in one query.
        subscription_rows = stream_topic.get_active_subscriptions().annotate(
            user_profile_email_notifications=F('user_profile__enable_stream_email_notifications'),
            user_profile_push_notifications=F('user_profile__enable_stream_push_notifications'),
            user_profile_wildcard_mentions_notify=F(
                'user_profile__wildcard_mentions_notify'),
        ).values(
            'user_profile_id',
            'push_notifications',
            'email_notifications',
            'wildcard_mentions_notify',
            'user_profile_email_notifications',
            'user_profile_push_notifications',
            'user_profile_wildcard_mentions_notify',
            'is_muted',
        ).order_by('user_profile_id')
        message_to_user_ids = [
            row['user_profile_id']
            for row in subscription_rows
        ]
        def should_send(setting: str, row: Dict[str, Any]) -> bool:
            # This implements the structure that the UserProfile stream notification settings
            # are defaults, which can be overridden by the stream-level settings (if those
            # values are not null).
            if row['is_muted']:
                return False
            if row['user_profile_id'] in user_ids_muting_topic:
                return False
            if row[setting] is not None:
                return row[setting]
            return row['user_profile_' + setting]
        stream_push_user_ids = {
            row['user_profile_id']
            for row in subscription_rows
            # Note: muting a stream overrides stream_push_notify
            if should_send('push_notifications', row)
        }
        stream_email_user_ids = {
            row['user_profile_id']
            for row in subscription_rows
            # Note: muting a stream overrides stream_email_notify
            if should_send('email_notifications', row)
        }
        if possible_wildcard_mention:
            # If there's a possible wildcard mention, we need to
            # determine which users would receive a wildcard mention
            # notification for this message should the message indeed
            # contain a wildcard mention.
            #
            # We don't have separate values for push/email
            # notifications here; at this stage, we're just
            # determining whether this wildcard mention should be
            # treated as a mention (and follow the user's mention
            # notification preferences) or a normal message.
            wildcard_mention_user_ids = {
                row['user_profile_id']
                for row in subscription_rows
                if should_send("wildcard_mentions_notify", row)
            }
    elif recipient.type == Recipient.HUDDLE:
        message_to_user_ids = get_huddle_user_ids(recipient)
    else:
        raise ValueError('Bad recipient type')
    message_to_user_id_set = set(message_to_user_ids)
    user_ids = set(message_to_user_id_set)
    # Important note: Because we haven't rendered bugdown yet, we
    # don't yet know which of these possibly-mentioned users was
    # actually mentioned in the message (in other words, the
    # mention syntax might have been in a code block or otherwise
    # escaped). `get_ids_for` will filter these extra user rows
    # for our data structures not related to bots
    user_ids |= possibly_mentioned_user_ids
    if user_ids:
        query = UserProfile.objects.filter(
            is_active=True,
        ).values(
            'id',
            'enable_online_push_notifications',
            'is_bot',
            'bot_type',
            'long_term_idle',
        )
        # query_for_ids is fast highly optimized for large queries, and we
        # need this codepath to be fast (it's part of sending messages)
        query = query_for_ids(
            query=query,
            user_ids=sorted(list(user_ids)),
            field='id',
        )
        rows = list(query)
    else:
        # TODO: We should always have at least one user_id as a recipient
        # of any message we send. Right now the exception to this
        # rule is `notify_new_user`, which, at least in a possibly
        # contrived test scenario, can attempt to send messages
        # to an inactive bot. When we plug that hole, we can avoid
        # this `else` clause and just `assert(user_ids)`.
        #
        # UPDATE: It's February 2020 (and a couple years after the above
        # comment was written). We have simplified notify_new_user
        # so that it should be a little easier to reason about.
        # There is currently some cleanup to how we handle cross
        # realm bots that is still under development. Once that
        # effort is complete, we should be able to address this
        # to-do.
        rows = []
    def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
        """Only includes users on the explicit message to line"""
        return {
            row['id']
            for row in rows
            if f(row)
        } & message_to_user_id_set
    def is_service_bot(row: Dict[str, Any]) -> bool:
        return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES)
    active_user_ids = get_ids_for(lambda r: True)
    push_notify_user_ids = get_ids_for(
        lambda r: r['enable_online_push_notifications'],
    )
    # Service bots don't get UserMessage rows.
    um_eligible_user_ids = get_ids_for(
        lambda r: not is_service_bot(r),
    )
    long_term_idle_user_ids = get_ids_for(
        lambda r: r['long_term_idle'],
    )
    # These two bot data structures need to filter from the full set
    # of users who either are receiving the message or might have been
    # mentioned in it, and so can't use get_ids_for.
    #
    # Further in the do_send_messages code path, once
    # `mentioned_user_ids` has been computed via bugdown, we'll filter
    # these data structures for just those users who are either a
    # direct recipient or were mentioned; for now, we're just making
    # sure we have the data we need for that without extra database
    # queries.
    default_bot_user_ids = {
        row['id']
        for row in rows
        if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT
    }
    service_bot_tuples = [
        (row['id'], row['bot_type'])
        for row in rows
        if is_service_bot(row)
    ]
    info: RecipientInfoResult = dict(
        active_user_ids=active_user_ids,
        push_notify_user_ids=push_notify_user_ids,
        stream_push_user_ids=stream_push_user_ids,
        stream_email_user_ids=stream_email_user_ids,
        wildcard_mention_user_ids=wildcard_mention_user_ids,
        um_eligible_user_ids=um_eligible_user_ids,
        long_term_idle_user_ids=long_term_idle_user_ids,
        default_bot_user_ids=default_bot_user_ids,
        service_bot_tuples=service_bot_tuples,
    )
    return info
def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]],
                           mentioned_user_ids: Set[int], active_user_ids: Set[int],
                           recipient_type: int) -> Dict[str, List[Dict[str, Any]]]:
    """Map queue name -> list of {'trigger', 'user_profile_id'} events for
    service bots (outgoing webhook / embedded) that should be invoked for
    this message.  Returns an empty mapping for bot senders.
    """
    event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
    # Avoid infinite loops by preventing messages sent by bots from generating
    # Service events.
    if sender.is_bot:
        return event_dict
    def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
        # Route by bot type; unknown types are logged and skipped.
        if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
            queue_name = 'outgoing_webhooks'
        elif bot_type == UserProfile.EMBEDDED_BOT:
            queue_name = 'embedded_bots'
        else:
            logging.error(
                'Unexpected bot_type for Service bot id=%s: %s',
                user_profile_id, bot_type,
            )
            return
        is_stream = (recipient_type == Recipient.STREAM)
        # Important note: service_bot_tuples may contain service bots
        # who were not actually mentioned in the message (e.g. if
        # mention syntax for that bot appeared in a code block).
        # Thus, it is important to filter any users who aren't part of
        # either mentioned_user_ids (the actual mentioned users) or
        # active_user_ids (the actual recipients).
        #
        # So even though this is implied by the logic below, we filter
        # these not-actually-mentioned users here, to help keep this
        # function future-proof.
        if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
            return
        # Mention triggers, for stream messages
        if is_stream and user_profile_id in mentioned_user_ids:
            trigger = 'mention'
        # PM triggers for personal and huddle messages
        elif (not is_stream) and (user_profile_id in active_user_ids):
            trigger = 'private_message'
        else:
            return
        event_dict[queue_name].append({
            'trigger': trigger,
            'user_profile_id': user_profile_id,
        })
    for user_profile_id, bot_type in service_bot_tuples:
        maybe_add_event(
            user_profile_id=user_profile_id,
            bot_type=bot_type,
        )
    return event_dict
def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]:
    """Persist prepared message dicts as ScheduledMessage rows in one bulk
    insert, returning the new row ids in input order.

    Each dict carries a prepared 'message' plus 'stream', 'realm',
    'deliver_at', and 'delivery_type' ('send_later' or 'remind').
    """
    scheduled_messages: List[ScheduledMessage] = []
    for message in messages:
        scheduled_message = ScheduledMessage()
        scheduled_message.sender = message['message'].sender
        scheduled_message.recipient = message['message'].recipient
        topic_name = message['message'].topic_name()
        scheduled_message.set_topic_name(topic_name=topic_name)
        scheduled_message.content = message['message'].content
        scheduled_message.sending_client = message['message'].sending_client
        scheduled_message.stream = message['stream']
        scheduled_message.realm = message['realm']
        scheduled_message.scheduled_timestamp = message['deliver_at']
        # NOTE(review): any other delivery_type silently leaves the model's
        # default in place -- presumably callers validate upstream; confirm.
        if message['delivery_type'] == 'send_later':
            scheduled_message.delivery_type = ScheduledMessage.SEND_LATER
        elif message['delivery_type'] == 'remind':
            scheduled_message.delivery_type = ScheduledMessage.REMIND
        scheduled_messages.append(scheduled_message)
    ScheduledMessage.objects.bulk_create(scheduled_messages)
    # Relies on bulk_create populating .id on the passed objects
    # (PostgreSQL behavior).
    return [scheduled_message.id for scheduled_message in scheduled_messages]
def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]],
                     email_gateway: bool=False,
                     mark_as_read: Sequence[int]=[]) -> List[int]:
    """See
    https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
    for high-level documentation on this subsystem.

    Pipeline: normalize the prepared message dicts, compute recipients,
    render markdown, save Message/UserMessage rows in one transaction,
    then fan out events (Tornado, embeds, welcome bot, service bots).
    Returns the ids of all sent messages (order not preserved; see the
    note at the bottom).  mark_as_read lists user ids whose UserMessage
    rows should be created already read.
    """
    # Filter out messages which didn't pass internal_prep_message properly
    messages = [message for message in messages_maybe_none if message is not None]
    # Filter out zephyr mirror anomalies where the message was already sent
    already_sent_ids: List[int] = []
    new_messages: List[MutableMapping[str, Any]] = []
    for message in messages:
        if isinstance(message['message'], int):
            already_sent_ids.append(message['message'])
        else:
            new_messages.append(message)
    messages = new_messages
    links_for_embed: Set[str] = set()
    # For consistency, changes to the default values for these gets should also be applied
    # to the default args in do_send_message
    for message in messages:
        message['rendered_content'] = message.get('rendered_content', None)
        message['stream'] = message.get('stream', None)
        message['local_id'] = message.get('local_id', None)
        message['sender_queue_id'] = message.get('sender_queue_id', None)
        message['realm'] = message.get('realm', message['message'].sender.realm)
        mention_data = bugdown.MentionData(
            realm_id=message['realm'].id,
            content=message['message'].content,
        )
        message['mention_data'] = mention_data
        if message['message'].is_stream_message():
            stream_id = message['message'].recipient.type_id
            stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
                stream_id=stream_id,
                topic_name=message['message'].topic_name(),
            )
        else:
            stream_topic = None
        info = get_recipient_info(
            recipient=message['message'].recipient,
            sender_id=message['message'].sender_id,
            stream_topic=stream_topic,
            possibly_mentioned_user_ids=mention_data.get_user_ids(),
            possible_wildcard_mention=mention_data.message_has_wildcards(),
        )
        message['active_user_ids'] = info['active_user_ids']
        message['push_notify_user_ids'] = info['push_notify_user_ids']
        message['stream_push_user_ids'] = info['stream_push_user_ids']
        message['stream_email_user_ids'] = info['stream_email_user_ids']
        message['um_eligible_user_ids'] = info['um_eligible_user_ids']
        message['long_term_idle_user_ids'] = info['long_term_idle_user_ids']
        message['default_bot_user_ids'] = info['default_bot_user_ids']
        message['service_bot_tuples'] = info['service_bot_tuples']
        # Render our messages.
        assert message['message'].rendered_content is None
        rendered_content = render_incoming_message(
            message['message'],
            message['message'].content,
            message['active_user_ids'],
            message['realm'],
            mention_data=message['mention_data'],
            email_gateway=email_gateway,
        )
        message['message'].rendered_content = rendered_content
        message['message'].rendered_content_version = bugdown_version
        links_for_embed |= message['message'].links_for_preview
        # Add members of the mentioned user groups into `mentions_user_ids`.
        for group_id in message['message'].mentions_user_group_ids:
            members = message['mention_data'].get_group_members(group_id)
            message['message'].mentions_user_ids.update(members)
        # Only send data to Tornado about wildcard mentions if message
        # rendering determined the message had an actual wildcard
        # mention in it (and not e.g. wildcard mention syntax inside a
        # code block).
        if message['message'].mentions_wildcard:
            message['wildcard_mention_user_ids'] = info['wildcard_mention_user_ids']
        else:
            message['wildcard_mention_user_ids'] = []
        '''
        Once we have the actual list of mentioned ids from message
        rendering, we can patch in "default bots" (aka normal bots)
        who were directly mentioned in this message as eligible to
        get UserMessage rows.
        '''
        mentioned_user_ids = message['message'].mentions_user_ids
        default_bot_user_ids = message['default_bot_user_ids']
        mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
        message['um_eligible_user_ids'] |= mentioned_bot_user_ids
    # Save the message receipts in the database
    user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict)
    with transaction.atomic():
        Message.objects.bulk_create([message['message'] for message in messages])
        # Claim attachments in message
        for message in messages:
            if do_claim_attachments(message['message'],
                                    message['message'].potential_attachment_path_ids):
                message['message'].has_attachment = True
                message['message'].save(update_fields=['has_attachment'])
        ums: List[UserMessageLite] = []
        for message in messages:
            # Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
            # they will be processed later.
            mentioned_user_ids = message['message'].mentions_user_ids
            user_messages = create_user_messages(
                message=message['message'],
                um_eligible_user_ids=message['um_eligible_user_ids'],
                long_term_idle_user_ids=message['long_term_idle_user_ids'],
                stream_push_user_ids = message['stream_push_user_ids'],
                stream_email_user_ids = message['stream_email_user_ids'],
                mentioned_user_ids=mentioned_user_ids,
                mark_as_read=mark_as_read,
            )
            for um in user_messages:
                user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()
            ums.extend(user_messages)
            message['message'].service_queue_events = get_service_bot_events(
                sender=message['message'].sender,
                service_bot_tuples=message['service_bot_tuples'],
                mentioned_user_ids=mentioned_user_ids,
                active_user_ids=message['active_user_ids'],
                recipient_type=message['message'].recipient.type,
            )
        bulk_insert_ums(ums)
        for message in messages:
            do_widget_post_save_actions(message)
    # Everything below is fan-out/notification work, done outside the
    # transaction.
    for message in messages:
        realm_id: Optional[int] = None
        if message['message'].is_stream_message():
            if message['stream'] is None:
                stream_id = message['message'].recipient.type_id
                message['stream'] = Stream.objects.select_related().get(id=stream_id)
            assert message['stream'] is not None  # assert needed because stubs for django are missing
            realm_id = message['stream'].realm_id
        # Deliver events to the real-time push system, as well as
        # enqueuing any additional processing triggered by the message.
        wide_message_dict = MessageDict.wide_dict(message['message'], realm_id)
        user_flags = user_message_flags.get(message['message'].id, {})
        sender = message['message'].sender
        message_type = wide_message_dict['type']
        presence_idle_user_ids = get_active_presence_idle_user_ids(
            realm=sender.realm,
            sender_id=sender.id,
            message_type=message_type,
            active_user_ids=message['active_user_ids'],
            user_flags=user_flags,
        )
        event = dict(
            type='message',
            message=message['message'].id,
            message_dict=wide_message_dict,
            presence_idle_user_ids=presence_idle_user_ids,
        )
        '''
        TODO: We may want to limit user_ids to only those users who have
        UserMessage rows, if only for minor performance reasons.
        For now we queue events for all subscribers/sendees of the
        message, since downstream code may still do notifications
        that don't require UserMessage rows.
        Our automated tests have gotten better on this codepath,
        but we may have coverage gaps, so we should be careful
        about changing the next line.
        '''
        user_ids = message['active_user_ids'] | set(user_flags.keys())
        users = [
            dict(
                id=user_id,
                flags=user_flags.get(user_id, []),
                always_push_notify=(user_id in message['push_notify_user_ids']),
                stream_push_notify=(user_id in message['stream_push_user_ids']),
                stream_email_notify=(user_id in message['stream_email_user_ids']),
                wildcard_mention_notify=(user_id in message['wildcard_mention_user_ids']),
            )
            for user_id in user_ids
        ]
        if message['message'].is_stream_message():
            # Note: This is where authorization for single-stream
            # get_updates happens! We only attach stream data to the
            # notify new_message request if it's a public stream,
            # ensuring that in the tornado server, non-public stream
            # messages are only associated to their subscribed users.
            assert message['stream'] is not None  # assert needed because stubs for django are missing
            if message['stream'].is_public():
                event['realm_id'] = message['stream'].realm_id
                event['stream_name'] = message['stream'].name
            if message['stream'].invite_only:
                event['invite_only'] = True
            if message['stream'].first_message_id is None:
                message['stream'].first_message_id = message['message'].id
                message['stream'].save(update_fields=["first_message_id"])
        if message['local_id'] is not None:
            event['local_id'] = message['local_id']
        if message['sender_queue_id'] is not None:
            event['sender_queue_id'] = message['sender_queue_id']
        send_event(message['realm'], event, users)
        if links_for_embed:
            event_data = {
                'message_id': message['message'].id,
                'message_content': message['message'].content,
                'message_realm_id': message['realm'].id,
                'urls': links_for_embed}
            queue_json_publish('embed_links', event_data)
        if message['message'].recipient.type == Recipient.PERSONAL:
            welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id
            if (welcome_bot_id in message['active_user_ids'] and
                    welcome_bot_id != message['message'].sender_id):
                send_welcome_bot_response(message)
        for queue_name, events in message['message'].service_queue_events.items():
            for event in events:
                queue_json_publish(
                    queue_name,
                    {
                        "message": wide_message_dict,
                        "trigger": event['trigger'],
                        "user_profile_id": event["user_profile_id"],
                    },
                )
    # Note that this does not preserve the order of message ids
    # returned. In practice, this shouldn't matter, as we only
    # mirror single zephyr messages at a time and don't otherwise
    # intermingle sending zephyr messages with other messages.
    return already_sent_ids + [message['message'].id for message in messages]
class UserMessageLite:
    """Plain-Python stand-in for one zerver_usermessage row.

    Bulk-inserting through the Django ORM is too slow, so we collect
    these lightweight objects and write them with raw SQL instead
    (see bulk_insert_ums).
    """

    def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
        # Mirror the zerver_usermessage columns exactly.
        self.user_profile_id = user_profile_id
        self.message_id = message_id
        self.flags = flags

    def flags_list(self) -> List[str]:
        """Decode the integer flags bitmask into a list of flag names."""
        return UserMessage.flags_list_for_flags(self.flags)
def create_user_messages(message: Message,
                         um_eligible_user_ids: AbstractSet[int],
                         long_term_idle_user_ids: AbstractSet[int],
                         stream_push_user_ids: AbstractSet[int],
                         stream_email_user_ids: AbstractSet[int],
                         mentioned_user_ids: AbstractSet[int],
                         mark_as_read: Sequence[int] = []) -> List[UserMessageLite]:
    """Build the UserMessageLite rows (with per-user flags) for a message.

    Rows for soft-deactivated users are omitted when it is safe to
    recreate them lazily at reactivation time; see the filter below.
    """
    # These properties on the Message are set via render_markdown by
    # code in the bugdown inline patterns.
    wildcard = message.mentions_wildcard
    ids_with_alert_words = message.user_ids_with_alert_words
    ums_to_create = []
    for user_profile_id in um_eligible_user_ids:
        flags = 0
        # Senders (when human) and explicitly listed users start out read.
        if (user_profile_id == message.sender.id and
                message.sent_by_human()) or \
                user_profile_id in mark_as_read:
            flags |= UserMessage.flags.read
        if wildcard:
            flags |= UserMessage.flags.wildcard_mentioned
        if user_profile_id in mentioned_user_ids:
            flags |= UserMessage.flags.mentioned
        if user_profile_id in ids_with_alert_words:
            flags |= UserMessage.flags.has_alert_word
        if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]:
            flags |= UserMessage.flags.is_private
        ums_to_create.append(UserMessageLite(
            user_profile_id=user_profile_id,
            message_id=message.id,
            flags=flags,
        ))
    # For long_term_idle (aka soft-deactivated) users, we are allowed
    # to optimize by lazily not creating UserMessage rows that would
    # have the default 0 flag set (since the soft-reactivation logic
    # knows how to create those when the user comes back).  We must
    # create the rows non-lazily when:
    #
    # * There are nonzero flags (e.g. the user was mentioned), since
    #   that case is rare and this saves a lot of complexity in
    #   soft-reactivation.
    #
    # * The user is going to be notified (e.g. they get push/email
    #   notifications for every message on a stream), since in that
    #   case the notifications code will call `access_message` on the
    #   message to re-verify permissions, and for private streams,
    #   will get an error if the UserMessage row doesn't exist yet.
    #
    # See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
    # for details on this system.
    def can_defer_row(um: UserMessageLite) -> bool:
        return (um.user_profile_id in long_term_idle_user_ids and
                um.user_profile_id not in stream_push_user_ids and
                um.user_profile_id not in stream_email_user_ids and
                message.is_stream_message() and
                int(um.flags) == 0)
    return [um for um in ums_to_create if not can_defer_row(um)]
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
    """Insert a batch of UserMessage rows with a single raw SQL statement.

    Bypassing the Django ORM is a big win for large fanouts: profiling
    with 1000 users showed 0.436s -> 0.027s, roughly a 15x speedup.
    """
    if not ums:
        return
    rows = [(um.user_profile_id, um.message_id, um.flags) for um in ums]
    query = SQL('''
    INSERT into
        zerver_usermessage (user_profile_id, message_id, flags)
    VALUES %s
    ''')
    with connection.cursor() as cursor:
        execute_values(cursor.cursor, query, rows)
def do_add_submessage(realm: Realm,
                      sender_id: int,
                      message_id: int,
                      msg_type: str,
                      content: str,
                      ) -> None:
    """Persist a SubMessage (widget payload) on an existing message and
    fan a submessage event out to everyone who received that message."""
    submessage = SubMessage(
        sender_id=sender_id,
        message_id=message_id,
        msg_type=msg_type,
        content=content,
    )
    submessage.save()
    event = dict(
        type="submessage",
        msg_type=msg_type,
        message_id=message_id,
        submessage_id=submessage.id,
        sender_id=sender_id,
        content=content,
    )
    target_user_ids = [
        um.user_profile_id
        for um in UserMessage.objects.filter(message_id=message_id)
    ]
    send_event(realm, event, target_user_ids)
def notify_reaction_update(user_profile: UserProfile, message: Message,
                           reaction: Reaction, op: str) -> None:
    """Broadcast an add/remove reaction event for `message` and refresh
    the message cache."""
    event: Dict[str, Any] = {
        'type': 'reaction',
        'op': op,
        'user_id': user_profile.id,
        # TODO: We plan to remove this redundant user_dict object once
        # clients are updated to support accessing use user_id. See
        # https://github.com/zulip/zulip/pull/14711 for details.
        'user': {'user_id': user_profile.id,
                 'email': user_profile.email,
                 'full_name': user_profile.full_name},
        'message_id': message.id,
        'emoji_name': reaction.emoji_name,
        'emoji_code': reaction.emoji_code,
        'reaction_type': reaction.reaction_type,
    }
    # The reaction changed the message payload, so refresh the cached
    # message dict.
    update_to_dict_cache([message])
    # Recipients for message update events, including reactions, are
    # everyone who got the original message. This means reactions
    # won't live-update in preview narrows, but it's the right
    # performance tradeoff, since otherwise we'd need to send all
    # reactions to public stream messages to every browser for every
    # client in the organization, which doesn't scale.
    #
    # However, to ensure that reactions do live-update for any user
    # who has actually participated in reacting to a message, we add a
    # "historical" UserMessage row for any user who reacts to message,
    # subscribing them to future notifications.
    recipient_ids = [
        um.user_profile_id
        for um in UserMessage.objects.filter(message=message.id)
    ]
    send_event(user_profile.realm, event, recipient_ids)
def do_add_reaction_legacy(user_profile: UserProfile, message: Message, emoji_name: str) -> None:
    """Legacy API helper: add a reaction identified only by emoji name."""
    emoji_code, reaction_type = emoji_name_to_emoji_code(user_profile.realm, emoji_name)
    reaction = Reaction(user_profile=user_profile, message=message,
                        emoji_name=emoji_name, emoji_code=emoji_code,
                        reaction_type=reaction_type)
    try:
        reaction.save()
    except django.db.utils.IntegrityError:  # nocoverage
        # A race can slip past the duplicate check in the views code,
        # or the emoji_name/emoji_code mapping may be busted.
        raise JsonableError(_("Reaction already exists."))
    notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction_legacy(user_profile: UserProfile, message: Message, emoji_name: str) -> None:
    """Legacy API helper: remove a reaction identified only by emoji name."""
    reaction = Reaction.objects.get(user_profile=user_profile,
                                    message=message,
                                    emoji_name=emoji_name)
    reaction.delete()
    notify_reaction_update(user_profile, message, reaction, "remove")
def do_add_reaction(user_profile: UserProfile, message: Message,
                    emoji_name: str, emoji_code: str, reaction_type: str) -> None:
    """Add a reaction (modern API: the client supplies emoji code/type)."""
    reaction_args = dict(user_profile=user_profile, message=message,
                         emoji_name=emoji_name, emoji_code=emoji_code,
                         reaction_type=reaction_type)
    reaction = Reaction(**reaction_args)
    try:
        reaction.save()
    except django.db.utils.IntegrityError:  # nocoverage
        # A race can slip past the duplicate check in the views code,
        # or the emoji_name/emoji_code mapping may be busted.
        raise JsonableError(_("Reaction already exists."))
    notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction(user_profile: UserProfile, message: Message,
                       emoji_code: str, reaction_type: str) -> None:
    """Remove a reaction (modern API) and notify the message's recipients."""
    reaction = Reaction.objects.get(user_profile=user_profile,
                                    message=message,
                                    emoji_code=emoji_code,
                                    reaction_type=reaction_type)
    reaction.delete()
    notify_reaction_update(user_profile, message, reaction, "remove")
def do_send_typing_notification(
        realm: Realm,
        sender: UserProfile,
        recipient_user_profiles: List[UserProfile],
        operator: str) -> None:
    """Send a typing start/stop event to the active members of the
    conversation."""
    event = dict(
        type='typing',
        op=operator,
        sender={'user_id': sender.id, 'email': sender.email},
        # Include a list of recipients in the event body to help
        # identify where the typing is happening.
        recipients=[{'user_id': profile.id, 'email': profile.email}
                    for profile in recipient_user_profiles],
    )
    # Only deliver the notification to active user recipients.
    active_user_ids = [
        user.id
        for user in recipient_user_profiles
        if user.is_active
    ]
    send_event(realm, event, active_user_ids)
# check_send_typing_notification:
# Validates a typing notification request and sends the event.
def check_send_typing_notification(sender: UserProfile,
                                   user_ids: List[int],
                                   operator: str) -> None:
    """Validate a typing notification request and deliver the event.

    Raises JsonableError for an empty recipient list, a bad operator,
    or any invalid user ID.
    """
    realm = sender.realm
    if len(user_ids) == 0:
        raise JsonableError(_('Missing parameter: \'to\' (recipient)'))
    if operator not in ('start', 'stop'):
        raise JsonableError(_('Invalid \'op\' value (should be start or stop)'))
    # The next chunk of code will go away when we upgrade old mobile
    # users away from versions of mobile that send emails. For the
    # small number of very outdated mobile clients, we do double work
    # here in terms of fetching users, but this structure reduces lots
    # of other unnecessary duplicated code and will make it convenient
    # to mostly delete code when we desupport old versions of the app.
    if sender.id not in user_ids:
        user_ids.append(sender.id)
    # If any of the user_ids being sent in are invalid, we reject the
    # whole request, since a partial list of user_ids can create
    # confusion related to huddles. Plus it's a good sign that a
    # client is confused (or possibly even malicious) if we get bad
    # user_ids.
    user_profiles = []
    for user_id in user_ids:
        try:
            # We include cross-realm bots as possible recipients,
            # so that clients can know which huddle conversation
            # is relevant here.
            user_profiles.append(
                get_user_by_id_in_realm_including_cross_realm(user_id, sender.realm))
        except UserProfile.DoesNotExist:
            raise JsonableError(_("Invalid user ID {}").format(user_id))
    do_send_typing_notification(
        realm=realm,
        sender=sender,
        recipient_user_profiles=user_profiles,
        operator=operator,
    )
def ensure_stream(realm: Realm,
                  stream_name: str,
                  invite_only: bool=False,
                  stream_description: str="") -> Stream:
    """Return the named stream, creating it first if it doesn't exist."""
    result = create_stream_if_needed(realm, stream_name,
                                     invite_only=invite_only,
                                     stream_description=stream_description)
    stream, created = result
    return stream
def get_recipient_from_user_profiles(recipient_profiles: Sequence[UserProfile],
                                     forwarded_mirror_message: bool,
                                     forwarder_user_profile: Optional[UserProfile],
                                     sender: UserProfile) -> Recipient:
    """Resolve the Recipient (personal or huddle) for a private message.

    Raises ValidationError if a mirroring bot forwards a message it did
    not personally receive.
    """
    # Avoid mutating the passed in list of recipient_profiles.
    recipient_profiles_map = {user_profile.id: user_profile
                              for user_profile in recipient_profiles}
    if forwarded_mirror_message:
        # In our mirroring integrations with some third-party
        # protocols, bots subscribed to the third-party protocol
        # forward to Zulip messages that they received in the
        # third-party service. The permissions model for that
        # forwarding is that users can only submit to Zulip private
        # messages they personally received, and here we do the check
        # for whether forwarder_user_profile is among the private
        # message recipients of the message.
        assert forwarder_user_profile is not None
        if forwarder_user_profile.id not in recipient_profiles_map:
            raise ValidationError(_("User not authorized for this query"))
    # If the private message is just between the sender and
    # another person, force it to be a personal internally
    if (len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map):
        del recipient_profiles_map[sender.id]
    assert len(recipient_profiles_map) != 0
    if len(recipient_profiles_map) == 1:
        user_profile = list(recipient_profiles_map.values())[0]
        return user_profile.recipient
    # Otherwise, we need a huddle. Make sure the sender is included in huddle messages
    recipient_profiles_map[sender.id] = sender
    # The map's keys already are the user ids; set() on the dict is the
    # idiomatic form of the previous identity comprehension.
    user_ids: Set[int] = set(recipient_profiles_map)
    return get_huddle_recipient(user_ids)
def validate_recipient_user_profiles(user_profiles: Sequence[UserProfile],
                                     sender: UserProfile,
                                     allow_deactivated: bool=False) -> Sequence[UserProfile]:
    """Check every recipient can receive PMs from sender; return the
    recipients deduplicated by user id.

    Raises ValidationError for deactivated recipients (unless allowed)
    and for cross-realm recipients other than cross-realm bots.
    """
    recipient_profiles_map: Dict[int, UserProfile] = {}
    # We exempt cross-realm bots from the check that all the recipients
    # are in the same realm.
    realms = set()
    if not is_cross_realm_bot_email(sender.email):
        realms.add(sender.realm_id)
    for user_profile in user_profiles:
        unusable = (not user_profile.is_active and not user_profile.is_mirror_dummy and
                    not allow_deactivated)
        if unusable or user_profile.realm.deactivated:
            raise ValidationError(_("'{email}' is no longer using Zulip.").format(email=user_profile.email))
        recipient_profiles_map[user_profile.id] = user_profile
        if not is_cross_realm_bot_email(user_profile.email):
            realms.add(user_profile.realm_id)
    if len(realms) > 1:
        raise ValidationError(_("You can't send private messages outside of your organization."))
    return list(recipient_profiles_map.values())
def recipient_for_user_profiles(user_profiles: Sequence[UserProfile], forwarded_mirror_message: bool,
                                forwarder_user_profile: Optional[UserProfile],
                                sender: UserProfile, allow_deactivated: bool=False) -> Recipient:
    """Validate PM recipients, then resolve them to a Recipient object."""
    valid_profiles = validate_recipient_user_profiles(
        user_profiles, sender, allow_deactivated=allow_deactivated)
    return get_recipient_from_user_profiles(
        valid_profiles, forwarded_mirror_message, forwarder_user_profile, sender)
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
    """Return the ID of an already-delivered copy of this mirrored
    message, or None if there is no duplicate."""
    if message.recipient.type == Recipient.HUDDLE:
        # For huddle messages, we use a 10-second window because the
        # timestamps aren't guaranteed to actually match between two
        # copies of the same message.
        time_window = datetime.timedelta(seconds=10)
    else:
        time_window = datetime.timedelta(seconds=0)
    candidates = Message.objects.filter(
        sender=message.sender,
        recipient=message.recipient,
        content=message.content,
        sending_client=message.sending_client,
        date_sent__gte=message.date_sent - time_window,
        date_sent__lte=message.date_sent + time_window)
    matches = filter_by_exact_message_topic(
        query=candidates,
        message=message,
    )
    if matches.exists():
        return matches[0].id
    return None
def extract_stream_indicator(s: str) -> Union[str, int]:
    """Parse a stream indicator: a stream ID, a raw stream name, or a
    JSON-encoded name/ID (possibly wrapped in a one-element list)."""
    # Users can pass stream name as either an id or a name,
    # and if they choose to pass a name, they may JSON encode
    # it for legacy reasons.
    try:
        data = ujson.loads(s)
    except (ValueError, TypeError):
        # No JSON encoding: we just have a raw stream name.
        return s
    # We should stop supporting this odd use case
    # once we improve our documentation.
    if isinstance(data, list):
        if len(data) != 1:  # nocoverage
            raise JsonableError(_("Expected exactly one stream"))
        data = data[0]
    if isinstance(data, (str, int)):
        # Either a JSON-encoded stream name or a stream id.
        return data
    raise JsonableError(_("Invalid data type for stream"))
def extract_private_recipients(s: str) -> Union[List[str], List[int]]:
    """Parse a PM recipient list: JSON or comma-separated, containing
    either emails or user IDs (never a mix)."""
    # We try to accept multiple incoming formats for recipients.
    # See test_extract_recipients() for examples of what we allow.
    try:
        data = ujson.loads(s)
    except (ValueError, TypeError):
        data = s
    if isinstance(data, str):
        data = data.split(',')
    if not isinstance(data, list):
        raise JsonableError(_("Invalid data type for recipients"))
    if not data:
        # We don't complain about empty message recipients here
        return data
    # The first element decides whether this is an email list or an
    # ID list; the validators reject mixed lists.
    if isinstance(data[0], str):
        return get_validated_emails(data)
    if isinstance(data[0], int):
        return get_validated_user_ids(data)
    raise JsonableError(_("Invalid data type for recipients"))
def get_validated_user_ids(user_ids: Iterable[int]) -> List[int]:
    """Ensure every recipient is an integer user ID; return them
    deduplicated (order unspecified)."""
    if any(not isinstance(user_id, int) for user_id in user_ids):
        raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
    return list(set(user_ids))
def get_validated_emails(emails: Iterable[str]) -> List[str]:
    """Ensure every recipient is a string email; return the non-empty
    stripped emails, deduplicated (order unspecified)."""
    if any(not isinstance(email, str) for email in emails):
        raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
    stripped = {email.strip() for email in emails}
    return [email for email in stripped if email]
def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str,
                              topic: str, body: str, realm: Optional[Realm]=None) -> int:
    """Validate and send a stream message; return the new message's ID."""
    addressee = Addressee.for_stream_name(stream_name, topic)
    prepared = check_message(sender, client, addressee, body, realm)
    return do_send_messages([prepared])[0]
def check_send_private_message(sender: UserProfile, client: Client,
                               receiving_user: UserProfile, body: str) -> int:
    """Validate and send a 1:1 private message; return the new message's ID."""
    addressee = Addressee.for_user_profile(receiving_user)
    prepared = check_message(sender, client, addressee, body)
    return do_send_messages([prepared])[0]
# check_send_message:
# Returns the id of the sent message. Has same argspec as check_message.
def check_send_message(sender: UserProfile, client: Client, message_type_name: str,
                       message_to: Union[Sequence[int], Sequence[str]],
                       topic_name: Optional[str],
                       message_content: str, realm: Optional[Realm]=None,
                       forged: bool=False, forged_timestamp: Optional[float]=None,
                       forwarder_user_profile: Optional[UserProfile]=None,
                       local_id: Optional[str]=None,
                       sender_queue_id: Optional[str]=None,
                       widget_content: Optional[str]=None) -> int:
    """Validate and send a message of any type; return the new message's ID."""
    addressee = Addressee.legacy_build(sender, message_type_name,
                                       message_to, topic_name)
    prepared = check_message(sender, client, addressee,
                             message_content, realm, forged, forged_timestamp,
                             forwarder_user_profile, local_id, sender_queue_id,
                             widget_content)
    return do_send_messages([prepared])[0]
def check_schedule_message(sender: UserProfile, client: Client,
                           message_type_name: str,
                           message_to: Union[Sequence[str], Sequence[int]],
                           topic_name: Optional[str], message_content: str,
                           delivery_type: str, deliver_at: datetime.datetime,
                           realm: Optional[Realm]=None,
                           forwarder_user_profile: Optional[UserProfile]=None,
                           ) -> int:
    """Validate a message and schedule it for later delivery; return the
    scheduled message's ID."""
    addressee = Addressee.legacy_build(sender, message_type_name,
                                       message_to, topic_name)
    prepared = check_message(sender, client, addressee,
                             message_content, realm=realm,
                             forwarder_user_profile=forwarder_user_profile)
    prepared['deliver_at'] = deliver_at
    prepared['delivery_type'] = delivery_type
    # Reminders may only target streams or the sender's own PMs.
    recipient = prepared['message'].recipient
    is_stream = recipient.type == Recipient.STREAM
    if delivery_type == 'remind' and not is_stream and recipient.type_id != sender.id:
        raise JsonableError(_("Reminders can only be set for streams."))
    return do_schedule_messages([prepared])[0]
def check_default_stream_group_name(group_name: str) -> None:
    """Validate a default stream group name.

    Raises JsonableError for names that are empty/whitespace-only, too
    long, or contain NUL (0x00) characters.
    """
    if group_name.strip() == "":
        raise JsonableError(_("Invalid default stream group name '{}'").format(group_name))
    if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
        raise JsonableError(_("Default stream group name too long (limit: {} characters)").format(
            DefaultStreamGroup.MAX_NAME_LENGTH,
        ))
    # A single containment check replaces the per-character ord() loop.
    if '\x00' in group_name:
        raise JsonableError(_("Default stream group name '{}' contains NULL (0x00) characters.").format(
            group_name,
        ))
def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile,
                                                   realm: Realm,
                                                   content: str) -> None:
    """
    Sends a PM error notification to a bot's owner if one hasn't already
    been sent in the last 5 minutes.
    """
    if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
        return
    if not sender.is_bot or sender.bot_owner is None:
        return
    # Don't send these notifications for cross-realm bot messages
    # (e.g. from EMAIL_GATEWAY_BOT) since the owner for
    # EMAIL_GATEWAY_BOT is probably the server administrator, not
    # the owner of the bot who could potentially fix the problem.
    if sender.realm != realm:
        return
    # Throttle to one warning per 5 minutes to avoid a flood of PMs on
    # a misconfigured integration, re-using UserProfile.last_reminder,
    # which is not used for bots.
    waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
    last_reminder = sender.last_reminder
    if last_reminder and timezone_now() - last_reminder <= waitperiod:
        return
    internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT),
                                  sender.bot_owner, content)
    sender.last_reminder = timezone_now()
    sender.save(update_fields=['last_reminder'])
def send_pm_if_empty_stream(stream: Optional[Stream],
                            realm: Realm,
                            sender: UserProfile,
                            stream_name: Optional[str]=None,
                            stream_id: Optional[int]=None) -> None:
    """If a bot sends a message to a stream that doesn't exist or has no
    subscribers, sends a notification to the bot owner (if not a
    cross-realm bot) so that the owner can correct the issue."""
    if not sender.is_bot or sender.bot_owner is None:
        return
    arg_dict = {
        "bot_identity": sender.delivery_email,
        "stream_id": stream_id,
        "stream_name": stream_name,
    }
    if stream is not None:
        # The stream exists; only warn when it has no subscribers.
        if num_subscribers_for_stream_id(stream.id) > 0:
            return
        content = _("Your bot `{bot_identity}` tried to send a message to "
                    "stream #**{stream_name}**. The stream exists but "
                    "does not have any subscribers.").format(**arg_dict)
    elif stream_id is not None:
        content = _("Your bot `{bot_identity}` tried to send a message to stream ID "
                    "{stream_id}, but there is no stream with that ID.").format(**arg_dict)
    else:
        assert(stream_name is not None)
        content = _("Your bot `{bot_identity}` tried to send a message to stream "
                    "#**{stream_name}**, but that stream does not exist. "
                    "Click [here](#streams/new) to create it.").format(**arg_dict)
    send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)
def validate_stream_name_with_pm_notification(stream_name: str, realm: Realm,
                                              sender: UserProfile) -> Stream:
    """Look up a stream by name, PM-notifying a bot sender's owner on
    missing or empty streams; raises StreamDoesNotExistError if absent."""
    stream_name = stream_name.strip()
    check_stream_name(stream_name)
    try:
        target_stream = get_stream(stream_name, realm)
        send_pm_if_empty_stream(target_stream, realm, sender)
    except Stream.DoesNotExist:
        send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name)
        raise StreamDoesNotExistError(escape(stream_name))
    return target_stream
def validate_stream_id_with_pm_notification(stream_id: int, realm: Realm,
                                            sender: UserProfile) -> Stream:
    """Look up a stream by ID, PM-notifying a bot sender's owner on
    missing or empty streams; raises StreamWithIDDoesNotExistError if absent."""
    try:
        target_stream = get_stream_by_id_in_realm(stream_id, realm)
        send_pm_if_empty_stream(target_stream, realm, sender)
    except Stream.DoesNotExist:
        send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id)
        raise StreamWithIDDoesNotExistError(stream_id)
    return target_stream
def check_private_message_policy(realm: Realm, sender: UserProfile,
                                 user_profiles: Sequence[UserProfile]) -> None:
    """Enforce the realm's private-message policy for this PM attempt."""
    if realm.private_message_policy != Realm.PRIVATE_MESSAGE_POLICY_DISABLED:
        return
    # We allow PMs only between users and bots, to avoid breaking the
    # tutorial as well as automated notifications from system bots to
    # users.
    involves_bot = sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot)
    if involves_bot:
        return
    raise JsonableError(_("Private messages are disabled in this organization."))
# check_message:
# Returns a message dict ready for sending with do_send_messages on
# success; raises JsonableError on any validation failure.
def check_message(sender: UserProfile, client: Client, addressee: Addressee,
                  message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False,
                  forged_timestamp: Optional[float]=None,
                  forwarder_user_profile: Optional[UserProfile]=None,
                  local_id: Optional[str]=None,
                  sender_queue_id: Optional[str]=None,
                  widget_content: Optional[str]=None) -> Dict[str, Any]:
    """See
    https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
    for high-level documentation on this subsystem.

    Validates an outgoing message and builds the (unsaved) Message
    object.  Returns a dict with keys 'message', 'stream', 'local_id',
    'sender_queue_id', 'realm', and 'widget_content' suitable for
    passing to do_send_messages.  Raises JsonableError on any
    validation failure.
    """
    stream = None
    # Normalize and bounds-check the content before anything else.
    message_content = message_content_raw.rstrip()
    if len(message_content) == 0:
        raise JsonableError(_("Message must not be empty"))
    if '\x00' in message_content:
        raise JsonableError(_("Message must not contain null bytes"))
    message_content = truncate_body(message_content)
    if realm is None:
        realm = sender.realm
    if addressee.is_stream():
        topic_name = addressee.topic()
        topic_name = truncate_topic(topic_name)
        # The addressee may carry a stream name, a stream ID, or a
        # pre-resolved Stream object; resolve whichever we were given.
        stream_name = addressee.stream_name()
        stream_id = addressee.stream_id()
        if stream_name is not None:
            stream = validate_stream_name_with_pm_notification(stream_name, realm, sender)
        elif stream_id is not None:
            stream = validate_stream_id_with_pm_notification(stream_id, realm, sender)
        else:
            stream = addressee.stream()
        assert stream is not None
        recipient = stream.recipient
        # This will raise JsonableError if there are problems.
        if sender.bot_type != sender.OUTGOING_WEBHOOK_BOT:
            access_stream_for_send_message(
                sender=sender,
                stream=stream,
                forwarder_user_profile=forwarder_user_profile)
    elif addressee.is_private():
        user_profiles = addressee.user_profiles()
        mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror",
                                                    "jabber_mirror", "JabberMirror"]
        check_private_message_policy(realm, sender, user_profiles)
        # API Super-users who set the `forged` flag are allowed to
        # forge messages sent by any user, so we disable the
        # `forwarded_mirror_message` security check in that case.
        forwarded_mirror_message = mirror_message and not forged
        try:
            recipient = recipient_for_user_profiles(user_profiles,
                                                    forwarded_mirror_message,
                                                    forwarder_user_profile, sender)
        except ValidationError as e:
            assert isinstance(e.messages[0], str)
            raise JsonableError(e.messages[0])
    else:
        # This is defensive code--Addressee already validates
        # the message type.
        raise AssertionError("Invalid message type")
    message = Message()
    message.sender = sender
    message.content = message_content
    message.recipient = recipient
    if addressee.is_stream():
        message.set_topic_name(topic_name)
    if forged and forged_timestamp is not None:
        # Forged messages come with a timestamp
        message.date_sent = timestamp_to_datetime(forged_timestamp)
    else:
        message.date_sent = timezone_now()
    message.sending_client = client
    # We render messages later in the process.
    assert message.rendered_content is None
    if client.name == "zephyr_mirror":
        # Deduplicate mirrored messages: if an identical copy was
        # already delivered, short-circuit with just its ID.
        id = already_sent_mirrored_message_id(message)
        if id is not None:
            return {'message': id}
    if widget_content is not None:
        # Widget content must be valid JSON with a well-formed schema.
        try:
            widget_content = ujson.loads(widget_content)
        except Exception:
            raise JsonableError(_('Widgets: API programmer sent invalid JSON content'))
        error_msg = check_widget_content(widget_content)
        if error_msg:
            raise JsonableError(_('Widgets: {error_msg}').format(
                error_msg=error_msg,
            ))
    return {'message': message, 'stream': stream, 'local_id': local_id,
            'sender_queue_id': sender_queue_id, 'realm': realm,
            'widget_content': widget_content}
def _internal_prep_message(realm: Realm,
                           sender: UserProfile,
                           addressee: Addressee,
                           content: str) -> Optional[Dict[str, Any]]:
    """
    Create a message object and checks it, but doesn't send it or save it to the database.
    The internal function that calls this can therefore batch send a bunch of created
    messages together as one database query.
    Call do_send_messages with a list of the return values of this method.
    Returns None (after logging the error) if the message fails validation.
    """
    # Truncate over-long content instead of rejecting it.  The
    # hard-coded 3900 leaves room for the truncation notice, but is
    # presumably meant to track MAX_MESSAGE_LENGTH -- TODO confirm
    # these stay in sync.
    if len(content) > MAX_MESSAGE_LENGTH:
        content = content[0:3900] + "\n\n[message was too long and has been truncated]"
    if realm is None:
        raise RuntimeError("None is not a valid realm for internal_prep_message!")
    # If we have a stream name, and the stream doesn't exist, we
    # create it here (though this code path should probably be removed
    # eventually, moving that responsibility to the caller). If
    # addressee.stream_name() is None (i.e. we're sending to a stream
    # by ID), we skip this, as the stream object must already exist.
    if addressee.is_stream():
        stream_name = addressee.stream_name()
        if stream_name is not None:
            ensure_stream(realm, stream_name)
    try:
        return check_message(sender, get_client("Internal"), addressee,
                             content, realm=realm)
    except JsonableError as e:
        logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg)
    return None
def internal_prep_stream_message(
        realm: Realm, sender: UserProfile,
        stream: Stream, topic: str, content: str,
) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_stream(stream, topic),
        content=content,
    )
def internal_prep_stream_message_by_name(
        realm: Realm, sender: UserProfile,
        stream_name: str, topic: str, content: str,
) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_stream_name(stream_name, topic),
        content=content,
    )
def internal_prep_private_message(realm: Realm,
                                  sender: UserProfile,
                                  recipient_user: UserProfile,
                                  content: str) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_user_profile(recipient_user),
        content=content,
    )
def internal_send_private_message(realm: Realm,
                                  sender: UserProfile,
                                  recipient_user: UserProfile,
                                  content: str) -> Optional[int]:
    """Prepare and send a system-initiated PM; return its message ID, or
    None if the message failed validation."""
    prepared = internal_prep_private_message(realm, sender, recipient_user, content)
    if prepared is None:
        return None
    return do_send_messages([prepared])[0]
def internal_send_stream_message(
        realm: Realm,
        sender: UserProfile,
        stream: Stream,
        topic: str,
        content: str,
        email_gateway: bool=False) -> Optional[int]:
    """Prepare and send a system-initiated stream message; return its
    message ID, or None if the message failed validation."""
    prepared = internal_prep_stream_message(
        realm, sender, stream,
        topic, content,
    )
    if prepared is None:
        return None
    return do_send_messages([prepared], email_gateway=email_gateway)[0]
def internal_send_stream_message_by_name(
        realm: Realm, sender: UserProfile,
        stream_name: str, topic: str, content: str,
) -> Optional[int]:
    """Like internal_send_stream_message, but looks the stream up by name."""
    prepared = internal_prep_stream_message_by_name(
        realm, sender, stream_name,
        topic, content,
    )
    if prepared is None:
        return None
    return do_send_messages([prepared])[0]
def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str],
                                 content: str) -> Optional[int]:
    """Prepare and send a system-initiated group PM; return its message
    ID, or None if the message failed validation."""
    prepared = _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_private(emails, realm),
        content=content,
    )
    if prepared is None:
        return None
    return do_send_messages([prepared])[0]
def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str:
    """Pick a stream color: the first palette color not already used by
    an active subscription, cycling through the palette once every
    color is taken."""
    # These colors are shared with the palette in subs.js.
    used_colors = [sub.color for sub in subs if sub.active]
    for color in STREAM_ASSIGNMENT_COLORS:
        if color not in used_colors:
            return color
    return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
def validate_user_access_to_subscribers(user_profile: Optional[UserProfile],
                                        stream: Stream) -> None:
    """ Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
        * The user and the stream are in different realms
        * The realm is MIT and the stream is not invite only.
        * The stream is invite only, requesting_user is passed, and that user
          does not subscribe to the stream.
    """
    stream_dict = {"realm_id": stream.realm_id,
                   "invite_only": stream.invite_only}
    # The lambda defers the subscription lookup so the helper only pays
    # for the query when it actually needs the answer.
    validate_user_access_to_subscribers_helper(
        user_profile,
        stream_dict,
        lambda: subscribed_to_stream(cast(UserProfile, user_profile), stream.id))
def validate_user_access_to_subscribers_helper(user_profile: Optional[UserProfile],
                                               stream_dict: Mapping[str, Any],
                                               check_user_subscribed: Callable[[], bool]) -> None:
    """Helper for validate_user_access_to_subscribers that doesn't require
    a full stream object. This function is a bit hard to read,
    because it is carefully optimized for performance in the two code
    paths we call it from:
    * In `bulk_get_subscriber_user_ids`, we already know whether the
    user was subscribed via `sub_dict`, and so we want to avoid a
    database query at all (especially since it calls this in a loop);
    * In `validate_user_access_to_subscribers`, we want to only check
    if the user is subscribed when we absolutely have to, since it
    costs a database query.
    The `check_user_subscribed` argument is a function that reports
    whether the user is subscribed to the stream.
    Note also that we raise a ValidationError in cases where the
    caller is doing the wrong thing (maybe these should be
    AssertionErrors), and JsonableError for 400 type errors.
    """
    if user_profile is None:
        raise ValidationError("Missing user to validate access for")
    if user_profile.realm_id != stream_dict["realm_id"]:
        raise ValidationError("Requesting user not in given realm")
    # Guest users can access subscribed public stream's subscribers
    if user_profile.is_guest:
        if check_user_subscribed():
            return
        # We could put an AssertionError here; in that we don't have
        # any code paths that would allow a guest user to access other
        # streams in the first place.
    # NOTE(review): an unsubscribed guest falls through to the checks
    # below rather than returning here -- presumably intentional, so
    # the generic errors below apply; confirm against callers.
    if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
        raise JsonableError(_("Subscriber data is not available for this stream"))
    # Organization administrators can view subscribers for all streams.
    if user_profile.is_realm_admin:
        return
    # Non-admins must be subscribed to see a private stream's roster.
    if (stream_dict["invite_only"] and not check_user_subscribed()):
        raise JsonableError(_("Unable to retrieve subscribers for private stream"))
def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]],
                                 user_profile: UserProfile,
                                 sub_dict: Mapping[int, bool],
                                 stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]:
    """sub_dict maps stream_id => whether the user is subscribed to that stream.

    Returns a dict mapping each stream id in stream_dicts to the list
    of active subscriber user ids, with an empty list for streams whose
    subscribers user_profile may not view.

    NOTE(review): stream_dicts is iterated twice below, so it must not
    be a one-shot iterator -- confirm all callers pass a list.
    """
    # Keep only the streams whose subscribers this user may view.
    target_stream_dicts = []
    for stream_dict in stream_dicts:
        stream_recipient.populate_with(stream_id=stream_dict["id"],
                                       recipient_id=stream_dict["recipient_id"])
        try:
            validate_user_access_to_subscribers_helper(user_profile, stream_dict,
                                                       lambda: sub_dict[stream_dict["id"]])
        except JsonableError:
            continue
        target_stream_dicts.append(stream_dict)
    stream_ids = [stream['id'] for stream in target_stream_dicts]
    recipient_ids = sorted([
        stream_recipient.recipient_id_for(stream_id)
        for stream_id in stream_ids
    ])
    # Pre-seed every requested stream with an empty subscriber list so
    # inaccessible streams still appear in the result.
    result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts}
    if not recipient_ids:
        return result
    '''
    The raw SQL below leads to more than a 2x speedup when tested with
    20k+ total subscribers.  (For large realms with lots of default
    streams, this function deals with LOTS of data, so it is important
    to optimize.)
    '''
    query = SQL('''
        SELECT
            zerver_subscription.recipient_id,
            zerver_subscription.user_profile_id
        FROM
            zerver_subscription
        INNER JOIN zerver_userprofile ON
            zerver_userprofile.id = zerver_subscription.user_profile_id
        WHERE
            zerver_subscription.recipient_id in %(recipient_ids)s AND
            zerver_subscription.active AND
            zerver_userprofile.is_active
        ORDER BY
            zerver_subscription.recipient_id,
            zerver_subscription.user_profile_id
        ''')
    cursor = connection.cursor()
    cursor.execute(query, {"recipient_ids": tuple(recipient_ids)})
    rows = cursor.fetchall()
    cursor.close()
    recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict()
    '''
    Using groupby/itemgetter here is important for performance, at scale.
    It makes it so that all interpreter overhead is just O(N) in nature.
    '''
    # Rows are ORDER BY recipient_id, which groupby requires.
    for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
        user_profile_ids = [r[1] for r in recip_rows]
        stream_id = recip_to_stream_id[recip_id]
        result[stream_id] = list(user_profile_ids)
    return result
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
    # TODO: Make a generic stub for QuerySet
    """Build a query for the stream's subscriber list.

    Raises JsonableError if requesting_user may not view the
    subscribers (see validate_user_access_to_subscribers).  The caller
    can refine this query with select_related(), values(), etc.
    depending on whether it wants objects or just certain fields.
    """
    validate_user_access_to_subscribers(requesting_user, stream)
    # Note that non-active users may still have "active" subscriptions, because we
    # want to be able to easily reactivate them with their old subscriptions. This
    # is why the query here has to look at the UserProfile.is_active flag.
    return get_active_subscriptions_for_stream_id(stream.id).filter(
        user_profile__is_active=True,
    )
def get_subscriber_emails(stream: Stream,
                          requesting_user: Optional[UserProfile]=None) -> List[str]:
    """Return the email addresses of the stream's active subscribers."""
    rows = get_subscribers_query(stream, requesting_user).values('user_profile__email')
    return [row['user_profile__email'] for row in rows]
def notify_subscriptions_added(user_profile: UserProfile,
                               sub_pairs: Iterable[Tuple[Subscription, Stream]],
                               stream_user_ids: Callable[[Stream], List[int]],
                               recent_traffic: Dict[int, int],
                               no_log: bool=False) -> None:
    """Send `user_profile` a "subscription/add" event describing each
    (subscription, stream) pair they were just subscribed to."""
    if not no_log:
        log_event({'type': 'subscription_added',
                   'user': user_profile.email,
                   'names': [stream.name for sub, stream in sub_pairs],
                   'realm': user_profile.realm.string_id})

    payload = []
    for (subscription, stream) in sub_pairs:
        info = stream.to_dict()
        for field_name in Subscription.API_FIELDS:
            # "active" is implied by the fact that we're sending an add
            # event at all, so we don't include it.
            if field_name == "active":
                continue
            info[field_name] = getattr(subscription, field_name)
        info['in_home_view'] = not subscription.is_muted
        info['email_address'] = encode_email_address(stream, show_sender=True)
        info['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
            stream.id, stream.date_created, recent_traffic)
        info['subscribers'] = stream_user_ids(stream)
        payload.append(info)

    # Notify the subscribing user themselves.
    send_event(user_profile.realm,
               dict(type="subscription", op="add", subscriptions=payload),
               [user_profile.id])
def get_peer_user_ids_for_stream_change(stream: Stream,
                                        altered_user_ids: Iterable[int],
                                        subscribed_user_ids: Iterable[int]) -> Set[int]:
    '''
    altered_user_ids is the user_ids that we are adding/removing
    subscribed_user_ids is the already-subscribed user_ids

    Based on stream policy, we notify the correct bystanders, while
    not notifying altered_users (who get subscribers via another event)
    '''
    if stream.invite_only:
        # PRIVATE STREAMS
        # Realm admins can access all private stream subscribers, so
        # they get the event even when not subscribed themselves.
        admin_ids = {user.id for user in stream.realm.get_admin_users_and_bots()}
        return (admin_ids | set(subscribed_user_ids)) - set(altered_user_ids)

    # PUBLIC STREAMS
    # We now do "peer_add" or "peer_remove" events even for streams
    # users were never subscribed to, in order for the neversubscribed
    # structure to stay up-to-date.
    return set(active_non_guest_user_ids(stream.realm_id)) - set(altered_user_ids)
def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]:
    """Map each stream's id to the list of active user ids subscribed to it."""
    all_subs = get_active_subscriptions_for_stream_ids(
        [stream.id for stream in streams],
    ).filter(
        user_profile__is_active=True,
    ).values(
        'recipient__type_id',
        'user_profile_id',
    ).order_by(
        'recipient__type_id',
    )

    # groupby requires its input sorted by the grouping key; the
    # order_by above guarantees that.
    result: Dict[int, List[int]] = defaultdict(list)
    for stream_id, rows in itertools.groupby(all_subs, itemgetter('recipient__type_id')):
        result[stream_id] = [row['user_profile_id'] for row in rows]
    return result
def get_last_message_id() -> int:
    """Return the globally maximal Message id, or -1 if none exist.

    We generally use this to populate RealmAuditLog; the max id here is
    systemwide, not per-realm -- presumably there's some advantage in
    not filtering by realm.
    """
    last_id = Message.objects.aggregate(Max('id'))['id__max']
    # During initial realm creation there may be zero messages, in
    # which case aggregate() yields None; use -1 as "beginning of time".
    return -1 if last_id is None else last_id
# Return type of bulk_add_subscriptions: (newly subscribed,
# already subscribed) lists of (user_profile, stream) pairs.
SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_add_subscriptions(streams: Iterable[Stream],
                           users: Iterable[UserProfile],
                           color_map: Mapping[str, str]={},
                           from_stream_creation: bool=False,
                           acting_user: Optional[UserProfile]=None) -> SubT:
    """Subscribe every user in `users` to every stream in `streams`.

    Creates or reactivates Subscription rows in bulk, writes
    RealmAuditLog entries, and sends the client notification events
    (stream occupy, private-stream creation, subscription add, and
    peer_add).  Returns (subscribed, already_subscribed), each a list
    of (user_profile, stream) pairs.

    NOTE(review): `realm` is taken from users[0], so this assumes all
    users are in one realm -- confirm with callers.
    """
    users = list(users)
    # Index streams by recipient id for the per-user bookkeeping below.
    recipients_map: Dict[int, int] = {stream.id: stream.recipient_id for stream in streams}
    recipient_ids: List[int] = [recipient_id for recipient_id in recipients_map.values()]
    stream_map: Dict[int, Stream] = {}
    for stream in streams:
        stream_map[recipients_map[stream.id]] = stream
    subs_by_user: Dict[int, List[Subscription]] = defaultdict(list)
    all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile')
    for sub in all_subs_query:
        subs_by_user[sub.user_profile_id].append(sub)
    realm = users[0].realm
    # Partition the work: already active, inactive rows to reactivate,
    # and (user, recipient) pairs needing brand-new Subscription rows.
    already_subscribed: List[Tuple[UserProfile, Stream]] = []
    subs_to_activate: List[Tuple[Subscription, Stream]] = []
    new_subs: List[Tuple[UserProfile, int, Stream]] = []
    for user_profile in users:
        needs_new_sub: Set[int] = set(recipient_ids)
        for sub in subs_by_user[user_profile.id]:
            if sub.recipient_id in needs_new_sub:
                needs_new_sub.remove(sub.recipient_id)
                if sub.active:
                    already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
                else:
                    subs_to_activate.append((sub, stream_map[sub.recipient_id]))
                    # Mark the sub as active, without saving, so that
                    # pick_color will consider this to be an active
                    # subscription when picking colors
                    sub.active = True
        for recipient_id in needs_new_sub:
            new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))
    subs_to_add: List[Tuple[Subscription, Stream]] = []
    for (user_profile, recipient_id, stream) in new_subs:
        if stream.name in color_map:
            color = color_map[stream.name]
        else:
            color = pick_color(user_profile, subs_by_user[user_profile.id])
        sub_to_add = Subscription(user_profile=user_profile, active=True,
                                  color=color, recipient_id=recipient_id)
        subs_by_user[user_profile.id].append(sub_to_add)
        subs_to_add.append((sub_to_add, stream))
    # TODO: XXX: This transaction really needs to be done at the serializeable
    # transaction isolation level.
    with transaction.atomic():
        occupied_streams_before = list(get_occupied_streams(realm))
        Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
        sub_ids = [sub.id for (sub, stream) in subs_to_activate]
        Subscription.objects.filter(id__in=sub_ids).update(active=True)
        occupied_streams_after = list(get_occupied_streams(realm))
    # Log Subscription Activities in RealmAuditLog
    event_time = timezone_now()
    event_last_message_id = get_last_message_id()
    all_subscription_logs: (List[RealmAuditLog]) = []
    for (sub, stream) in subs_to_add:
        all_subscription_logs.append(RealmAuditLog(realm=realm,
                                                   acting_user=acting_user,
                                                   modified_user=sub.user_profile,
                                                   modified_stream=stream,
                                                   event_last_message_id=event_last_message_id,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
                                                   event_time=event_time))
    for (sub, stream) in subs_to_activate:
        all_subscription_logs.append(RealmAuditLog(realm=realm,
                                                   acting_user=acting_user,
                                                   modified_user=sub.user_profile,
                                                   modified_stream=stream,
                                                   event_last_message_id=event_last_message_id,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED,
                                                   event_time=event_time))
    # Now since we have all log objects generated we can do a bulk insert
    RealmAuditLog.objects.bulk_create(all_subscription_logs)
    new_occupied_streams = [stream for stream in
                            set(occupied_streams_after) - set(occupied_streams_before)
                            if not stream.invite_only]
    if new_occupied_streams and not from_stream_creation:
        event = dict(type="stream", op="occupy",
                     streams=[stream.to_dict()
                              for stream in new_occupied_streams])
        send_event(realm, event, active_user_ids(realm.id))
    # Notify all existing users on streams that users have joined
    # First, get all users subscribed to the streams that we care about
    # We fetch all subscription information upfront, as it's used throughout
    # the following code and we want to minize DB queries
    all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
    def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]:
        # Zephyr-mirror public streams deliberately hide subscriber lists.
        if stream.is_in_zephyr_realm and not stream.invite_only:
            return []
        user_ids = all_subscribers_by_stream[stream.id]
        return user_ids
    sub_tuples_by_user: Dict[int, List[Tuple[Subscription, Stream]]] = defaultdict(list)
    new_streams: Set[Tuple[int, int]] = set()
    for (sub, stream) in subs_to_add + subs_to_activate:
        sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
        new_streams.add((sub.user_profile.id, stream.id))
    # We now send several types of events to notify browsers. The
    # first batch is notifications to users on invite-only streams
    # that the stream exists.
    for stream in streams:
        if not stream.is_public():
            # Users newly added to invite-only streams
            # need a `create` notification. The former, because
            # they need the stream to exist before
            # they get the "subscribe" notification, and the latter so
            # they can manage the new stream.
            # Realm admins already have all created private streams.
            realm_admin_ids = [user.id for user in realm.get_admin_users_and_bots()]
            new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and
                             user.id not in realm_admin_ids]
            send_stream_creation_event(stream, new_users_ids)
    stream_ids = {stream.id for stream in streams}
    recent_traffic = get_streams_traffic(stream_ids=stream_ids)
    # The second batch is events for the users themselves that they
    # were subscribed to the new streams.
    for user_profile in users:
        if len(sub_tuples_by_user[user_profile.id]) == 0:
            continue
        sub_pairs = sub_tuples_by_user[user_profile.id]
        notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids,
                                   recent_traffic)
    # The second batch is events for other users who are tracking the
    # subscribers lists of streams in their browser; everyone for
    # public streams and only existing subscribers for private streams.
    for stream in streams:
        if stream.is_in_zephyr_realm and not stream.invite_only:
            continue
        new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams]
        subscribed_user_ids = all_subscribers_by_stream[stream.id]
        peer_user_ids = get_peer_user_ids_for_stream_change(
            stream=stream,
            altered_user_ids=new_user_ids,
            subscribed_user_ids=subscribed_user_ids,
        )
        if peer_user_ids:
            for new_user_id in new_user_ids:
                event = dict(type="subscription", op="peer_add",
                             subscriptions=[stream.name],
                             user_id=new_user_id)
                send_event(realm, event, peer_user_ids)
    return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
            [(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
            already_subscribed)
def get_available_notification_sounds() -> List[str]:
    """List the notification sound names bundled with the server.

    A sound is the basename of a .ogg file in the notification_sounds
    static directory.
    """
    sounds_dir = static_path('audio/notification_sounds')
    names = []
    for entry in os.listdir(sounds_dir):
        basename, extension = os.path.splitext(entry)
        if '.' in basename:  # nocoverage
            # Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming)
            # to avoid spurious duplicates.
            continue
        if extension == '.ogg':
            names.append(basename)
    return names
def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream],
                                 no_log: bool=False) -> None:
    """Tell `user_profile`'s clients they were unsubscribed from `streams`."""
    if not no_log:
        log_event({'type': 'subscription_removed',
                   'user': user_profile.email,
                   'names': [stream.name for stream in streams],
                   'realm': user_profile.realm.string_id})

    subscriptions = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
    send_event(user_profile.realm,
               dict(type="subscription", op="remove", subscriptions=subscriptions),
               [user_profile.id])
# Return type of bulk_remove_subscriptions: (removed, not_subscribed)
# lists of (user_profile, stream) pairs.
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(users: Iterable[UserProfile],
                              streams: Iterable[Stream],
                              acting_client: Client,
                              acting_user: Optional[UserProfile]=None) -> SubAndRemovedT:
    """Unsubscribe every user in `users` from every stream in `streams`.

    Deactivates the Subscription rows in bulk, writes RealmAuditLog
    entries, notifies the affected users and their peers, vacates
    streams, and deactivates newly-vacant private streams.  Returns
    (removed, not_subscribed) pairs of (user_profile, stream).

    NOTE(review): `acting_user` is accepted but never used in this
    function body -- confirm whether it should be recorded in the
    RealmAuditLog entries below.
    NOTE(review): like bulk_add_subscriptions, the realm is taken from
    users[0], so all users are assumed to share one realm.
    """
    users = list(users)
    streams = list(streams)
    stream_dict = {stream.id: stream for stream in streams}
    existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict)
    def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]:
        # Compute the (user, stream) pairs that were requested but where
        # the user was not actually subscribed.
        stream_ids = {stream.id for stream in streams}
        not_subscribed: List[Tuple[UserProfile, Stream]] = []
        for user_profile in users:
            user_sub_stream_info = existing_subs_by_user[user_profile.id]
            subscribed_stream_ids = {
                stream.id
                for (sub, stream) in user_sub_stream_info
            }
            not_subscribed_stream_ids = stream_ids - subscribed_stream_ids
            for stream_id in not_subscribed_stream_ids:
                stream = stream_dict[stream_id]
                not_subscribed.append((user_profile, stream))
        return not_subscribed
    not_subscribed = get_non_subscribed_tups()
    subs_to_deactivate: List[Tuple[Subscription, Stream]] = []
    sub_ids_to_deactivate: List[int] = []
    # This loop just flattens out our data into big lists for
    # bulk operations.
    for tup_list in existing_subs_by_user.values():
        for (sub, stream) in tup_list:
            subs_to_deactivate.append((sub, stream))
            sub_ids_to_deactivate.append(sub.id)
    our_realm = users[0].realm
    # TODO: XXX: This transaction really needs to be done at the serializeable
    # transaction isolation level.
    with transaction.atomic():
        occupied_streams_before = list(get_occupied_streams(our_realm))
        Subscription.objects.filter(
            id__in=sub_ids_to_deactivate,
        ) .update(active=False)
        occupied_streams_after = list(get_occupied_streams(our_realm))
    # Log Subscription Activities in RealmAuditLog
    event_time = timezone_now()
    event_last_message_id = get_last_message_id()
    all_subscription_logs: (List[RealmAuditLog]) = []
    for (sub, stream) in subs_to_deactivate:
        all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
                                                   modified_user=sub.user_profile,
                                                   modified_stream=stream,
                                                   event_last_message_id=event_last_message_id,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
                                                   event_time=event_time))
    # Now since we have all log objects generated we can do a bulk insert
    RealmAuditLog.objects.bulk_create(all_subscription_logs)
    altered_user_dict: Dict[int, List[UserProfile]] = defaultdict(list)
    streams_by_user: Dict[int, List[Stream]] = defaultdict(list)
    for (sub, stream) in subs_to_deactivate:
        streams_by_user[sub.user_profile_id].append(stream)
        altered_user_dict[stream.id].append(sub.user_profile)
    for user_profile in users:
        if len(streams_by_user[user_profile.id]) == 0:
            continue
        notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
        # Mark-as-read of the unsubscribed streams' messages happens
        # asynchronously in the deferred_work queue.
        event = {'type': 'mark_stream_messages_as_read',
                 'client_id': acting_client.id,
                 'user_profile_id': user_profile.id,
                 'stream_ids': [stream.id for stream in streams]}
        queue_json_publish("deferred_work", event)
    all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
    def send_peer_remove_event(stream: Stream) -> None:
        # Zephyr-mirror public streams deliberately hide subscriber lists.
        if stream.is_in_zephyr_realm and not stream.invite_only:
            return
        altered_users = altered_user_dict[stream.id]
        altered_user_ids = [u.id for u in altered_users]
        subscribed_user_ids = all_subscribers_by_stream[stream.id]
        peer_user_ids = get_peer_user_ids_for_stream_change(
            stream=stream,
            altered_user_ids=altered_user_ids,
            subscribed_user_ids=subscribed_user_ids,
        )
        if peer_user_ids:
            for removed_user in altered_users:
                event = dict(type="subscription",
                             op="peer_remove",
                             subscriptions=[stream.name],
                             user_id=removed_user.id)
                send_event(our_realm, event, peer_user_ids)
    for stream in streams:
        send_peer_remove_event(stream=stream)
    new_vacant_streams = [stream for stream in
                          set(occupied_streams_before) - set(occupied_streams_after)]
    new_vacant_private_streams = [stream for stream in new_vacant_streams
                                  if stream.invite_only]
    new_vacant_public_streams = [stream for stream in new_vacant_streams
                                 if not stream.invite_only]
    if new_vacant_public_streams:
        event = dict(type="stream", op="vacate",
                     streams=[stream.to_dict()
                              for stream in new_vacant_public_streams])
        send_event(our_realm, event, active_user_ids(our_realm.id))
    if new_vacant_private_streams:
        # Deactivate any newly-vacant private streams
        for stream in new_vacant_private_streams:
            do_deactivate_stream(stream)
    return (
        [(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
        not_subscribed,
    )
def log_subscription_property_change(user_email: str, stream_name: str, property: str,
                                     value: Any) -> None:
    """Record a subscription-property change in the event log."""
    log_event({'type': 'subscription_property',
               'property': property,
               'user': user_email,
               'stream_name': stream_name,
               'value': value})
def do_change_subscription_property(user_profile: UserProfile, sub: Subscription,
                                    stream: Stream, property_name: str, value: Any,
                                    ) -> None:
    """Update one property on `sub`, log it, and notify the user's clients.

    The muting flag is stored in the database as is_muted, but the
    events API still uses the legacy, inverted in_home_view name, so we
    translate between the two representations in both directions here.
    """
    database_property_name = property_name
    database_value = value
    event_property_name = property_name
    event_value = value

    # The two property names below are mutually exclusive spellings of
    # the same underlying flag.
    if property_name == "in_home_view":
        database_property_name = "is_muted"
        database_value = not value
    elif property_name == "is_muted":
        event_property_name = "in_home_view"
        event_value = not value

    setattr(sub, database_property_name, database_value)
    sub.save(update_fields=[database_property_name])
    log_subscription_property_change(user_profile.email, stream.name,
                                     database_property_name, database_value)

    send_event(user_profile.realm,
               dict(type="subscription",
                    op="update",
                    email=user_profile.email,
                    property=event_property_name,
                    value=event_value,
                    stream_id=stream.id,
                    name=stream.name),
               [user_profile.id])
def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None:
    """Set a new (hashed) password and record the change in RealmAuditLog.

    With commit=False, the caller is responsible for saving the profile.
    """
    user_profile.set_password(password)
    if commit:
        user_profile.save(update_fields=["password"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm, acting_user=user_profile,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
        event_time=timezone_now())
def do_change_full_name(user_profile: UserProfile, full_name: str,
                        acting_user: Optional[UserProfile]) -> None:
    """Rename a user, audit-log the change, and broadcast it to clients."""
    old_name = user_profile.full_name
    user_profile.full_name = full_name
    user_profile.save(update_fields=["full_name"])

    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
                                 modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
                                 event_time=timezone_now(), extra_data=old_name)

    payload = dict(user_id=user_profile.id,
                   full_name=user_profile.full_name)
    send_event(user_profile.realm,
               dict(type='realm_user', op='update', person=payload),
               active_user_ids(user_profile.realm_id))
    if user_profile.is_bot:
        # Bot renames are additionally broadcast on the realm_bot event
        # stream to the bot's owner audience.
        send_event(user_profile.realm,
                   dict(type='realm_bot', op='update', bot=payload),
                   bot_owner_user_ids(user_profile))
def check_change_full_name(user_profile: UserProfile, full_name_raw: str,
                           acting_user: UserProfile) -> str:
    """Validate and apply a proposed full name.

    The caller is responsible for checking permissions.  Returns the
    name actually stored, which may differ from what was passed in
    because check_full_name strips whitespace.
    """
    validated_name = check_full_name(full_name_raw)
    do_change_full_name(user_profile, validated_name, acting_user)
    return validated_name
def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str,
                               acting_user: UserProfile) -> None:
    """Validate and apply a bot rename, enforcing bot-name availability."""
    new_full_name = check_full_name(full_name_raw)

    if new_full_name == user_profile.full_name:
        # Our web app will try to patch full_name even if the user
        # didn't modify the name in the form; silently ignore such
        # no-op requests.
        return

    check_bot_name_available(
        realm_id=user_profile.realm_id,
        full_name=new_full_name,
    )
    do_change_full_name(user_profile, new_full_name, acting_user)
def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile,
                        acting_user: UserProfile) -> None:
    """Transfer ownership of the bot `user_profile` to `bot_owner`.

    Saves the change, writes a RealmAuditLog entry, and sends the
    realm_bot delete/add/update events needed to keep the old owner's,
    new owner's, and admins' bot lists consistent.
    """
    previous_owner = user_profile.bot_owner
    user_profile.bot_owner = bot_owner
    user_profile.save()  # Can't use update_fields because of how the foreign key works.
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
                                 modified_user=user_profile, event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
                                 event_time=event_time)
    update_users = bot_owner_user_ids(user_profile)
    # For admins, update event is sent instead of delete/add
    # event. bot_data of admin contains all the
    # bots and none of them should be removed/(added again).
    # Delete the bot from previous owner's bot data.
    if previous_owner and not previous_owner.is_realm_admin:
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op="delete",
                        bot=dict(
                            user_id=user_profile.id,
                        )),
                   {previous_owner.id})
        # Do not send update event for previous bot owner.
        update_users = update_users - {previous_owner.id}
    # Notify the new owner that the bot has been added.
    if not bot_owner.is_realm_admin:
        add_event = created_bot_event(user_profile)
        send_event(user_profile.realm, add_event, {bot_owner.id})
        # Do not send update event for bot_owner.
        update_users = update_users - {bot_owner.id}
    send_event(user_profile.realm,
               dict(type='realm_bot',
                    op='update',
                    bot=dict(user_id=user_profile.id,
                             owner_id=user_profile.bot_owner.id,
                             )),
               update_users)
    # Since `bot_owner_id` is included in the user profile dict we need
    # to update the users dict with the new bot owner id
    event: Dict[str, Any] = dict(
        type="realm_user",
        op="update",
        person=dict(
            user_id=user_profile.id,
            bot_owner_id=user_profile.bot_owner.id,
        ),
    )
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
    """Record that the user agreed to the given Terms of Service version."""
    user_profile.tos_version = tos_version
    user_profile.save(update_fields=["tos_version"])
    RealmAuditLog.objects.create(
        realm=user_profile.realm, acting_user=user_profile,
        modified_user=user_profile,
        event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED,
        event_time=timezone_now())
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str:
    """Replace the user's API key with a freshly generated one.

    Invalidates the old key's cache entry, audit-logs the change,
    notifies bot owners if the user is a bot, and queues a job to clear
    the user's push device tokens.  Returns the new API key.
    """
    old_api_key = user_profile.api_key
    new_api_key = generate_api_key()
    user_profile.api_key = new_api_key
    user_profile.save(update_fields=["api_key"])
    # We need to explicitly delete the old API key from our caches,
    # because the on-save handler for flushing the UserProfile object
    # in zerver/lib/cache.py only has access to the new API key.
    cache_delete(user_profile_by_api_key_cache_key(old_api_key))
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
                                 modified_user=user_profile, event_type=RealmAuditLog.USER_API_KEY_CHANGED,
                                 event_time=event_time)
    if user_profile.is_bot:
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(user_id=user_profile.id,
                                 api_key=new_api_key,
                                 )),
                   bot_owner_user_ids(user_profile))
    # Sessions authenticated by the old key should stop receiving push
    # notifications; that cleanup happens asynchronously.
    event = {'type': 'clear_push_device_tokens',
             'user_profile_id': user_profile.id}
    queue_json_publish("deferred_work", event)
    return new_api_key
def notify_avatar_url_change(user_profile: UserProfile) -> None:
    """Broadcast the user's current avatar URLs to the relevant clients."""
    if user_profile.is_bot:
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(user_id=user_profile.id,
                                 avatar_url=avatar_url(user_profile),
                                 )),
                   bot_owner_user_ids(user_profile))

    person = dict(
        avatar_source=user_profile.avatar_source,
        avatar_url=avatar_url(user_profile),
        avatar_url_medium=avatar_url(user_profile, medium=True),
        avatar_version=user_profile.avatar_version,
        # Even clients using client_gravatar don't need the email,
        # since we're sending the URL anyway.
        user_id=user_profile.id,
    )
    send_event(user_profile.realm,
               dict(type='realm_user', op='update', person=person),
               active_user_ids(user_profile.realm_id))
def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str) -> None:
    """Switch the user's avatar source, audit-log it, and notify clients."""
    user_profile.avatar_source = avatar_source
    # Bumping avatar_version lets clients cache-bust the old avatar URL.
    user_profile.avatar_version += 1
    user_profile.save(update_fields=["avatar_source", "avatar_version"])

    RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
                                 extra_data={'avatar_source': avatar_source},
                                 event_time=timezone_now())
    notify_avatar_url_change(user_profile)
def do_delete_avatar_image(user: UserProfile) -> None:
    """Remove the user's uploaded avatar, reverting them to the gravatar."""
    do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR)
    delete_avatar_image(user)
def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None:
    """Change where the realm icon comes from and notify all active users."""
    realm.icon_source = icon_source
    # Bumping icon_version lets clients cache-bust the old icon URL.
    realm.icon_version += 1
    realm.save(update_fields=["icon_source", "icon_version"])

    if log:
        log_event({'type': 'realm_change_icon',
                   'realm': realm.string_id,
                   'icon_source': icon_source})

    send_event(realm,
               dict(type='realm',
                    op='update_dict',
                    property="icon",
                    data=dict(icon_source=realm.icon_source,
                              icon_url=realm_icon_url(realm))),
               active_user_ids(realm.id))
def do_change_logo_source(realm: Realm, logo_source: str, night: bool) -> None:
    """Change the realm's day or night logo source, log it, and notify clients."""
    if night:
        realm.night_logo_source = logo_source
        realm.night_logo_version += 1
        realm.save(update_fields=["night_logo_source", "night_logo_version"])
    else:
        realm.logo_source = logo_source
        realm.logo_version += 1
        realm.save(update_fields=["logo_source", "logo_version"])

    RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_LOGO_CHANGED,
                                 realm=realm, event_time=timezone_now())

    send_event(realm,
               dict(type='realm',
                    op='update_dict',
                    property="night_logo" if night else "logo",
                    data=get_realm_logo_data(realm, night)),
               active_user_ids(realm.id))
def do_change_plan_type(realm: Realm, plan_type: int) -> None:
    """Move the realm to a new billing plan type.

    Records the change in RealmAuditLog, adjusts the plan-dependent
    limits (invites, message visibility, upload quota), recomputes the
    first visible message id, and notifies all active users.

    Raises AssertionError for an unrecognized plan_type.
    """
    old_value = realm.plan_type
    realm.plan_type = plan_type
    realm.save(update_fields=['plan_type'])
    RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
                                 realm=realm, event_time=timezone_now(),
                                 extra_data={'old_value': old_value, 'new_value': plan_type})
    if plan_type == Realm.STANDARD:
        realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
        realm.message_visibility_limit = None
        realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
    elif plan_type == Realm.SELF_HOSTED:
        realm.max_invites = None  # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter.
        realm.message_visibility_limit = None
        realm.upload_quota_gb = None
    elif plan_type == Realm.STANDARD_FREE:
        realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
        realm.message_visibility_limit = None
        realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
    elif plan_type == Realm.LIMITED:
        realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
        realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
        realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED
    else:
        raise AssertionError("Invalid plan type")
    update_first_visible_message_id(realm)
    # NOTE(review): '_max_invites' here presumably names the database
    # column backing the max_invites property assigned above -- confirm
    # against the Realm model definition.
    realm.save(update_fields=['_max_invites', 'message_visibility_limit', 'upload_quota_gb'])
    event = {'type': 'realm', 'op': 'update', 'property': 'plan_type', 'value': plan_type,
             'extra_data': {'upload_quota': realm.upload_quota_bytes()}}
    send_event(realm, event, active_user_ids(realm.id))
def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream],
                                     log: bool=True) -> None:
    """Set (or clear, with stream=None) the user's default sending stream."""
    user_profile.default_sending_stream = stream
    user_profile.save(update_fields=['default_sending_stream'])
    if log:
        log_event({'type': 'user_change_default_sending_stream',
                   'user': user_profile.email,
                   'stream': str(stream)})
    if user_profile.is_bot:
        # Bot configuration changes are also broadcast on the realm_bot
        # event stream.
        stream_name: Optional[str] = stream.name if stream else None
        bot_payload = dict(user_id=user_profile.id,
                           default_sending_stream=stream_name)
        send_event(user_profile.realm,
                   dict(type='realm_bot', op='update', bot=bot_payload),
                   bot_owner_user_ids(user_profile))
def do_change_default_events_register_stream(user_profile: UserProfile,
                                             stream: Optional[Stream],
                                             log: bool=True) -> None:
    """Set (or clear, with stream=None) the user's default events-register stream."""
    user_profile.default_events_register_stream = stream
    user_profile.save(update_fields=['default_events_register_stream'])
    if log:
        log_event({'type': 'user_change_default_events_register_stream',
                   'user': user_profile.email,
                   'stream': str(stream)})
    if user_profile.is_bot:
        # Bot configuration changes are also broadcast on the realm_bot
        # event stream.
        stream_name: Optional[str] = stream.name if stream else None
        bot_payload = dict(user_id=user_profile.id,
                           default_events_register_stream=stream_name)
        send_event(user_profile.realm,
                   dict(type='realm_bot', op='update', bot=bot_payload),
                   bot_owner_user_ids(user_profile))
def do_change_default_all_public_streams(user_profile: UserProfile, value: bool,
                                         log: bool=True) -> None:
    """Set the user's default_all_public_streams flag."""
    user_profile.default_all_public_streams = value
    user_profile.save(update_fields=['default_all_public_streams'])
    if log:
        log_event({'type': 'user_change_default_all_public_streams',
                   'user': user_profile.email,
                   'value': str(value)})
    if user_profile.is_bot:
        # Bot configuration changes are also broadcast on the realm_bot
        # event stream.
        bot_payload = dict(user_id=user_profile.id,
                           default_all_public_streams=user_profile.default_all_public_streams)
        send_event(user_profile.realm,
                   dict(type='realm_bot', op='update', bot=bot_payload),
                   bot_owner_user_ids(user_profile))
def do_change_user_role(user_profile: UserProfile, value: int) -> None:
    """Change the user's role, audit-log the transition, and notify clients."""
    old_value = user_profile.role
    user_profile.role = value
    user_profile.save(update_fields=["role"])

    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile,
        event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(),
        extra_data=ujson.dumps({
            RealmAuditLog.OLD_VALUE: old_value,
            RealmAuditLog.NEW_VALUE: value,
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))

    send_event(user_profile.realm,
               dict(type="realm_user", op="update",
                    person=dict(user_id=user_profile.id, role=user_profile.role)),
               active_user_ids(user_profile.realm_id))
def do_change_is_api_super_user(user_profile: UserProfile, value: bool) -> None:
    """Set the user's is_api_super_user flag.

    NOTE(review): unlike the other mutators here, this sends no client
    event and writes no RealmAuditLog entry.
    """
    user_profile.is_api_super_user = value
    user_profile.save(update_fields=["is_api_super_user"])
def do_change_stream_invite_only(stream: Stream, invite_only: bool,
                                 history_public_to_subscribers: Optional[bool]=None) -> None:
    """Toggle a stream between public and invite-only.

    When history_public_to_subscribers is None, it is replaced with the
    policy-appropriate default for the new privacy setting.
    """
    history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
        stream.realm,
        invite_only,
        history_public_to_subscribers,
    )
    stream.invite_only = invite_only
    stream.history_public_to_subscribers = history_public_to_subscribers
    stream.save(update_fields=['invite_only', 'history_public_to_subscribers'])

    send_event(stream.realm,
               dict(
                   op="update",
                   type="stream",
                   property="invite_only",
                   value=invite_only,
                   history_public_to_subscribers=history_public_to_subscribers,
                   stream_id=stream.id,
                   name=stream.name,
               ),
               can_access_stream_user_ids(stream))
def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None:
    """Set whether the stream is web-public.

    NOTE(review): unlike the other stream mutators here, this sends no
    client event and writes no RealmAuditLog entry.
    """
    stream.is_web_public = is_web_public
    stream.save(update_fields=['is_web_public'])
def do_change_stream_post_policy(stream: Stream, stream_post_policy: int) -> None:
    """Update who is allowed to post to the stream and notify clients."""
    stream.stream_post_policy = stream_post_policy
    stream.save(update_fields=['stream_post_policy'])

    send_event(stream.realm,
               dict(
                   op="update",
                   type="stream",
                   property="stream_post_policy",
                   value=stream_post_policy,
                   stream_id=stream.id,
                   name=stream.name,
               ),
               can_access_stream_user_ids(stream))

    # Backwards-compatibility code: We removed the
    # is_announcement_only property in early 2020, but we send a
    # duplicate event for legacy mobile clients that might want the
    # data.
    send_event(stream.realm,
               dict(
                   op="update",
                   type="stream",
                   property="is_announcement_only",
                   value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS,
                   stream_id=stream.id,
                   name=stream.name,
               ),
               can_access_stream_user_ids(stream))
def do_rename_stream(stream: Stream,
                     new_name: str,
                     user_profile: UserProfile,
                     log: bool=True) -> Dict[str, str]:
    """Rename a stream, keeping caches, clients, and the stream itself in sync.

    Saves the new name, fixes up the various caches keyed by stream
    name, notifies clients with per-property update events, and posts a
    notification message to the stream itself.  Returns a dict with the
    stream's new email forwarding address.
    """
    old_name = stream.name
    stream.name = new_name
    stream.save(update_fields=["name"])
    if log:
        log_event({'type': 'stream_name_change',
                   'realm': stream.realm.string_id,
                   'new_name': new_name})
    recipient_id = stream.recipient_id
    messages = Message.objects.filter(recipient_id=recipient_id).only("id")
    # Update the display recipient and stream, which are easy single
    # items to set.
    old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
    new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
    if old_cache_key != new_cache_key:
        cache_delete(old_cache_key)
        cache_set(new_cache_key, stream)
    cache_set(display_recipient_cache_key(recipient_id), stream.name)
    # Delete cache entries for everything else, which is cheaper and
    # clearer than trying to set them. display_recipient is the out of
    # date field in all cases.
    cache_delete_many(
        to_dict_cache_key_id(message.id) for message in messages)
    new_email = encode_email_address(stream, show_sender=True)
    # We will tell our users to essentially
    # update stream.name = new_name where name = old_name
    # and update stream.email = new_email where name = old_name.
    # We could optimize this by trying to send one message, but the
    # client code really wants one property update at a time, and
    # updating stream names is a pretty infrequent operation.
    # More importantly, we want to key these updates by id, not name,
    # since id is the immutable primary key, and obviously name is not.
    data_updates = [
        ['email_address', new_email],
        ['name', new_name],
    ]
    for property, value in data_updates:
        event = dict(
            op="update",
            type="stream",
            property=property,
            value=value,
            stream_id=stream.id,
            name=old_name,
        )
        send_event(stream.realm, event, can_access_stream_user_ids(stream))
    sender = get_system_bot(settings.NOTIFICATION_BOT)
    # Announce the rename in the stream itself via the notification bot.
    internal_send_stream_message(
        stream.realm,
        sender,
        stream,
        Realm.STREAM_EVENTS_NOTIFICATION_TOPIC,
        _('@_**{user_name}|{user_id}** renamed stream **{old_stream_name}** to '
          '**{new_stream_name}**.').format(
              user_name=user_profile.full_name,
              user_id=user_profile.id,
              old_stream_name=old_name,
              new_stream_name=new_name,
          ),
    )
    # Even though the token doesn't change, the web client needs to update the
    # email forwarding address to display the correctly-escaped new name.
    return {"email_address": new_email}
def do_change_stream_description(stream: Stream, new_description: str) -> None:
    """Update a stream's description (raw and rendered) and notify clients.

    All users who can access the stream receive the update event.
    """
    rendered = render_stream_description(new_description)
    stream.description = new_description
    stream.rendered_description = rendered
    stream.save(update_fields=['description', 'rendered_description'])

    notice = dict(
        type='stream',
        op='update',
        property='description',
        name=stream.name,
        stream_id=stream.id,
        value=new_description,
        rendered_description=rendered,
    )
    send_event(stream.realm, notice, can_access_stream_user_ids(stream))
def do_create_realm(string_id: str, name: str,
                    emails_restricted_to_domains: Optional[bool]=None) -> Realm:
    """Create a new Realm, its two initial streams, and announce the signup.

    Raises AssertionError if a realm with this string_id already exists.
    Returns the saved Realm with notifications/signup streams configured.
    """
    if Realm.objects.filter(string_id=string_id).exists():
        raise AssertionError(f"Realm {string_id} already exists!")
    if not server_initialized():
        logging.info("Server not yet initialized. Creating the internal realm first.")
        create_internal_realm()
    # Only pass emails_restricted_to_domains through when explicitly set,
    # so the model default applies otherwise.
    kwargs: Dict[str, Any] = {}
    if emails_restricted_to_domains is not None:
        kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains
    realm = Realm(string_id=string_id, name=name, **kwargs)
    realm.save()
    # Create stream once Realm object has been saved
    notifications_stream = ensure_stream(
        realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
        stream_description="Everyone is added to this stream by default. Welcome! :octopus:")
    realm.notifications_stream = notifications_stream
    # With the current initial streams situation, the only public
    # stream is the notifications_stream.
    DefaultStream.objects.create(stream=notifications_stream, realm=realm)
    signup_notifications_stream = ensure_stream(
        realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True,
        stream_description="A private stream for core team members.")
    realm.signup_notifications_stream = signup_notifications_stream
    # Second save: the streams could only be created after the realm existed.
    realm.save(update_fields=['notifications_stream', 'signup_notifications_stream'])
    if settings.BILLING_ENABLED:
        do_change_plan_type(realm, Realm.LIMITED)
    # Log the event
    log_event({"type": "realm_created",
               "string_id": string_id,
               "emails_restricted_to_domains": emails_restricted_to_domains})
    # Send a notification to the admin realm
    signup_message = "Signups enabled"
    sender = get_system_bot(settings.NOTIFICATION_BOT)
    admin_realm = sender.realm
    try:
        signups_stream = get_signups_stream(admin_realm)
        topic = realm.display_subdomain
        internal_send_stream_message(
            admin_realm,
            sender,
            signups_stream,
            topic,
            signup_message,
        )
    except Stream.DoesNotExist:  # nocoverage
        # If the signups stream hasn't been created in the admin
        # realm, don't auto-create it to send to it; just do nothing.
        pass
    return realm
def do_change_notification_settings(user_profile: UserProfile, name: str,
                                    value: Union[bool, int, str], log: bool=True) -> None:
    """Takes in a UserProfile object, the name of a global notification
    preference to update, and the value to update to
    """
    expected_type = UserProfile.notification_setting_types[name]
    assert isinstance(value, expected_type), (
        f'Cannot update {name}: {value} is not an instance of {expected_type}')
    setattr(user_profile, name, value)

    # Disabling digest emails should clear a user's email queue
    if name == 'enable_digest_emails' and not value:
        clear_scheduled_emails([user_profile.id], ScheduledEmail.DIGEST)

    user_profile.save(update_fields=[name])
    event = dict(
        type='update_global_notifications',
        user=user_profile.email,
        notification_name=name,
        setting=value,
    )
    if log:
        log_event(event)
    send_event(user_profile.realm, event, [user_profile.id])
def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None:
    """Persist the user's "pressing Enter sends the message" compose preference."""
    user_profile.enter_sends = enter_sends
    user_profile.save(update_fields=["enter_sends"])
def do_set_user_display_setting(user_profile: UserProfile,
                                setting_name: str,
                                setting_value: Union[bool, str, int]) -> None:
    """Change one of the user's display settings and notify their clients.

    Timezone changes are additionally broadcast to the whole realm, since
    other users render timestamps using that information.
    """
    expected_type = UserProfile.property_types[setting_name]
    assert isinstance(setting_value, expected_type)
    setattr(user_profile, setting_name, setting_value)
    user_profile.save(update_fields=[setting_name])

    event = dict(
        type='update_display_settings',
        user=user_profile.email,
        setting_name=setting_name,
        setting=setting_value,
    )
    if setting_name == "default_language":
        assert isinstance(setting_value, str)
        event['language_name'] = get_language_name(setting_value)
    send_event(user_profile.realm, event, [user_profile.id])

    # Updates to the timezone display setting are sent to all users
    if setting_name == "timezone":
        person = dict(
            email=user_profile.email,
            user_id=user_profile.id,
            timezone=user_profile.timezone,
        )
        send_event(user_profile.realm,
                   dict(type='realm_user', op='update', person=person),
                   active_user_ids(user_profile.realm_id))
def lookup_default_stream_groups(default_stream_group_names: List[str],
                                 realm: Realm) -> List[DefaultStreamGroup]:
    """Resolve group names to DefaultStreamGroup rows for this realm.

    Raises JsonableError for any name that does not exist.
    """
    groups = []
    for name in default_stream_group_names:
        try:
            groups.append(DefaultStreamGroup.objects.get(name=name, realm=realm))
        except DefaultStreamGroup.DoesNotExist:
            raise JsonableError(_('Invalid default stream group {}').format(name))
    return groups
def notify_default_streams(realm: Realm) -> None:
    """Broadcast the realm's current default-stream list to non-guest users."""
    streams = get_default_streams_for_realm(realm.id)
    event = dict(
        type="default_streams",
        default_streams=streams_to_dicts_sorted(streams),
    )
    send_event(realm, event, active_non_guest_user_ids(realm.id))
def notify_default_stream_groups(realm: Realm) -> None:
    """Broadcast the realm's default stream groups to non-guest users."""
    groups = get_default_stream_groups(realm)
    event = dict(
        type="default_stream_groups",
        default_stream_groups=default_stream_groups_to_dicts_sorted(groups),
    )
    send_event(realm, event, active_non_guest_user_ids(realm.id))
def do_add_default_stream(stream: Stream) -> None:
    """Mark `stream` as a default stream for its realm and notify clients.

    Idempotent: if the stream is already a default stream, no duplicate row
    is created, but the (cheap) notification event is still sent, matching
    the previous behavior.
    """
    # get_or_create avoids the check-then-create race that the previous
    # exists()/create() pair had under concurrent requests.
    DefaultStream.objects.get_or_create(realm_id=stream.realm_id,
                                        stream_id=stream.id)
    notify_default_streams(stream.realm)
def do_remove_default_stream(stream: Stream) -> None:
    """Remove `stream` from its realm's default streams and notify clients."""
    DefaultStream.objects.filter(
        realm_id=stream.realm_id,
        stream_id=stream.id,
    ).delete()
    notify_default_streams(stream.realm)
def do_create_default_stream_group(realm: Realm, group_name: str,
                                   description: str, streams: List[Stream]) -> None:
    """Create a named default stream group containing `streams`.

    Rejects streams that are already realm-wide defaults, and duplicate
    group names.
    """
    realm_default_streams = get_default_streams_for_realm(realm.id)
    for stream in streams:
        if stream in realm_default_streams:
            raise JsonableError(_(
                "'{stream_name}' is a default stream and cannot be added to '{group_name}'",
            ).format(stream_name=stream.name, group_name=group_name))

    check_default_stream_group_name(group_name)
    group, created = DefaultStreamGroup.objects.get_or_create(
        name=group_name, realm=realm, description=description)
    if not created:
        raise JsonableError(_(
            "Default stream group '{group_name}' already exists",
        ).format(group_name=group_name))
    group.streams.set(streams)
    notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup,
                                           streams: List[Stream]) -> None:
    """Add `streams` to an existing default stream group, then notify clients.

    Rejects streams that are realm-wide defaults or already in the group.
    Note the `group.streams.all()` membership check re-queries per
    iteration, so duplicates within `streams` are also rejected.
    """
    default_streams = get_default_streams_for_realm(realm.id)
    for stream in streams:
        if stream in default_streams:
            raise JsonableError(_(
                "'{stream_name}' is a default stream and cannot be added to '{group_name}'",
            ).format(stream_name=stream.name, group_name=group.name))
        if stream in group.streams.all():
            raise JsonableError(_(
                "Stream '{stream_name}' is already present in default stream group '{group_name}'",
            ).format(stream_name=stream.name, group_name=group.name))
        group.streams.add(stream)
    group.save()
    notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup,
                                                streams: List[Stream]) -> None:
    """Remove `streams` from a default stream group, then notify clients.

    Raises JsonableError for any stream not currently in the group; the
    membership check re-queries `group.streams.all()` per iteration.
    """
    for stream in streams:
        if stream not in group.streams.all():
            raise JsonableError(_(
                "Stream '{stream_name}' is not present in default stream group '{group_name}'",
            ).format(stream_name=stream.name, group_name=group.name))
        group.streams.remove(stream)
    group.save()
    notify_default_stream_groups(realm)
def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup,
                                        new_group_name: str) -> None:
    """Rename a default stream group, rejecting no-ops and name collisions."""
    if group.name == new_group_name:
        raise JsonableError(_("This default stream group is already named '{}'").format(new_group_name))
    collision = DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm)
    if collision.exists():
        raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name))
    group.name = new_group_name
    group.save()
    notify_default_stream_groups(realm)
def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup,
                                               new_description: str) -> None:
    """Update a default stream group's description and notify clients."""
    group.description = new_description
    group.save()
    notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
    """Delete a default stream group and notify clients of the new list."""
    group.delete()
    notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
    """Return the Stream objects configured as defaults for this realm."""
    rows = DefaultStream.objects.select_related().filter(realm_id=realm_id)
    return [row.stream for row in rows]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
    """Return the streams a new user should be subscribed to by default."""
    # Right now default streams are realm-wide. This wrapper gives us flexibility
    # to some day further customize how we set up default streams for new users.
    return get_default_streams_for_realm(user_profile.realm_id)
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
    """Serialize streams to JSON-friendly dicts, sorted by stream name."""
    return sorted((stream.to_dict() for stream in streams),
                  key=lambda stream_dict: stream_dict["name"])
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
    """Serialize default stream groups to dicts, sorted by group name."""
    dicts = [group.to_dict() for group in groups]
    dicts.sort(key=lambda group_dict: group_dict["name"])
    return dicts
def do_update_user_activity_interval(user_profile: UserProfile,
                                     log_time: datetime.datetime) -> None:
    """Record user activity at `log_time`, merging into the latest interval.

    Extends the most recent UserActivityInterval when the new
    [log_time, log_time + MIN_INTERVAL_LENGTH) window overlaps it;
    otherwise creates a fresh interval.
    """
    effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
    # This code isn't perfect, because with various races we might end
    # up creating two overlapping intervals, but that shouldn't happen
    # often, and can be corrected for in post-processing
    try:
        # IndexError from [0] means the user has no intervals yet.
        last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
        # There are two ways our intervals could overlap:
        # (1) The start of the new interval could be inside the old interval
        # (2) The end of the new interval could be inside the old interval
        # In either case, we just extend the old interval to include the new interval.
        if ((log_time <= last.end and log_time >= last.start) or
                (effective_end <= last.end and effective_end >= last.start)):
            last.end = max(last.end, effective_end)
            last.start = min(last.start, log_time)
            last.save(update_fields=["start", "end"])
            return
    except IndexError:
        pass
    # Otherwise, the intervals don't overlap, so we should make a new one
    UserActivityInterval.objects.create(user_profile=user_profile, start=log_time,
                                        end=effective_end)
@statsd_increment('user_activity')
def do_update_user_activity(user_profile_id: int,
                            client_id: int,
                            query: str,
                            count: int,
                            log_time: datetime.datetime) -> None:
    """Record that a user issued `query` from a client, bumping its counter."""
    activity, created = UserActivity.objects.get_or_create(
        user_profile_id=user_profile_id,
        client_id=client_id,
        query=query,
        defaults={'last_visit': log_time, 'count': count})
    if created:
        # A brand-new row already carries the initial count and timestamp.
        return
    activity.count += count
    activity.last_visit = log_time
    activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
    """Push a presence update for one user to everyone in the realm."""
    presence_dict = presence.to_dict()
    payload = dict(
        type="presence",
        email=user_profile.email,
        user_id=user_profile.id,
        server_timestamp=time.time(),
        presence={presence_dict['client']: presence_dict},
    )
    send_event(user_profile.realm, payload, active_user_ids(user_profile.realm_id))
def consolidate_client(client: Client) -> Client:
    # The web app reports a client as 'website' while the desktop app
    # reports ZulipDesktop (it sets a custom user agent); we want both
    # to count as web users, so alias ZulipDesktop to website.
    if client.name == 'ZulipDesktop':
        return get_client('website')
    return client
@statsd_increment('user_presence')
def do_update_user_presence(user_profile: UserProfile,
                            client: Client,
                            log_time: datetime.datetime,
                            status: int) -> None:
    """Upsert the user's per-client presence row and maybe notify the realm.

    A realm-wide presence event is only sent when the user newly appears
    or transitions back online; routine timestamp refreshes rely on
    clients polling instead.
    """
    client = consolidate_client(client)
    defaults = dict(
        timestamp=log_time,
        status=status,
        realm_id=user_profile.realm_id,
    )
    (presence, created) = UserPresence.objects.get_or_create(
        user_profile=user_profile,
        client=client,
        defaults=defaults,
    )
    # "Stale" means the previous sample is older than the client ping
    # cadence (~1 minute) plus slack.
    stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
    was_idle = presence.status == UserPresence.IDLE
    became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
    # If an object was created, it has already been saved.
    #
    # We suppress changes from ACTIVE to IDLE before stale_status is reached;
    # this protects us from the user having two clients open: one active, the
    # other idle. Without this check, we would constantly toggle their status
    # between the two states.
    # NOTE(review): without parentheses this condition parses as
    #   ((not created) and stale_status) or was_idle or (status == presence.status)
    # -- confirm that grouping matches the intent described above.
    if not created and stale_status or was_idle or status == presence.status:
        # The following block attempts to only update the "status"
        # field in the event that it actually changed. This is
        # important to avoid flushing the UserPresence cache when the
        # data it would return to a client hasn't actually changed
        # (see the UserPresence post_save hook for details).
        presence.timestamp = log_time
        update_fields = ["timestamp"]
        if presence.status != status:
            presence.status = status
            update_fields.append("status")
        presence.save(update_fields=update_fields)
    if not user_profile.realm.presence_disabled and (created or became_online):
        # Push event to all users in the realm so they see the new user
        # appear in the presence list immediately, or the newly online
        # user without delay. Note that we won't send an update here for a
        # timestamp update, because we rely on the browser to ping us every 50
        # seconds for realm-wide status updates, and those updates should have
        # recent timestamps, which means the browser won't think active users
        # have gone idle. If we were more aggressive in this function about
        # sending timestamp updates, we could eliminate the ping responses, but
        # that's not a high priority for now, considering that most of our non-MIT
        # realms are pretty small.
        send_presence_changed(user_profile, presence)
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
    """Queue a background update of the user's activity interval."""
    queue_json_publish("user_activity_interval", {
        'user_profile_id': user_profile.id,
        'time': datetime_to_timestamp(log_time),
    })
def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime,
                         status: int, new_user_input: bool) -> None:
    """Queue a presence update; real user input also extends the activity interval."""
    payload = {
        'user_profile_id': user_profile.id,
        'status': status,
        'time': datetime_to_timestamp(log_time),
        'client': client.name,
    }
    queue_json_publish("user_presence", payload)
    if new_user_input:
        update_user_activity_interval(user_profile, log_time)
def do_update_pointer(user_profile: UserProfile, client: Client,
                      pointer: int, update_flags: bool=False) -> None:
    """Move the user's pointer and notify their clients.

    With update_flags=True (legacy Android only), also marks messages
    between the old and new pointer as read.
    """
    prev_pointer = user_profile.pointer
    user_profile.pointer = pointer
    user_profile.save(update_fields=["pointer"])
    if update_flags:  # nocoverage
        # This block of code is compatibility code for the
        # legacy/original Zulip Android app natively. It's a shim
        # that will mark as read any messages up until the pointer
        # move; we expect to remove this feature entirely before long,
        # when we drop support for the old Android app entirely.
        #
        # Capture the IDs with active push notifications before the
        # bulk update below clears the unread flag on those rows.
        app_message_ids = UserMessage.objects.filter(
            user_profile=user_profile,
            message__id__gt=prev_pointer,
            message__id__lte=pointer).extra(where=[
                UserMessage.where_unread(),
                UserMessage.where_active_push_notification(),
            ]).values_list("message_id", flat=True)
        UserMessage.objects.filter(user_profile=user_profile,
                                   message__id__gt=prev_pointer,
                                   message__id__lte=pointer).extra(where=[UserMessage.where_unread()]) \
                           .update(flags=F('flags').bitor(UserMessage.flags.read))
        do_clear_mobile_push_notifications_for_ids(user_profile, app_message_ids)
        event_time = timezone_now()
        count = len(app_message_ids)
        do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
                                  None, event_time, increment=count)
        do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
                                  None, event_time, increment=min(1, count))
    event = dict(type='pointer', pointer=pointer)
    send_event(user_profile.realm, event, [user_profile.id])
def do_update_user_status(user_profile: UserProfile,
                          away: Optional[bool],
                          status_text: Optional[str],
                          client_id: int) -> None:
    """Set the user's away flag and/or status text, then notify the realm.

    Fields passed as None are omitted from the broadcast event.
    """
    status = UserStatus.AWAY if away else UserStatus.NORMAL
    realm = user_profile.realm
    update_user_status(
        user_profile_id=user_profile.id,
        status=status,
        status_text=status_text,
        client_id=client_id,
    )

    event = dict(type='user_status', user_id=user_profile.id)
    if away is not None:
        event['away'] = away
    if status_text is not None:
        event['status_text'] = status_text
    send_event(realm, event, active_user_ids(realm.id))
def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
    """Mark every unread message for the user as read ("bankruptcy").

    Returns the number of UserMessage rows updated.
    """
    log_statsd_event('bankruptcy')
    # First, we clear mobile push notifications. This is safer in the
    # event that the below logic times out and we're killed.
    while True:
        # Process in bounded batches of 10000 to keep each query small.
        all_push_message_ids = UserMessage.objects.filter(
            user_profile=user_profile,
        ).extra(
            where=[UserMessage.where_active_push_notification()],
        ).values_list("message_id", flat=True)[0:10000]
        if len(all_push_message_ids) == 0:
            break
        do_clear_mobile_push_notifications_for_ids(user_profile, all_push_message_ids)
    msgs = UserMessage.objects.filter(
        user_profile=user_profile,
    ).extra(
        where=[UserMessage.where_unread()],
    )
    count = msgs.update(
        flags=F('flags').bitor(UserMessage.flags.read),
    )
    event = dict(
        type='update_message_flags',
        operation='add',
        flag='read',
        messages=[],  # we don't send messages, since the client reloads anyway
        all=True,
    )
    event_time = timezone_now()
    send_event(user_profile.realm, event, [user_profile.id])
    do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
                              None, event_time, increment=count)
    do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
                              None, event_time, increment=min(1, count))
    return count
def do_mark_stream_messages_as_read(user_profile: UserProfile,
                                    client: Client,
                                    stream: Stream,
                                    topic_name: Optional[str]=None) -> int:
    """Mark the user's unread messages in a stream (or one topic) as read.

    Returns the number of UserMessage rows updated.
    """
    log_statsd_event('mark_stream_as_read')
    msgs = UserMessage.objects.filter(
        user_profile=user_profile,
    )
    recipient = stream.recipient
    msgs = msgs.filter(message__recipient=recipient)
    if topic_name:
        msgs = filter_by_topic_name_via_message(
            query=msgs,
            topic_name=topic_name,
        )
    msgs = msgs.extra(
        where=[UserMessage.where_unread()],
    )
    # Materialize the IDs before the bulk update clears the unread flag,
    # since the event and push-notification cleanup below need them.
    message_ids = list(msgs.values_list('message__id', flat=True))
    count = msgs.update(
        flags=F('flags').bitor(UserMessage.flags.read),
    )
    event = dict(
        type='update_message_flags',
        operation='add',
        flag='read',
        messages=message_ids,
        all=False,
    )
    event_time = timezone_now()
    send_event(user_profile.realm, event, [user_profile.id])
    do_clear_mobile_push_notifications_for_ids(user_profile, message_ids)
    do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
                              None, event_time, increment=count)
    do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
                              None, event_time, increment=min(1, count))
    return count
def do_clear_mobile_push_notifications_for_ids(user_profile: UserProfile,
                                               message_ids: List[int]) -> None:
    """Queue removal of mobile push notifications for the given messages.

    Only messages that actually have an active push notification for this
    user are queued.
    """
    filtered_message_ids = list(UserMessage.objects.filter(
        message_id__in=message_ids,
        user_profile=user_profile,
    ).extra(
        where=[UserMessage.where_active_push_notification()],
    ).values_list('message_id', flat=True))
    num_detached = settings.MAX_UNBATCHED_REMOVE_NOTIFICATIONS - 1
    for message_id in filtered_message_ids[:num_detached]:
        # Older clients (all clients older than 2019-02-13) will only
        # see the first message ID in a given notification-message.
        # To help them out, send a few of these separately.
        queue_json_publish("missedmessage_mobile_notifications", {
            "type": "remove",
            "user_profile_id": user_profile.id,
            "message_ids": [message_id],
        })
    # The remainder (newer clients handle this fine) goes as one batch.
    if filtered_message_ids[num_detached:]:
        queue_json_publish("missedmessage_mobile_notifications", {
            "type": "remove",
            "user_profile_id": user_profile.id,
            "message_ids": filtered_message_ids[num_detached:],
        })
def do_update_message_flags(user_profile: UserProfile,
                            client: Client,
                            operation: str,
                            flag: str,
                            messages: List[int]) -> int:
    """Add or remove a message flag for the user on the given message IDs.

    operation is 'add' or 'remove'. Returns the number of rows updated.
    Raises JsonableError for invalid/non-editable flags or invalid messages.
    """
    valid_flags = [item for item in UserMessage.flags
                   if item not in UserMessage.NON_API_FLAGS]
    if flag not in valid_flags:
        raise JsonableError(_("Invalid flag: '{}'").format(flag))
    if flag in UserMessage.NON_EDITABLE_FLAGS:
        raise JsonableError(_("Flag not editable: '{}'").format(flag))
    flagattr = getattr(UserMessage.flags, flag)
    msgs = UserMessage.objects.filter(user_profile=user_profile,
                                      message__id__in=messages)
    # This next block allows you to star any message, even those you
    # didn't receive (e.g. because you're looking at a public stream
    # you're not subscribed to, etc.). The problem is that starring
    # is a flag boolean on UserMessage, and UserMessage rows are
    # normally created only when you receive a message to support
    # searching your personal history. So we need to create one. We
    # add UserMessage.flags.historical, so that features that need
    # "messages you actually received" can exclude these UserMessages.
    if msgs.count() == 0:
        if not len(messages) == 1:
            raise JsonableError(_("Invalid message(s)"))
        if flag != "starred":
            raise JsonableError(_("Invalid message(s)"))
        # Validate that the user could have read the relevant message
        message = access_message(user_profile, messages[0])[0]
        # OK, this is a message that you legitimately have access
        # to via narrowing to the stream it is on, even though you
        # didn't actually receive it. So we create a historical,
        # read UserMessage message row for you to star.
        UserMessage.objects.create(user_profile=user_profile,
                                   message=message,
                                   flags=UserMessage.flags.historical | UserMessage.flags.read)
    if operation == 'add':
        count = msgs.update(flags=F('flags').bitor(flagattr))
    elif operation == 'remove':
        count = msgs.update(flags=F('flags').bitand(~flagattr))
    else:
        raise AssertionError("Invalid message flags operation")
    event = {'type': 'update_message_flags',
             'operation': operation,
             'flag': flag,
             'messages': messages,
             'all': False}
    send_event(user_profile.realm, event, [user_profile.id])
    # Marking messages as read also clears their push notifications and
    # counts toward read-activity stats.
    if flag == "read" and operation == "add":
        event_time = timezone_now()
        do_clear_mobile_push_notifications_for_ids(user_profile, messages)
        do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
                                  None, event_time, increment=count)
        do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
                                  None, event_time, increment=min(1, count))
    return count
class MessageUpdateUserInfoResult(TypedDict):
    # IDs of users with a non-historical UserMessage row for the message.
    message_user_ids: Set[int]
    # Subset of the above whose rows carry a mention/wildcard-mention flag.
    mention_user_ids: Set[int]
def notify_topic_moved_streams(user_profile: UserProfile,
                               old_stream: Stream, old_topic: str,
                               new_stream: Stream, new_topic: Optional[str],
                               send_notification_to_old_thread: bool,
                               send_notification_to_new_thread: bool) -> None:
    """Post tombstone notifications in the old and/or new topic after a move.

    Moving content between streams is highly disruptive, so these
    breadcrumb messages record who moved the topic and where it went.
    """
    sender = get_system_bot(settings.NOTIFICATION_BOT)
    if new_topic is None:
        new_topic = old_topic

    user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**"
    old_topic_link = f"#**{old_stream.name}>{old_topic}**"
    new_topic_link = f"#**{new_stream.name}>{new_topic}**"

    if send_notification_to_new_thread:
        arrival_note = _("This topic was moved here from {old_location} by {user}").format(
            old_location=old_topic_link, user=user_mention,
        )
        internal_send_stream_message(
            new_stream.realm, sender, new_stream, new_topic, arrival_note)
    if send_notification_to_old_thread:
        # Leave a pointer in the old stream to where the topic went.
        departure_note = _("This topic was moved by {user} to {new_location}").format(
            user=user_mention, new_location=new_topic_link,
        )
        internal_send_stream_message(
            old_stream.realm, sender, old_stream, old_topic, departure_note)
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
    """Return the recipients (and mentioned recipients) of a message.

    Used when reprocessing an edited message to decide who to notify.
    """
    # We exclude UserMessage.flags.historical rows since those
    # users did not receive the message originally, and thus
    # probably are not relevant for reprocessed alert_words,
    # mentions and similar rendering features.  This may be a
    # decision we change in the future.
    rows = list(UserMessage.objects.filter(
        message=message_id,
        flags=~UserMessage.flags.historical,
    ).values('user_profile_id', 'flags'))

    mention_mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
    message_user_ids = set()
    mention_user_ids = set()
    for row in rows:
        user_id = row['user_profile_id']
        message_user_ids.add(user_id)
        if int(row['flags']) & mention_mask:
            mention_user_ids.add(user_id)

    return dict(
        message_user_ids=message_user_ids,
        mention_user_ids=mention_user_ids,
    )
def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None:
    """Sync per-user alert-word/mention flag bits with the message's
    (re-rendered) mention data, saving only the rows that changed."""
    wildcard = message.mentions_wildcard
    mentioned_ids = message.mentions_user_ids
    ids_with_alert_words = message.user_ids_with_alert_words
    # Collect rows whose flags actually changed so we issue one save per
    # modified row and none for unchanged rows.
    changed_ums: Set[UserMessage] = set()

    def update_flag(um: UserMessage, should_set: bool, flag: int) -> None:
        # Set or clear a single flag bit, recording the row if it changed.
        if should_set:
            if not (um.flags & flag):
                um.flags |= flag
                changed_ums.add(um)
        else:
            if (um.flags & flag):
                um.flags &= ~flag
                changed_ums.add(um)

    for um in ums:
        has_alert_word = um.user_profile_id in ids_with_alert_words
        update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)
        mentioned = um.user_profile_id in mentioned_ids
        update_flag(um, mentioned, UserMessage.flags.mentioned)
        update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)
    for um in changed_ums:
        um.save(update_fields=['flags'])
def update_to_dict_cache(changed_messages: List[Message], realm_id: Optional[int]=None) -> List[int]:
    """Refresh the to_dict cache entries (used for serving messages)
    for the given edited messages.

    Returns the IDs of the messages whose cache entries were rewritten.
    """
    rendered_by_id = MessageDict.to_dict_uncached(changed_messages, realm_id)
    cache_items = {
        to_dict_cache_key_id(message_id): (rendered,)
        for message_id, rendered in rendered_by_id.items()
    }
    cache_set_many(cache_items)
    return list(rendered_by_id.keys())
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_embedded_data(user_profile: UserProfile,
                            message: Message,
                            content: Optional[str],
                            rendered_content: Optional[str]) -> None:
    """Save re-rendered embedded content (e.g. URL previews) for a message
    and push an update_message event to the message's recipients."""
    event: Dict[str, Any] = {
        'type': 'update_message',
        'sender': user_profile.email,
        'message_id': message.id}
    changed_messages = [message]
    ums = UserMessage.objects.filter(message=message.id)
    # content is None when only metadata changed; the content fields are
    # touched (and per-user flags recomputed) only on a content change.
    if content is not None:
        update_user_message_flags(message, ums)
        message.content = content
        message.rendered_content = rendered_content
        message.rendered_content_version = bugdown_version
        event["content"] = content
        event["rendered_content"] = rendered_content
    message.save(update_fields=["content", "rendered_content"])
    event['message_ids'] = update_to_dict_cache(changed_messages)

    def user_info(um: UserMessage) -> Dict[str, Any]:
        # Per-recipient notification record: user ID plus their flags.
        return {
            'id': um.user_profile_id,
            'flags': um.flags_list(),
        }
    send_event(user_profile.realm, event, list(map(user_info, ums)))
class DeleteMessagesEvent(TypedDict, total=False):
    # Event payload for message deletion; total=False since the
    # stream-only fields are omitted for private messages.
    type: str
    message_ids: List[int]
    message_type: str
    sender_id: int
    recipient_id: int
    topic: str
    stream_id: int
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(user_profile: UserProfile, message: Message,
new_stream: Optional[Stream], topic_name: Optional[str],
propagate_mode: str, send_notification_to_old_thread: bool,
send_notification_to_new_thread: bool, content: Optional[str],
rendered_content: Optional[str], prior_mention_user_ids: Set[int],
mention_user_ids: Set[int], mention_data: Optional[bugdown.MentionData]=None) -> int:
"""
The main function for message editing. A message edit event can
modify:
* the message's content (in which case the caller will have
set both content and rendered_content),
* the topic, in which case the caller will have set topic_name
* or both
With topic edits, propagate_mode determines whether other message
also have their topics edited.
"""
timestamp = timezone_now()
message.last_edit_time = timestamp
event: Dict[str, Any] = {
'type': 'update_message',
'user_id': user_profile.id,
'edit_timestamp': datetime_to_timestamp(timestamp),
'message_id': message.id,
}
edit_history_event: Dict[str, Any] = {
'user_id': user_profile.id,
'timestamp': event['edit_timestamp'],
}
changed_messages = [message]
stream_being_edited = None
if message.is_stream_message():
stream_id = message.recipient.type_id
stream_being_edited = get_stream_by_id_in_realm(stream_id, user_profile.realm)
event['stream_name'] = stream_being_edited.name
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
assert rendered_content is not None
# mention_data is required if there's a content edit.
assert mention_data is not None
# add data from group mentions to mentions_user_ids.
for group_id in message.mentions_user_group_ids:
members = mention_data.get_group_members(group_id)
message.mentions_user_ids.update(members)
update_user_message_flags(message, ums)
# One could imagine checking realm.allow_edit_history here and
# modifying the events based on that setting, but doing so
# doesn't really make sense. We need to send the edit event
# to clients regardless, and a client already had access to
# the original/pre-edit content of the message anyway. That
# setting must be enforced on the client side, and making a
# change here simply complicates the logic for clients parsing
# edit history events.
event['orig_content'] = message.content
event['orig_rendered_content'] = message.rendered_content
edit_history_event["prev_content"] = message.content
edit_history_event["prev_rendered_content"] = message.rendered_content
edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = bugdown_version
event["content"] = content
event["rendered_content"] = rendered_content
event['prev_rendered_content_version'] = message.rendered_content_version
event['is_me_message'] = Message.is_status_message(content, rendered_content)
# message.has_image and message.has_link will have been
# already updated by bugdown rendering in the caller.
message.has_attachment = check_attachment_reference_change(message)
if message.is_stream_message():
if topic_name is not None:
new_topic_name = topic_name
else:
new_topic_name = message.topic_name()
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=new_topic_name,
)
else:
stream_topic = None
info = get_recipient_info(
recipient=message.recipient,
sender_id=message.sender_id,
stream_topic=stream_topic,
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
event['push_notify_user_ids'] = list(info['push_notify_user_ids'])
event['stream_push_user_ids'] = list(info['stream_push_user_ids'])
event['stream_email_user_ids'] = list(info['stream_email_user_ids'])
event['prior_mention_user_ids'] = list(prior_mention_user_ids)
event['mention_user_ids'] = list(mention_user_ids)
event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids'])
if message.mentions_wildcard:
event['wildcard_mention_user_ids'] = list(info['wildcard_mention_user_ids'])
else:
event['wildcard_mention_user_ids'] = []
if topic_name is not None or new_stream is not None:
orig_topic_name = message.topic_name()
event["propagate_mode"] = propagate_mode
event["stream_id"] = message.recipient.type_id
if new_stream is not None:
assert content is None
assert message.is_stream_message()
assert stream_being_edited is not None
edit_history_event['prev_stream'] = stream_being_edited.id
event[ORIG_TOPIC] = orig_topic_name
message.recipient_id = new_stream.recipient_id
event["new_stream_id"] = new_stream.id
event["propagate_mode"] = propagate_mode
if topic_name is not None:
topic_name = truncate_topic(topic_name)
message.set_topic_name(topic_name)
# These fields have legacy field names.
event[ORIG_TOPIC] = orig_topic_name
event[TOPIC_NAME] = topic_name
event[TOPIC_LINKS] = bugdown.topic_links(message.sender.realm_id, topic_name)
edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name
if propagate_mode in ["change_later", "change_all"]:
assert topic_name is not None or new_stream is not None
messages_list = update_messages_for_topic_edit(
message=message,
propagate_mode=propagate_mode,
orig_topic_name=orig_topic_name,
topic_name=topic_name,
new_stream=new_stream,
)
changed_messages += messages_list
if message.edit_history is not None:
edit_history = ujson.loads(message.edit_history)
edit_history.insert(0, edit_history_event)
else:
edit_history = [edit_history_event]
message.edit_history = ujson.dumps(edit_history)
# This does message.save(update_fields=[...])
save_message_for_edit_use_case(message=message)
realm_id: Optional[int] = None
if stream_being_edited is not None:
realm_id = stream_being_edited.realm_id
event['message_ids'] = update_to_dict_cache(changed_messages, realm_id)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list(),
}
# The following blocks arranges that users who are subscribed to a
# stream and can see history from before they subscribed get
# live-update when old messages are edited (e.g. if the user does
# a topic edit themself).
#
# We still don't send an update event to users who are not
# subscribed to this stream and don't have a UserMessage row. This
# means if a non-subscriber is viewing the narrow, they won't get
# a real-time updates. This is a balance between sending
# message-edit notifications for every public stream to every user
# in the organization (too expansive, and also not what we do for
# newly sent messages anyway) and having magical live-updates
# where possible.
users_to_be_notified = list(map(user_info, ums))
if stream_being_edited is not None:
if stream_being_edited.is_history_public_to_subscribers:
subscribers = get_active_subscriptions_for_stream_id(stream_id)
# We exclude long-term idle users, since they by definition have no active clients.
subscribers = subscribers.exclude(user_profile__long_term_idle=True)
# Remove duplicates by excluding the id of users already in users_to_be_notified list.
# This is the case where a user both has a UserMessage row and is a current Subscriber
subscribers = subscribers.exclude(user_profile_id__in=[um.user_profile_id for um in ums])
# All users that are subscribed to the stream must be notified when a message is edited
subscribers_ids = [user.user_profile_id for user in subscribers]
users_to_be_notified += list(map(subscriber_info, subscribers_ids))
send_event(user_profile.realm, event, users_to_be_notified)
if (len(changed_messages) > 0 and new_stream is not None and
stream_being_edited is not None):
# Notify users that the topic was moved.
notify_topic_moved_streams(user_profile, stream_being_edited, orig_topic_name,
new_stream, topic_name, send_notification_to_old_thread,
send_notification_to_new_thread)
return len(changed_messages)
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
    """Archive the given messages and notify clients with a delete_message event.

    Callers guarantee the messages either all belong to one stream topic
    or consist of a single private message.
    """
    # messages in delete_message event belong to the same topic
    # or is a single private message, as any other behaviour is not possible with
    # the current callers to this method.
    messages = list(messages)
    message_ids = [message.id for message in messages]
    if not message_ids:
        return
    event: DeleteMessagesEvent = {
        'type': 'delete_message',
        'message_ids': message_ids,
    }
    sample_message = messages[0]
    message_type = "stream"
    users_to_notify = []
    if not sample_message.is_stream_message():
        # Private-message case: only the conversation's participants
        # (those with UserMessage rows) need the event.
        assert len(messages) == 1
        message_type = "private"
        ums = UserMessage.objects.filter(message_id__in=message_ids)
        users_to_notify = [um.user_profile_id for um in ums]
        # TODO: We should plan to remove `sender_id` here.
        event['recipient_id'] = sample_message.recipient_id
        event['sender_id'] = sample_message.sender_id
    if message_type == "stream":
        stream_id = sample_message.recipient.type_id
        event['stream_id'] = stream_id
        event['topic'] = sample_message.topic_name()
        subscribers = get_active_subscriptions_for_stream_id(stream_id)
        # We exclude long-term idle users, since they by definition have no active clients.
        subscribers = subscribers.exclude(user_profile__long_term_idle=True)
        subscribers_ids = [user.user_profile_id for user in subscribers]
        users_to_notify = list(map(subscriber_info, subscribers_ids))
    move_messages_to_archive(message_ids, realm=realm)
    event['message_type'] = message_type
    send_event(realm, event, users_to_notify)
def do_delete_messages_by_sender(user: UserProfile) -> None:
    """Archive every message this user has ever sent."""
    sender_message_ids = Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id')
    ids_to_archive = list(sender_message_ids)
    if ids_to_archive:
        move_messages_to_archive(ids_to_archive)
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
    """Return 28-day message counts for the given streams, keyed by stream id.

    Streams with no recorded traffic are simply absent from the result.
    """
    stat = COUNT_STATS['messages_in_stream:is_bot:day']
    traffic_from = timezone_now() - datetime.timedelta(days=28)
    rows = StreamCount.objects.filter(
        property=stat.property,
        end_time__gt=traffic_from,
        stream_id__in=stream_ids,
    ).values('stream_id').annotate(value=Sum('value'))
    return {row["stream_id"]: row["value"] for row in rows}
def round_to_2_significant_digits(number: int) -> int:
    """Round a non-negative integer to at most two significant digits."""
    ndigits = 2 - len(str(number))
    return int(round(number, ndigits))
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime,
                                      recent_traffic: Dict[int, int]) -> Optional[int]:
    """Estimate a stream's average messages per week from 28-day traffic data.

    Returns None for streams younger than
    STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS, since they lack enough data.
    """
    stream_traffic = recent_traffic.get(stream_id, 0)
    stream_age = (timezone_now() - stream_date_created).days
    if stream_age < STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
        return None
    if stream_age >= 28:
        # A full four weeks of data is available.
        average_weekly_traffic = int(stream_traffic // 4)
    else:
        # Scale the partial window up to a weekly rate.
        average_weekly_traffic = int(stream_traffic * 7 // stream_age)
    # Never report zero for a stream that has had some traffic.
    if average_weekly_traffic == 0 and stream_traffic > 0:
        average_weekly_traffic = 1
    return round_to_2_significant_digits(average_weekly_traffic)
SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]
def get_web_public_subs(realm: Realm) -> SubHelperT:
    """Build a simulated subscription list for an anonymous web-public
    visitor: every active web-public stream in the realm, with default
    notification settings and colors assigned round-robin.
    """
    color_idx = 0
    def get_next_color() -> str:
        # Cycle deterministically through the standard assignment palette.
        nonlocal color_idx
        color = STREAM_ASSIGNMENT_COLORS[color_idx]
        color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS)
        return color
    subscribed = []
    for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False):
        stream_dict = stream.to_dict()
        # Add versions of the Subscription fields based on a simulated
        # new user subscription set.
        stream_dict['is_muted'] = False
        stream_dict['color'] = get_next_color()
        stream_dict['desktop_notifications'] = True
        stream_dict['audible_notifications'] = True
        stream_dict['push_notifications'] = True
        stream_dict['email_notifications'] = True
        stream_dict['pin_to_top'] = False
        # No recent-traffic data is passed ({}), so the estimate is based
        # purely on stream age.
        stream_weekly_traffic = get_average_weekly_stream_traffic(stream.id,
                                                                  stream.date_created,
                                                                  {})
        stream_dict['stream_weekly_traffic'] = stream_weekly_traffic
        stream_dict['email_address'] = ''
        subscribed.append(stream_dict)
    # Anonymous visitors have no unsubscribed/never-subscribed lists.
    return (subscribed, [], [])
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has a significant
# performance impact on page load for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(user_profile: UserProfile,
                                include_subscribers: bool=True) -> SubHelperT:
    """Compute the user's (subscribed, unsubscribed, never_subscribed)
    stream-dict lists, each sorted by stream name.

    When include_subscribers is True, each visible stream dict also
    carries a 'subscribers' list of user ids (subject to privacy rules
    for invite-only streams and guests).
    """
    sub_dicts = get_stream_subscriptions_for_user(user_profile).values(
        *Subscription.API_FIELDS, "recipient_id").order_by("recipient_id")
    sub_dicts = list(sub_dicts)
    sub_recipient_ids = [
        sub['recipient_id']
        for sub in sub_dicts
    ]
    stream_recipient = StreamRecipientMap()
    stream_recipient.populate_for_recipient_ids(sub_recipient_ids)
    stream_ids: Set[int] = set()
    for sub in sub_dicts:
        sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id'])
        stream_ids.add(sub['stream_id'])
    recent_traffic = get_streams_traffic(stream_ids=stream_ids)
    all_streams = get_active_streams(user_profile.realm).select_related(
        "realm").values(
            *Stream.API_FIELDS,
            # date_created is used as an input for the stream_weekly_traffic computed field.
            "date_created",
            # The realm_id and recipient_id are generally not needed in the API.
            "realm_id",
            "recipient_id",
            # email_token isn't public to some users with access to
            # the stream, so doesn't belong in API_FIELDS.
            "email_token")
    stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids]
    stream_hash = {}
    for stream in stream_dicts:
        stream_hash[stream["id"]] = stream
    all_streams_id = [stream["id"] for stream in all_streams]
    subscribed = []
    unsubscribed = []
    never_subscribed = []
    # Deactivated streams aren't in stream_hash.
    streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts
               if sub["stream_id"] in stream_hash]
    streams_subscribed_map = {sub["stream_id"]: sub["active"] for sub in sub_dicts}
    # Add never subscribed streams to streams_subscribed_map
    streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})
    if include_subscribers:
        subscriber_map: Mapping[int, Optional[List[int]]] = bulk_get_subscriber_user_ids(
            all_streams,
            user_profile,
            streams_subscribed_map,
            stream_recipient,
        )
    else:
        # If we're not including subscribers, always return None,
        # which the below code needs to check for anyway.
        subscriber_map = defaultdict(lambda: None)
    sub_unsub_stream_ids = set()
    for sub in sub_dicts:
        sub_unsub_stream_ids.add(sub["stream_id"])
        stream = stream_hash.get(sub["stream_id"])
        if not stream:
            # This stream has been deactivated, don't include it.
            continue
        # We first construct a dictionary based on the standard Stream
        # and Subscription models' API_FIELDS.
        stream_dict = {}
        for field_name in Stream.API_FIELDS:
            if field_name == "id":
                stream_dict['stream_id'] = stream["id"]
                continue
            stream_dict[field_name] = stream[field_name]
        # Copy Subscription.API_FIELDS except for "active", which is
        # used to determine where to the put the field.
        for field_name in Subscription.API_FIELDS:
            stream_dict[field_name] = sub[field_name]
        # Backwards-compatibility for clients that haven't been
        # updated for the in_home_view => is_muted API migration.
        stream_dict['in_home_view'] = not stream_dict['is_muted']
        # Backwards-compatibility for clients that haven't been
        # updated for the is_announcement_only -> stream_post_policy
        # migration.
        stream_dict['is_announcement_only'] = \
            stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS
        # Add a few computed fields not directly from the data models.
        stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
            stream["id"], stream["date_created"], recent_traffic)
        stream_dict['email_address'] = encode_email_address_helper(
            stream["name"], stream["email_token"], show_sender=True)
        # Construct and add subscribers data
        subscribers: Optional[List[int]] = subscriber_map[stream["id"]]
        # Important: don't show the subscribers if the stream is invite only
        # and this user isn't on it anymore (or a realm administrator).
        if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin):
            subscribers = None
        # Guest users lose access to subscribers when they are unsubscribed.
        if not sub["active"] and user_profile.is_guest:
            subscribers = None
        if subscribers is not None:
            stream_dict['subscribers'] = subscribers
        # is_active is represented in this structure by which list we include it in.
        is_active = stream_dict.pop("active")
        if is_active:
            subscribed.append(stream_dict)
        else:
            unsubscribed.append(stream_dict)
    all_streams_id_set = set(all_streams_id)
    # Users without access to public streams (e.g. guests) get an empty
    # never_subscribed list.
    if user_profile.can_access_public_streams():
        never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids
    else:
        never_subscribed_stream_ids = set()
    never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams
                                if ns_stream_dict['id'] in never_subscribed_stream_ids]
    for stream in never_subscribed_streams:
        is_public = (not stream['invite_only'])
        if is_public or user_profile.is_realm_admin:
            stream_dict = {}
            for field_name in Stream.API_FIELDS:
                if field_name == "id":
                    stream_dict['stream_id'] = stream["id"]
                    continue
                stream_dict[field_name] = stream[field_name]
            stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
                stream["id"], stream["date_created"], recent_traffic)
            # Backwards-compatibility addition of removed field.
            stream_dict['is_announcement_only'] = \
                stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS
            if is_public or user_profile.is_realm_admin:
                subscribers = subscriber_map[stream["id"]]
                if subscribers is not None:
                    stream_dict['subscribers'] = subscribers
            never_subscribed.append(stream_dict)
    return (sorted(subscribed, key=lambda x: x['name']),
            sorted(unsubscribed, key=lambda x: x['name']),
            sorted(never_subscribed, key=lambda x: x['name']))
def gather_subscriptions(
        user_profile: UserProfile,
        include_subscribers: bool=False,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
    """Return (subscribed, unsubscribed) stream dicts for the user.

    When include_subscribers is True, each stream's subscriber user-id
    list is replaced by a sorted list of email addresses.
    """
    subscribed, unsubscribed, _ = gather_subscriptions_helper(
        user_profile, include_subscribers=include_subscribers)
    if include_subscribers:
        # Collect every referenced user id across both lists.
        user_ids = {
            subscriber
            for subs in [subscribed, unsubscribed]
            for sub in subs
            if 'subscribers' in sub
            for subscriber in sub['subscribers']
        }
        email_dict = get_emails_from_user_ids(list(user_ids))
        # Rewrite the id lists in place as sorted email lists.
        for subs in [subscribed, unsubscribed]:
            for sub in subs:
                if 'subscribers' in sub:
                    sub['subscribers'] = sorted(
                        email_dict[user_id] for user_id in sub['subscribers']
                    )
    return (subscribed, unsubscribed)
def get_active_presence_idle_user_ids(realm: Realm,
                                      sender_id: int,
                                      message_type: str,
                                      active_user_ids: Set[int],
                                      user_flags: Dict[int, List[str]]) -> List[int]:
    '''
    Given a list of active_user_ids, we build up a subset
    of those users who fit these criteria:
        * They are likely to need notifications (either due
          to mentions, alert words, or being PM'ed).
        * They are no longer "present" according to the
          UserPresence table.
    '''
    if realm.presence_disabled:
        return []
    is_pm = message_type == 'private'
    def needs_notification(user_id: int) -> bool:
        # Mentions, alert words, and incoming PMs all warrant a
        # potential notification.
        flags: Iterable[str] = user_flags.get(user_id, [])
        if 'mentioned' in flags or 'wildcard_mentioned' in flags:
            return True
        if 'has_alert_word' in flags:
            return True
        return is_pm and user_id != sender_id
    user_ids = {user_id for user_id in active_user_ids if needs_notification(user_id)}
    return filter_presence_idle_user_ids(user_ids)
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
    # Given a set of user IDs (the recipients of a message), accesses
    # the UserPresence table to determine which of these users are
    # currently idle and should potentially get email notifications
    # (and push notifications with
    # user_profile.enable_online_push_notifications=False).
    #
    # We exclude any presence data from ZulipMobile for the purpose of
    # triggering these notifications; the mobile app can more
    # effectively do its own client-side filtering of notification
    # sounds/etc. for the case that the user is actively doing a PM
    # conversation in the app.
    if not user_ids:
        return []
    # Matches presence.js constant
    OFFLINE_THRESHOLD_SECS = 140
    recent = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS)
    rows = UserPresence.objects.filter(
        user_profile_id__in=user_ids,
        status=UserPresence.ACTIVE,
        timestamp__gte=recent,
    ).exclude(client__name="ZulipMobile").distinct('user_profile_id').values('user_profile_id')
    active_user_ids = {row['user_profile_id'] for row in rows}
    # Idle users are those with no sufficiently recent ACTIVE presence row.
    idle_user_ids = user_ids - active_user_ids
    return sorted(list(idle_user_ids))
def do_send_confirmation_email(invitee: PreregistrationUser,
                               referrer: UserProfile) -> str:
    """
    Send the confirmation/welcome e-mail to an invited user.

    Returns the activation URL embedded in the email.
    """
    activation_url = create_confirmation_link(invitee, Confirmation.INVITATION)
    context = {'referrer_full_name': referrer.full_name, 'referrer_email': referrer.delivery_email,
               'activate_url': activation_url, 'referrer_realm_name': referrer.realm.name}
    from_name = f"{referrer.full_name} (via Zulip)"
    # The email is localized to the referrer's realm default language.
    send_email('zerver/emails/invitation', to_emails=[invitee.email], from_name=from_name,
               from_address=FromAddress.tokenized_no_reply_address(),
               language=referrer.realm.default_language, context=context)
    return activation_url
def email_not_system_bot(email: str) -> None:
    """Raise ValidationError if the address belongs to a cross-realm system bot."""
    if not is_cross_realm_bot_email(email):
        return
    msg = email_reserved_for_system_bots_error(email)
    # The error message doubles as the error code here.
    raise ValidationError(
        msg,
        code=msg,
        params=dict(deactivated=False),
    )
class InvitationError(JsonableError):
    """Raised when some or all email invitations could not be sent.

    `errors` holds (email, error_message, is_deactivated) triples;
    `sent_invitations` tells the client whether any invitations went out.
    """
    code = ErrorCode.INVITATION_FAILED
    data_fields = ['errors', 'sent_invitations']
    def __init__(self, msg: str, errors: List[Tuple[str, str, bool]],
                 sent_invitations: bool) -> None:
        self._msg: str = msg
        self.errors: List[Tuple[str, str, bool]] = errors
        self.sent_invitations: bool = sent_invitations
def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int:
    '''An upper bound on the number of invites sent in the last `days` days'''
    cutoff = timezone_now() - datetime.timedelta(days=days)
    aggregate = RealmCount.objects.filter(
        realm__in=realms,
        property='invites_sent::day',
        end_time__gte=cutoff,
    ).aggregate(Sum('value'))
    recent_invites = aggregate['value__sum']
    # Sum() yields None when no rows match.
    return 0 if recent_invites is None else recent_invites
def check_invite_limit(realm: Realm, num_invitees: int) -> None:
    '''Discourage using invitation emails as a vector for carrying spam.'''
    msg = _("You do not have enough remaining invites. "
            "Please contact {email} to have your limit raised. "
            "No invitations were sent.").format(email=settings.ZULIP_ADMINISTRATOR)
    # Rate limiting only applies to open-registration installations.
    if not settings.OPEN_REALM_CREATION:
        return
    # Per-realm daily limit.
    recent_invites = estimate_recent_invites([realm], days=1)
    if num_invitees + recent_invites > realm.max_invites:
        raise InvitationError(msg, [], sent_invitations=False)
    default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
    newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
    if realm.date_created <= timezone_now() - newrealm_age:
        # If this isn't a "newly-created" realm, we're done. The
        # remaining code applies an aggregate limit across all
        # "new" realms, to address sudden bursts of spam realms.
        return
    if realm.max_invites > default_max:
        # If a user is on a realm where we've bumped up
        # max_invites, then we exempt them from invite limits.
        return
    new_realms = Realm.objects.filter(
        date_created__gte=timezone_now() - newrealm_age,
        _max_invites__lte=default_max,
    ).all()
    # Apply each (window, cap) pair from settings across all new realms.
    for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS:
        recent_invites = estimate_recent_invites(new_realms, days=days)
        if num_invitees + recent_invites > count:
            raise InvitationError(msg, [], sent_invitations=False)
def do_invite_users(user_profile: UserProfile,
                    invitee_emails: SizedTextIterable,
                    streams: Iterable[Stream],
                    invite_as: int=PreregistrationUser.INVITE_AS['MEMBER']) -> None:
    """Validate invitee emails and queue invitation emails for the valid ones.

    Raises InvitationError (with per-address details) when rate limits are
    exceeded, addresses fail validation, or all addresses were skipped.
    """
    check_invite_limit(user_profile.realm, len(invitee_emails))
    realm = user_profile.realm
    if not realm.invite_required:
        # Inhibit joining an open realm to send spam invitations.
        min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
        if (user_profile.date_joined > timezone_now() - min_age
                and not user_profile.is_realm_admin):
            raise InvitationError(
                _("Your account is too new to send invites for this organization. "
                  "Ask an organization admin, or a more experienced user."),
                [], sent_invitations=False)
    good_emails: Set[str] = set()
    errors: List[Tuple[str, str, bool]] = []
    validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm)
    for email in invitee_emails:
        if email == '':
            continue
        email_error = validate_email_is_valid(
            email,
            validate_email_allowed_in_realm,
        )
        if email_error:
            errors.append((email, email_error, False))
        else:
            good_emails.add(email)
    '''
    good_emails are emails that look ok so far,
    but we still need to make sure they're not
    gonna conflict with existing users
    '''
    error_dict = get_existing_user_errors(user_profile.realm, good_emails)
    skipped: List[Tuple[str, str, bool]] = []
    for email in error_dict:
        msg, deactivated = error_dict[email]
        skipped.append((email, msg, deactivated))
        good_emails.remove(email)
    validated_emails = list(good_emails)
    if errors:
        raise InvitationError(
            _("Some emails did not validate, so we didn't send any invitations."),
            errors + skipped, sent_invitations=False)
    if skipped and len(skipped) == len(invitee_emails):
        # All e-mails were skipped, so we didn't actually invite anyone.
        raise InvitationError(_("We weren't able to invite anyone."),
                              skipped, sent_invitations=False)
    # We do this here rather than in the invite queue processor since this
    # is used for rate limiting invitations, rather than keeping track of
    # when exactly invitations were sent
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'],
                              None, timezone_now(), increment=len(validated_emails))
    # Now that we are past all the possible errors, we actually create
    # the PreregistrationUser objects and trigger the email invitations.
    for email in validated_emails:
        # The logged in user is the referrer.
        prereg_user = PreregistrationUser(email=email, referred_by=user_profile,
                                          invited_as=invite_as,
                                          realm=user_profile.realm)
        prereg_user.save()
        stream_ids = [stream.id for stream in streams]
        prereg_user.streams.set(stream_ids)
        event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id}
        queue_json_publish("invites", event)
    if skipped:
        raise InvitationError(_("Some of those addresses are already using Zulip, "
                                "so we didn't send them an invitation. We did send "
                                "invitations to everyone else!"),
                              skipped, sent_invitations=True)
    notify_invites_changed(user_profile)
def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]:
    """List pending invitations visible to this user.

    Realm administrators see every invitation in the realm; other users
    see only those they sent themselves.  The result includes both
    single-use email invitations and unexpired multiuse invite links.
    """
    if user_profile.is_realm_admin:
        prereg_users = filter_to_valid_prereg_users(
            PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm)
        )
    else:
        prereg_users = filter_to_valid_prereg_users(
            PreregistrationUser.objects.filter(referred_by=user_profile)
        )
    invites = []
    for invitee in prereg_users:
        invites.append(dict(email=invitee.email,
                            ref=invitee.referred_by.email,
                            invited=datetime_to_timestamp(invitee.invited_at),
                            id=invitee.id,
                            invited_as=invitee.invited_as,
                            is_multiuse=False))
    # Multiuse links expire after INVITATION_LINK_VALIDITY_DAYS.
    lowest_datetime = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS)
    multiuse_confirmation_objs = Confirmation.objects.filter(realm=user_profile.realm,
                                                             type=Confirmation.MULTIUSE_INVITE,
                                                             date_sent__gte=lowest_datetime)
    for confirmation_obj in multiuse_confirmation_objs:
        invite = confirmation_obj.content_object
        invites.append(dict(ref=invite.referred_by.email,
                            invited=datetime_to_timestamp(confirmation_obj.date_sent),
                            id=invite.id,
                            link_url=confirmation_url(confirmation_obj.confirmation_key,
                                                      user_profile.realm,
                                                      Confirmation.MULTIUSE_INVITE),
                            invited_as=invite.invited_as,
                            is_multiuse=True))
    return invites
def do_create_multiuse_invite_link(referred_by: UserProfile, invited_as: int,
                                   streams: Sequence[Stream] = []) -> str:
    """Create a reusable invitation link for the referrer's realm and return its URL."""
    invite = MultiuseInvite.objects.create(realm=referred_by.realm, referred_by=referred_by)
    if streams:
        invite.streams.set(streams)
    invite.invited_as = invited_as
    invite.save()
    notify_invites_changed(referred_by)
    return create_confirmation_link(invite, Confirmation.MULTIUSE_INVITE)
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
    """Cancel an outstanding single-use email invitation."""
    email = prereg_user.email
    # Delete both the confirmation objects and the prereg_user object.
    # TODO: Probably we actually want to set the confirmation objects
    # to a "revoked" status so that we can give the invited user a better
    # error message.
    content_type = ContentType.objects.get_for_model(PreregistrationUser)
    Confirmation.objects.filter(content_type=content_type,
                                object_id=prereg_user.id).delete()
    prereg_user.delete()
    clear_scheduled_invitation_emails(email)
    # The DB row is gone, but the in-memory object still carries the
    # attributes notify_invites_changed needs.
    notify_invites_changed(prereg_user)
def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None:
    """Delete a multiuse invite along with its confirmation link."""
    invite_content_type = ContentType.objects.get_for_model(MultiuseInvite)
    Confirmation.objects.filter(
        content_type=invite_content_type,
        object_id=multiuse_invite.id,
    ).delete()
    multiuse_invite.delete()
    notify_invites_changed(multiuse_invite.referred_by)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
    """Re-send an invitation email; returns the new invited_at timestamp."""
    # These two invariants are guaranteed structurally by the caller's code path.
    assert prereg_user.referred_by is not None
    assert prereg_user.realm is not None
    check_invite_limit(prereg_user.referred_by.realm, 1)
    prereg_user.invited_at = timezone_now()
    prereg_user.save()
    # Resending counts against the realm's daily invite rate limit.
    do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'],
                              None, prereg_user.invited_at)
    clear_scheduled_invitation_emails(prereg_user.email)
    # We don't store the custom email body, so just set it to None
    event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None}
    queue_json_publish("invites", event)
    return datetime_to_timestamp(prereg_user.invited_at)
def notify_realm_emoji(realm: Realm) -> None:
    """Broadcast the realm's current custom-emoji set to all its active users."""
    emoji_event = dict(type="realm_emoji", op="update", realm_emoji=realm.get_emoji())
    send_event(realm, emoji_event, active_user_ids(realm.id))
def check_add_realm_emoji(realm: Realm,
                          name: str,
                          author: UserProfile,
                          image_file: File) -> Optional[RealmEmoji]:
    """Validate, persist, and upload a custom realm emoji.

    Returns the saved RealmEmoji on success, or None if the image upload
    failed.  full_clean() raises ValidationError for invalid names.
    """
    realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
    realm_emoji.full_clean()
    realm_emoji.save()
    emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)
    # The only user-controlled portion of 'emoji_file_name' is an extension,
    # which can not contain '..' or '/' or '\', making it difficult to exploit
    emoji_file_name = mark_sanitized(emoji_file_name)
    emoji_uploaded_successfully = False
    # NOTE(review): the `return` statements inside this `finally` block
    # suppress any exception raised by upload_emoji_image, converting an
    # upload failure into a None return -- confirm this best-effort
    # behavior is intended.
    try:
        upload_emoji_image(image_file, emoji_file_name, author)
        emoji_uploaded_successfully = True
    finally:
        if not emoji_uploaded_successfully:
            # Roll back the DB row so a failed upload leaves no orphan emoji.
            realm_emoji.delete()
            return None
        else:
            realm_emoji.file_name = emoji_file_name
            realm_emoji.save(update_fields=['file_name'])
            notify_realm_emoji(realm_emoji.realm)
            return realm_emoji
def do_remove_realm_emoji(realm: Realm, name: str) -> None:
    """Soft-delete a custom emoji by name and notify clients."""
    realm_emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
    realm_emoji.deactivated = True
    realm_emoji.save(update_fields=['deactivated'])
    notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None:
    """Push the user's full alert-word list to their clients."""
    send_event(user_profile.realm,
               dict(type="alert_words", alert_words=words),
               [user_profile.id])
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
    """Add alert words for the user and sync the updated list to clients."""
    updated_words = add_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, updated_words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
    """Remove alert words for the user and sync the updated list to clients."""
    remaining_words = remove_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, remaining_words)
def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str,
                  date_muted: Optional[datetime.datetime]=None) -> None:
    """Mute a topic for the user and sync their full muted-topic list to clients."""
    if date_muted is None:
        date_muted = timezone_now()
    add_topic_mute(user_profile, stream.id, recipient.id, topic, date_muted)
    muted_topics_event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
    send_event(user_profile.realm, muted_topics_event, [user_profile.id])
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
    """Unmute a topic for the user and sync their full muted-topic list to clients."""
    remove_topic_mute(user_profile, stream.id, topic)
    muted_topics_event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
    send_event(user_profile.realm, muted_topics_event, [user_profile.id])
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
    """Record that the user saw an onboarding hotspot; push the remaining ones."""
    UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
    hotspots_event = dict(type="hotspots", hotspots=get_next_hotspots(user))
    send_event(user.realm, hotspots_event, [user.id])
def notify_realm_filters(realm: Realm) -> None:
    """Broadcast the realm's current linkifier (realm filter) list to active users."""
    filters_event = dict(type="realm_filters",
                         realm_filters=realm_filters_for_realm(realm.id))
    send_event(realm, filters_event, active_user_ids(realm.id))
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int:
    """Create a realm linkifier, notify clients, and return its id.

    full_clean() raises ValidationError for invalid patterns or URL formats.
    """
    realm_filter = RealmFilter(
        realm=realm,
        pattern=pattern.strip(),
        url_format_string=url_format_string.strip(),
    )
    realm_filter.full_clean()
    realm_filter.save()
    notify_realm_filters(realm)
    return realm_filter.id
def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None,
                           id: Optional[int]=None) -> None:
    """Delete a realm linkifier, looked up by pattern when given, else by id."""
    if pattern is None:
        RealmFilter.objects.get(realm=realm, pk=id).delete()
    else:
        RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
    notify_realm_filters(realm)
def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]:
    """Map each user id to that user's email address."""
    # We may eventually use memcached to speed this up, but the DB is fast.
    return UserProfile.emails_from_ids(user_ids)
def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> RealmDomain:
    """Create an allowed email domain for the realm, notify clients, and return it."""
    realm_domain = RealmDomain.objects.create(realm=realm, domain=domain,
                                              allow_subdomains=allow_subdomains)
    event = dict(
        type="realm_domains",
        op="add",
        realm_domain=dict(
            domain=realm_domain.domain,
            allow_subdomains=realm_domain.allow_subdomains,
        ),
    )
    send_event(realm, event, active_user_ids(realm.id))
    return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
    """Update an allowed domain's subdomain policy and notify clients."""
    realm_domain.allow_subdomains = allow_subdomains
    realm_domain.save(update_fields=['allow_subdomains'])
    event = dict(
        type="realm_domains",
        op="change",
        realm_domain=dict(
            domain=realm_domain.domain,
            allow_subdomains=realm_domain.allow_subdomains,
        ),
    )
    send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id))
def do_remove_realm_domain(realm_domain: RealmDomain) -> None:
    """Delete an allowed email domain and notify the realm's active users."""
    realm = realm_domain.realm
    domain = realm_domain.domain
    realm_domain.delete()
    if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains:
        # If this was the last realm domain, we mark the realm as no
        # longer restricted to domain, because the feature doesn't do
        # anything if there are no domains, and this is probably less
        # confusing than the alternative.
        do_set_realm_property(realm, 'emails_restricted_to_domains', False)
    event = dict(type="realm_domains", op="remove", domain=domain)
    send_event(realm, event, active_user_ids(realm.id))
def get_occupied_streams(realm: Realm) -> QuerySet:
    # TODO: Make a generic stub for QuerySet
    """ Get streams with subscribers """
    # A stream is "occupied" when at least one active user holds an
    # active subscription to it; expressed as a correlated EXISTS
    # subquery so the check runs in a single database query.
    exists_expression = Exists(
        Subscription.objects.filter(active=True, user_profile__is_active=True,
                                    user_profile__realm=realm,
                                    recipient_id=OuterRef('recipient_id')),
    )
    occupied_streams = Stream.objects.filter(realm=realm, deactivated=False) \
        .annotate(occupied=exists_expression).filter(occupied=True)
    return occupied_streams
def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]:
    """Return client-format dicts for the realm's active web-public streams."""
    web_public_streams = Stream.objects.filter(
        realm=realm, deactivated=False, is_web_public=True)
    return Stream.get_client_data(web_public_streams)
def do_get_streams(
        user_profile: UserProfile, include_public: bool=True,
        include_subscribed: bool=True, include_all_active: bool=False,
        include_default: bool=False, include_owner_subscribed: bool=False,
) -> List[Dict[str, Any]]:
    """Return client-format stream dicts visible to this user, sorted by name.

    The include_* flags select which sources of streams are unioned into
    the result; include_all_active requires API-super-user privileges.
    """
    if include_all_active and not user_profile.is_api_super_user:
        raise JsonableError(_("User not authorized for this query"))
    # Users without public-stream access (e.g. guests) cannot request them.
    include_public = include_public and user_profile.can_access_public_streams()
    # Start out with all streams in the realm with subscribers
    query = get_occupied_streams(user_profile.realm)
    if include_all_active:
        streams = Stream.get_client_data(query)
    else:
        # We construct a query as the or (|) of the various sources
        # this user requested streams from.
        query_filter: Optional[Q] = None
        def add_filter_option(option: Q) -> None:
            nonlocal query_filter
            if query_filter is None:
                query_filter = option
            else:
                query_filter |= option
        if include_subscribed:
            subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
            recipient_check = Q(id__in=set(subscribed_stream_ids))
            add_filter_option(recipient_check)
        if include_public:
            invite_only_check = Q(invite_only=False)
            add_filter_option(invite_only_check)
        if include_owner_subscribed and user_profile.is_bot:
            bot_owner = user_profile.bot_owner
            assert bot_owner is not None
            owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner)
            owner_subscribed_check = Q(id__in=set(owner_stream_ids))
            add_filter_option(owner_subscribed_check)
        if query_filter is not None:
            query = query.filter(query_filter)
            streams = Stream.get_client_data(query)
        else:
            # Don't bother going to the database with no valid sources
            streams = []
    streams.sort(key=lambda elt: elt["name"])
    if include_default:
        # Annotate each stream with whether it is a realm default stream.
        is_default = {}
        default_streams = get_default_streams_for_realm(user_profile.realm_id)
        for default_stream in default_streams:
            is_default[default_stream.id] = True
        for stream in streams:
            stream['is_default'] = is_default.get(stream["stream_id"], False)
    return streams
def notify_attachment_update(user_profile: UserProfile, op: str,
                             attachment_dict: Dict[str, Any]) -> None:
    """Send an attachment add/update/remove event (plus quota usage) to the owner."""
    attachment_event = {
        'type': 'attachment',
        'op': op,
        'attachment': attachment_dict,
        "upload_space_used": user_profile.realm.currently_used_upload_space_bytes(),
    }
    send_event(user_profile.realm, attachment_event, [user_profile.id])
def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool:
    """Attach to `message` every upload in `potential_path_ids` the sender may access.

    Returns True if at least one attachment was claimed.
    """
    claimed = False
    for path_id in potential_path_ids:
        user_profile = message.sender
        is_message_realm_public = False
        if message.is_stream_message():
            is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public()
        if not validate_attachment_request(user_profile, path_id):
            # Technically, there are 2 cases here:
            # * The user put something in their message that has the form
            #   of an upload, but doesn't correspond to a file that
            #   actually exists. validate_attachment_request will return None.
            # * The user is trying to send a link to a file they don't have permission to
            #   access themselves. validate_attachment_request will return False.
            #
            # Either case is unusual and suggests a UI bug that got
            # the user in this situation, so we log in these cases.
            logging.warning(
                "User %s tried to share upload %s in message %s, but lacks permission",
                user_profile.id, path_id, message.id,
            )
            continue
        claimed = True
        attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public)
        notify_attachment_update(user_profile, "update", attachment.to_dict())
    return claimed
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
    """Delete unclaimed attachments older than `weeks_ago` weeks, stored files included."""
    for old_attachment in get_old_unclaimed_attachments(weeks_ago):
        delete_message_image(old_attachment.path_id)
        old_attachment.delete()
def check_attachment_reference_change(message: Message) -> bool:
    """Sync Attachment rows with the uploads referenced by an edited message.

    For a unsaved message edit (message.* has been updated, but not
    saved to the database), adjusts Attachment data to correspond to
    the new content.

    Returns whether the message has any attachments after the sync.
    """
    prev_attachments = {a.path_id for a in message.attachment_set.all()}
    new_attachments = set(message.potential_attachment_path_ids)
    if new_attachments == prev_attachments:
        # Nothing changed; report whether there are any attachments at all.
        return bool(prev_attachments)
    to_remove = list(prev_attachments - new_attachments)
    if len(to_remove) > 0:
        # Lock the rows while detaching them to avoid racing with
        # concurrent attachment updates.
        attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update()
        message.attachment_set.remove(*attachments_to_update)
    to_add = list(new_attachments - prev_attachments)
    if len(to_add) > 0:
        do_claim_attachments(message, to_add)
    return message.attachment_set.exists()
def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None:
    """Broadcast the realm's full custom profile field list to all active users."""
    event = {
        'type': 'custom_profile_fields',
        'op': operation,
        'fields': [field.as_dict() for field in custom_profile_fields_for_realm(realm.id)],
    }
    send_event(realm, event, active_user_ids(realm.id))
def try_add_realm_default_custom_profile_field(realm: Realm,
                                               field_subtype: str) -> CustomProfileField:
    """Create one of the predefined external-account profile fields for a realm."""
    subtype_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype]
    field = CustomProfileField(
        realm=realm,
        name=subtype_data['name'],
        field_type=CustomProfileField.EXTERNAL_ACCOUNT,
        hint=subtype_data['hint'],
        field_data=ujson.dumps(dict(subtype=field_subtype)),
    )
    field.save()
    # Ordering defaults to creation order, which is only known after the save.
    field.order = field.id
    field.save(update_fields=['order'])
    notify_realm_custom_profile_fields(realm, 'add')
    return field
def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int,
                                       hint: str='',
                                       field_data: Optional[ProfileFieldData]=None) -> CustomProfileField:
    """Create a custom profile field for a realm and notify its users."""
    field = CustomProfileField(realm=realm, name=name, field_type=field_type)
    field.hint = hint
    # CHOICE and EXTERNAL_ACCOUNT fields carry extra configuration as JSON.
    if field.field_type in (CustomProfileField.CHOICE,
                            CustomProfileField.EXTERNAL_ACCOUNT):
        field.field_data = ujson.dumps(field_data or {})
    field.save()
    # Ordering defaults to creation order, which is only known after the save.
    field.order = field.id
    field.save(update_fields=['order'])
    notify_realm_custom_profile_fields(realm, 'add')
    return field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
    """Delete a custom profile field.

    Deleting a field will also delete the user profile data
    associated with it in CustomProfileFieldValue model.
    """
    field.delete()
    notify_realm_custom_profile_fields(realm, 'delete')
def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
    """Delete every custom profile field belonging to the realm."""
    CustomProfileField.objects.filter(realm=realm).delete()
def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField,
                                          name: str, hint: str='',
                                          field_data: Optional[ProfileFieldData]=None) -> None:
    """Update a custom profile field's metadata and notify the realm's users."""
    field.name = name
    field.hint = hint
    # CHOICE and EXTERNAL_ACCOUNT fields carry extra configuration as JSON.
    if field.field_type in (CustomProfileField.CHOICE,
                            CustomProfileField.EXTERNAL_ACCOUNT):
        field.field_data = ujson.dumps(field_data or {})
    field.save()
    notify_realm_custom_profile_fields(realm, 'update')
def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
    """Reorder the realm's custom profile fields to match the given id list."""
    order_mapping = {field_id: position for position, field_id in enumerate(order)}
    fields = CustomProfileField.objects.filter(realm=realm)
    # Validate the full mapping before writing anything, so a bad list changes nothing.
    for field in fields:
        if field.id not in order_mapping:
            raise JsonableError(_("Invalid order mapping."))
    for field in fields:
        field.order = order_mapping[field.id]
        field.save(update_fields=['order'])
    notify_realm_custom_profile_fields(realm, 'update')
def notify_user_update_custom_profile_data(user_profile: UserProfile,
                                           field: Dict[str, Union[int, str, List[int], None]]) -> None:
    """Broadcast one user's changed custom profile field value to the realm."""
    data = dict(id=field['id'])
    # USER fields store a list of user ids; the event carries it JSON-encoded.
    if field['type'] == CustomProfileField.USER:
        data['value'] = ujson.dumps(field['value'])
    else:
        data['value'] = field['value']
    if field['rendered_value']:
        data['rendered_value'] = field['rendered_value']
    person = dict(user_id=user_profile.id, custom_profile_field=data)
    event = dict(type="realm_user", op="update", person=person)
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id))
def do_update_user_custom_profile_data_if_changed(user_profile: UserProfile,
                                                  data: List[Dict[str, Union[int, str, List[int]]]],
                                                  ) -> None:
    """Persist changed custom-profile-field values for a user and notify clients.

    Fields whose value is unchanged are skipped entirely (no save, no event).
    All updates happen in a single transaction.
    """
    with transaction.atomic():
        for field in data:
            field_value, created = CustomProfileFieldValue.objects.get_or_create(
                user_profile=user_profile,
                field_id=field['id'])
            if not created and field_value.value == str(field['value']):
                # If the field value isn't actually being changed to a different one,
                # we have nothing to do here for this field.
                # Note: field_value.value is a TextField() so we need to cast field['value']
                # to a string for the comparison in this if.
                continue
            field_value.value = field['value']
            # Renderable fields additionally store a markdown-rendered copy.
            if field_value.field.is_renderable():
                field_value.rendered_value = render_stream_description(str(field['value']))
                field_value.save(update_fields=['value', 'rendered_value'])
            else:
                field_value.save(update_fields=['value'])
            notify_user_update_custom_profile_data(user_profile, {
                "id": field_value.field_id,
                "value": field_value.value,
                "rendered_value": field_value.rendered_value,
                "type": field_value.field.field_type})
def check_remove_custom_profile_field_value(user_profile: UserProfile,
                                            field_id: Union[int, str, List[int]],
                                            ) -> None:
    """Delete the user's value for a profile field and notify the realm.

    Raises JsonableError for an unknown field id; a missing value is a no-op.
    """
    try:
        field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id)
        field_value = CustomProfileFieldValue.objects.get(field=field, user_profile=user_profile)
        field_value.delete()
        notify_user_update_custom_profile_data(user_profile, dict(id=field_id,
                                                                  value=None,
                                                                  rendered_value=None,
                                                                  type=field.field_type))
    except CustomProfileField.DoesNotExist:
        raise JsonableError(_('Field id {id} not found.').format(id=field_id))
    except CustomProfileFieldValue.DoesNotExist:
        # The user never had a value for this field; nothing to remove.
        pass
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
    """Announce a newly created user group to all active users of its realm."""
    group_data = dict(
        name=user_group.name,
        members=[member.id for member in members],
        description=user_group.description,
        id=user_group.id,
    )
    event = dict(type="user_group", op="add", group=group_data)
    send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile],
                         description: str) -> None:
    """Create a user group, translating a duplicate-name collision into JsonableError."""
    try:
        group = create_user_group(name, initial_members, realm, description=description)
        do_send_create_user_group_event(group, initial_members)
    except django.db.utils.IntegrityError:
        raise JsonableError(_("User group '{}' already exists.").format(name))
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None:
    """Broadcast an update to a user group's metadata to the realm."""
    send_event(
        user_group.realm,
        dict(type="user_group", op='update', group_id=user_group.id, data=data),
        active_user_ids(user_group.realm_id),
    )
def do_update_user_group_name(user_group: UserGroup, name: str) -> None:
    """Rename a user group, mapping a duplicate-name collision to JsonableError."""
    user_group.name = name
    try:
        user_group.save(update_fields=['name'])
    except django.db.utils.IntegrityError:
        raise JsonableError(_("User group '{}' already exists.").format(name))
    do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: str) -> None:
    """Update a user group's description and broadcast the change."""
    user_group.description = description
    user_group.save(update_fields=['description'])
    do_send_user_group_update_event(user_group, {'description': description})
def do_update_outgoing_webhook_service(bot_profile: UserProfile,
                                       service_interface: int,
                                       service_payload_url: str) -> None:
    """Update an outgoing-webhook bot's service endpoint/interface and notify owners."""
    # TODO: First service is chosen because currently one bot can only have one service.
    # Update this once multiple services are supported.
    service = get_bot_services(bot_profile.id)[0]
    service.base_url = service_payload_url
    service.interface = service_interface
    service.save()
    service_dict = dict(base_url=service.base_url,
                        interface=service.interface,
                        token=service.token)
    event = dict(type='realm_bot',
                 op='update',
                 bot=dict(user_id=bot_profile.id, services=[service_dict]))
    send_event(bot_profile.realm, event, bot_owner_user_ids(bot_profile))
def do_update_bot_config_data(bot_profile: UserProfile,
                              config_data: Dict[str, str]) -> None:
    """Merge new config entries into an embedded bot's config and notify owners."""
    for key, value in config_data.items():
        set_bot_config(bot_profile, key, value)
    updated_config_data = get_bot_config(bot_profile)
    event = dict(type='realm_bot',
                 op='update',
                 bot=dict(user_id=bot_profile.id,
                          services=[dict(config_data=updated_config_data)]))
    send_event(bot_profile.realm, event, bot_owner_user_ids(bot_profile))
def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]:
    """Return the service dicts (webhook endpoint or embedded-bot config) for one bot.

    NOTE(review): the annotation previously said `str`, but the id is passed to
    get_user_profile_by_id(); confirm all callers pass an int.
    """
    user_profile = get_user_profile_by_id(user_profile_id)
    services = get_bot_services(user_profile_id)
    service_dicts: List[Dict[str, Any]] = []
    if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
        service_dicts = [{'base_url': service.base_url,
                          'interface': service.interface,
                          'token': service.token,
                          }
                         for service in services]
    elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:
        try:
            service_dicts = [{'config_data': get_bot_config(user_profile),
                              'service_name': services[0].name,
                              }]
        # A ConfigError just means that there are no config entries for user_profile.
        except ConfigError:
            pass
    return service_dicts
def get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]],
                               realm: Realm) -> Dict[int, List[Dict[str, Any]]]:
    """Bulk variant of get_service_dicts_for_bot: map bot id -> list of service dicts.

    Fetches all Service rows and embedded-bot configs up front to avoid
    per-bot database queries.
    """
    bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts]
    bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list)
    for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):
        bot_services_by_uid[service.user_profile_id].append(service)
    # Embedded bots additionally have key/value config stored separately.
    embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts
                        if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT]
    embedded_bot_configs = get_bot_configs(embedded_bot_ids)
    service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {}
    for bot_dict in bot_dicts:
        bot_profile_id = bot_dict["id"]
        bot_type = bot_dict["bot_type"]
        services = bot_services_by_uid[bot_profile_id]
        service_dicts: List[Dict[str, Any]] = []
        if bot_type  == UserProfile.OUTGOING_WEBHOOK_BOT:
            service_dicts = [{'base_url': service.base_url,
                              'interface': service.interface,
                              'token': service.token,
                              }
                             for service in services]
        elif bot_type == UserProfile.EMBEDDED_BOT:
            # An embedded bot with no config rows gets an empty service list.
            if bot_profile_id in embedded_bot_configs.keys():
                bot_config = embedded_bot_configs[bot_profile_id]
                service_dicts = [{'config_data': bot_config,
                                  'service_name': services[0].name,
                                  }]
        service_dicts_by_uid[bot_profile_id] = service_dicts
    return service_dicts_by_uid
def get_owned_bot_dicts(user_profile: UserProfile,
                        include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]:
    """Return API-shaped dicts for the bots this user may administer.

    Realm admins see every bot in the realm (unless the flag is False);
    other users see only the bots they own.
    """
    if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
        result = get_bot_dicts_in_realm(user_profile.realm)
    else:
        result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True,
                                            bot_owner=user_profile).values(*bot_dict_fields)
    services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)
    return [{'email': botdict['email'],
             'user_id': botdict['id'],
             'full_name': botdict['full_name'],
             'bot_type': botdict['bot_type'],
             'is_active': botdict['is_active'],
             'api_key': botdict['api_key'],
             'default_sending_stream': botdict['default_sending_stream__name'],
             'default_events_register_stream': botdict['default_events_register_stream__name'],
             'default_all_public_streams': botdict['default_all_public_streams'],
             'owner_id': botdict['bot_owner__id'],
             'avatar_url': avatar_url_from_dict(botdict),
             'services': services_by_ids[botdict['id']],
             }
            for botdict in result]
def do_send_user_group_members_update_event(event_name: str,
                                            user_group: UserGroup,
                                            user_ids: List[int]) -> None:
    """Broadcast an add/remove-members event for a user group."""
    send_event(user_group.realm,
               dict(type="user_group",
                    op=event_name,
                    group_id=user_group.id,
                    user_ids=user_ids),
               active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup,
                                   user_profiles: List[UserProfile]) -> None:
    """Add the given users to a user group in one bulk insert, then notify the realm."""
    UserGroupMembership.objects.bulk_create([
        UserGroupMembership(user_group_id=user_group.id, user_profile=user_profile)
        for user_profile in user_profiles
    ])
    member_ids = [user_profile.id for user_profile in user_profiles]
    do_send_user_group_members_update_event('add_members', user_group, member_ids)
def remove_members_from_user_group(user_group: UserGroup,
                                   user_profiles: List[UserProfile]) -> None:
    """Remove the given users from a user group and notify the realm."""
    UserGroupMembership.objects.filter(
        user_group_id=user_group.id,
        user_profile__in=user_profiles).delete()
    member_ids = [user_profile.id for user_profile in user_profiles]
    do_send_user_group_members_update_event('remove_members', user_group, member_ids)
def do_send_delete_user_group_event(realm: Realm, user_group_id: int,
                                    realm_id: int) -> None:
    """Broadcast removal of a user group to all active users of the realm."""
    send_event(realm,
               dict(type="user_group", op="remove", group_id=user_group_id),
               active_user_ids(realm_id))
def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:
    """Delete a user group after verifying the acting user may access it."""
    group = access_user_group_by_id(user_group_id, user_profile)
    group.delete()
    do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id)
def do_send_realm_reactivation_email(realm: Realm) -> None:
    """Email the realm's admins a confirmation link to reactivate the realm."""
    confirmation_url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
    language = realm.default_language
    context = dict(confirmation_url=confirmation_url,
                   realm_uri=realm.uri,
                   realm_name=realm.name)
    send_email_to_admins(
        'zerver/emails/realm_reactivation', realm,
        from_address=FromAddress.tokenized_no_reply_address(),
        from_name=FromAddress.security_email_from_name(language=language),
        language=language, context=context)
def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None:
    """Store (or clear, with None) the user's Zoom OAuth token and notify their clients."""
    user.zoom_token = token
    user.save(update_fields=["zoom_token"])
    event = dict(type="has_zoom_token", value=token is not None)
    send_event(user.realm, event, [user.id])
def notify_realm_export(user_profile: UserProfile) -> None:
    """Push the current list of realm exports to the requesting user."""
    # In the future, we may want to send this event to all realm admins.
    send_event(user_profile.realm,
               dict(type='realm_export',
                    exports=get_realm_exports_serialized(user_profile)),
               [user_profile.id])
def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None:
    """Delete an export tarball and record the deletion time in its audit-log row."""
    # Give mypy a hint so it knows `ujson.loads`
    # isn't being passed an `Optional[str]`.
    export_extra_data = export.extra_data
    assert export_extra_data is not None
    export_data = ujson.loads(export_extra_data)
    export_path = export_data.get('export_path')
    if export_path:
        # Allow removal even if the export failed.
        delete_export_tarball(export_path)
    export_data['deleted_timestamp'] = timezone_now().timestamp()
    export.extra_data = ujson.dumps(export_data)
    export.save(update_fields=['extra_data'])
    notify_realm_export(user_profile)
def get_topic_messages(user_profile: UserProfile, stream: Stream,
                       topic_name: str) -> List[Message]:
    """Return the user's messages in the given stream topic, oldest first."""
    user_messages = UserMessage.objects.filter(
        user_profile=user_profile,
        message__recipient=stream.recipient,
    ).order_by("id")
    return [um.message for um in filter_by_topic_name_via_message(user_messages, topic_name)]
|
shubhamdhama/zulip
|
zerver/lib/actions.py
|
Python
|
apache-2.0
| 251,567
|
[
"Octopus"
] |
08b43619b44d265c14f0d311c7bde51e5f0e1ccebdad0289f8b2262cab19eb22
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Michael Rabbitt, Roberto Alsina
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Inspired by "[Python] reStructuredText GitHub Podcast directive"
# (https://gist.github.com/brianhsu/1407759), public domain by Brian Hsu
from __future__ import print_function, unicode_literals
'''
Extension to Python Markdown for Embedded Audio
Basic Example:
>>> import markdown
>>> text = """[podcast]http://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3[/podcast]"""
>>> html = markdown.markdown(text, [PodcastExtension()])
>>> print(html)
<p><audio src="http://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3"></audio></p>
'''
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import etree
# Matches the BBCode-style [podcast]...[/podcast] shortcode; the URL between
# the tags is captured in the 'url' group.
PODCAST_RE = r'\[podcast\](?P<url>.+)\[/podcast\]'
class PodcastPattern(Pattern):
    """Inline pattern that renders a [podcast] shortcode as an HTML5 audio element."""

    def __init__(self, pattern, configs):
        # configs is accepted for interface compatibility but unused here.
        Pattern.__init__(self, pattern)

    def handleMatch(self, m):
        """Build <audio controls><source src=... type="audio/mpeg"></audio> for the match."""
        url = m.group('url').strip()
        audio = etree.Element('audio')
        audio.set('controls', '')
        source = etree.SubElement(audio, 'source')
        source.set('src', url)
        source.set('type', 'audio/mpeg')
        return audio
class PodcastExtension(Extension):
    """Markdown extension wiring the [podcast] shortcode into python-markdown."""

    def __init__(self, configs=None):
        """Initialize with optional configuration.

        ``configs`` may be a mapping or an iterable of (key, value) pairs.

        Bug fixes: the original used a shared mutable default (``configs={}``)
        and iterated ``for key, value in configs``, which raises ValueError
        for any non-empty dict (dict iteration yields keys only).
        """
        # set extension defaults
        self.config = {}
        # Override defaults with user settings
        if configs is None:
            configs = {}
        items = configs.items() if hasattr(configs, 'items') else configs
        for key, value in items:
            self.setConfig(key, value)

    def extendMarkdown(self, md, md_globals):
        """Register the podcast inline pattern with the markdown instance."""
        podcast_md_pattern = PodcastPattern(PODCAST_RE, self.getConfigs())
        podcast_md_pattern.md = md
        md.inlinePatterns.add('podcast', podcast_md_pattern, "<not_strong")
        md.registerExtension(self)
def makeExtension(configs=None):
    """Entry point used by python-markdown to instantiate the extension."""
    return PodcastExtension(configs)
if __name__ == '__main__':
    # Run this module's doctests (see the module docstring) when executed directly.
    import doctest
    doctest.testmod(optionflags=(doctest.NORMALIZE_WHITESPACE +
                                 doctest.REPORT_NDIFF))
|
servalproject/nikola
|
nikola/plugins/compile_markdown/mdx_podcast.py
|
Python
|
mit
| 3,143
|
[
"Brian"
] |
782e5ee6dae95936321060109f37b165ad715446be4aca127f3f1f3e59e25256
|
def attribute_data_observable(data):
    """Wrap raw attribute data in a STIX artifact observable object."""
    return dict(type='artifact', payload_bin=data)
def attribute_data_pattern(data):
    """Render raw attribute data as a STIX artifact pattern clause."""
    return "artifact:payload_bin = '{0}'".format(data)
def define_address_type(address):
    """Return the STIX address type: ipv6-addr if the string has a colon, else ipv4-addr."""
    return 'ipv6-addr' if ':' in address else 'ipv4-addr'
def observable_as(_, attribute_value):
    """Observable for an AS attribute: a single autonomous-system object."""
    return {'0': dict(type='autonomous-system', number=attribute_value)}
def pattern_as(_, attribute_value):
    """Pattern for an AS attribute."""
    return "[autonomous-system:number = '{0}']".format(attribute_value)
def observable_attachment(*args):
    """Observable for an attachment; with data (3rd arg), prepend an artifact object."""
    file_observable = observable_file(args[0], args[1])
    if len(args) != 3:
        return file_observable
    file_object = file_observable['0']
    file_object['content_ref'] = '0'
    return {'0': attribute_data_observable(args[2]), '1': file_object}
def pattern_attachment(*args):
    """Pattern for an attachment; appends the artifact clause when data is given."""
    inner = pattern_file(args[0], args[1])[1:-1]
    if len(args) == 3:
        inner = "{0} AND {1}".format(inner, attribute_data_pattern(args[2]))
    return "[{0}]".format(inner)
def observable_domain(_, attribute_value):
    """Observable for a domain attribute: a single domain-name object."""
    return {'0': dict(type='domain-name', value=attribute_value)}
def pattern_domain(_, attribute_value):
    """Pattern for a domain attribute."""
    return "[domain-name:value = '{0}']".format(attribute_value)
def observable_domain_ip(_, attribute_value):
    """Observable for domain|ip: the domain object resolves to the address object."""
    address_type = define_address_type(attribute_value)
    domain_value, ip_value = attribute_value.split('|')
    observable = observable_domain(_, domain_value)
    observable['0']['resolves_to_refs'] = '1'
    observable['1'] = dict(type=address_type, value=ip_value)
    return observable
def pattern_domain_ip(_, attribute_value):
    """Pattern for domain|ip: domain clause AND resolves_to_refs clause."""
    domain_value, ip_value = attribute_value.split('|')
    clauses = [pattern_domain(_, domain_value)[1:-1],
               "domain-name:resolves_to_refs[*].value = '{0}'".format(ip_value)]
    return "[{0}]".format(" AND ".join(clauses))
def observable_email_address(attribute_type, attribute_value):
    """Observable for email-src/email-dst: address object referenced by a message object."""
    ref_field = 'from_ref' if 'src' in attribute_type else 'to_refs'
    return {
        '0': {'type': 'email-addr', 'value': attribute_value},
        '1': {'type': 'email-message', ref_field: '0', 'is_multipart': 'false'},
    }
def pattern_email_address(attribute_type, attribute_value):
    """Pattern for email-src/email-dst."""
    ref_field = 'from_ref' if 'src' in attribute_type else 'to_refs'
    return "[email-message:{0} = '{1}']".format(ref_field, attribute_value)
def observable_email_attachment(_, attribute_value):
    """Observable for email-attachment: a file object attached to an email-message.

    Bug fix: the content_disposition format string had no placeholder
    ("filename=''".format(...)), so the attachment name was always dropped
    from the header value.
    """
    observable = observable_file(_, attribute_value)
    observable['1'] = {
        "type": "email-message", 'is_multipart': 'true',
        "body_multipart": [{
            "content_disposition": "attachment; filename='{}'".format(attribute_value),
            "body_raw_ref": "0",
        }],
    }
    return observable
def pattern_email_attachment(_, attribute_value):
    """Pattern matching an email attachment by file name."""
    return "[email-message:body_multipart[*].body_raw_ref.name = '{0}']".format(attribute_value)
def observable_email_message(attribute_type, attribute_value):
    """Observable for email-subject/email-body: the field is the part after 'email-'."""
    field = attribute_type.split('-')[1]
    return {'0': {'type': 'email-message', field: attribute_value, 'is_multipart': 'false'}}
def pattern_email_message(attribute_type, attribute_value):
    """Pattern for email-subject/email-body."""
    field = attribute_type.split('-')[1]
    return "[email-message:{0} = '{1}']".format(field, attribute_value)
def observable_file(_, attribute_value):
    """Observable for a filename attribute: a single file object."""
    return {'0': dict(type='file', name=attribute_value)}
def pattern_file(_, attribute_value):
    """Pattern for a filename attribute."""
    return "[file:name = '{0}']".format(attribute_value)
def observable_file_hash(attribute_type, attribute_value):
    """Observable for filename|hash composite attributes."""
    hash_type = attribute_type.split('|')[1]
    filename, hash_value = attribute_value.split('|')
    return {'0': dict(type='file', name=filename, hashes={hash_type: hash_value})}
def pattern_file_hash(attribute_type, attribute_value):
    """Pattern for filename|hash composite attributes."""
    hash_type = attribute_type.split('|')[1]
    filename, hash_value = attribute_value.split('|')
    return "[file:name = '{0}' AND file:hashes.'{1}' = '{2}']".format(filename, hash_type, hash_value)
def observable_hash(attribute_type, attribute_value):
    """Observable for a bare hash attribute (md5/sha1/sha256)."""
    return {'0': dict(type='file', hashes={attribute_type: attribute_value})}
def pattern_hash(attribute_type, attribute_value):
    """Pattern for a bare hash attribute."""
    return "[file:hashes.'{0}' = '{1}']".format(attribute_type, attribute_value)
def observable_hostname_port(_, attribute_value):
    """Observable for hostname|port: a domain object plus a network-traffic object.

    Bug fix: the port object was stored under the integer key 1 instead of the
    string key '1' used by every other observable mapping in this module.
    """
    hostname, port = attribute_value.split('|')
    observable = observable_domain(_, hostname)
    observable['1'] = observable_port(_, port)['0']
    return observable
def pattern_hostname_port(_, attribute_value):
    """Pattern for hostname|port: domain clause AND dst_port clause."""
    hostname, port = attribute_value.split('|')
    clauses = (pattern_domain(_, hostname)[1:-1], pattern_port(_, port)[1:-1])
    return "[{0} AND {1}]".format(*clauses)
def observable_ip(attribute_type, attribute_value):
    """Observable for ip-src/ip-dst: address object referenced by network-traffic."""
    ip_direction = attribute_type.split('-')[1]
    address_type = define_address_type(attribute_value)
    network_traffic = {'type': 'network-traffic',
                       '{0}_ref'.format(ip_direction): '0',
                       'protocols': [address_type.split('-')[0]]}
    return {'0': {'type': address_type, 'value': attribute_value}, '1': network_traffic}
def pattern_ip(attribute_type, attribute_value):
    """Pattern for ip-src/ip-dst, matching both the address type and its value."""
    ip_direction = attribute_type.split('-')[1]
    address_type = define_address_type(attribute_value)
    template = "[network-traffic:{0}_ref.type = '{1}' AND network-traffic:{0}_ref.value = '{2}']"
    return template.format(ip_direction, address_type, attribute_value)
def observable_ip_port(attribute_type, attribute_value):
    """Observable for ip|port: the ip observable plus the matching port field."""
    ip_attribute_type = attribute_type.split('|')[0]
    ip_value, port_value = attribute_value.split('|')
    observable = observable_ip(ip_attribute_type, ip_value)
    port_field = "{0}_port".format(ip_attribute_type.split('-')[1])
    observable['1'][port_field] = port_value
    return observable
def pattern_ip_port(attribute_type, attribute_value):
    """Pattern for ip|port: port clause AND the ip clauses."""
    ip_attribute_type = attribute_type.split('|')[0]
    ip_value, port_value = attribute_value.split('|')
    port_field = "{0}_port".format(ip_attribute_type.split('-')[1])
    return "[network-traffic:{0} = '{1}' AND {2}]".format(
        port_field, port_value, pattern_ip(ip_attribute_type, ip_value)[1:-1])
def observable_mac_address(_, attribute_value):
    """Observable for a MAC address; the value is lower-cased."""
    return {'0': dict(type='mac-addr', value=attribute_value.lower())}
def pattern_mac_address(_, attribute_value):
    """Pattern for a MAC address; the value is lower-cased."""
    return "[mac-addr:value = '{0}']".format(attribute_value.lower())
def observable_malware_sample(*args):
    """Observable for malware-sample; with data (3rd arg), prepend the artifact object."""
    sample_observable = observable_file_hash("filename|md5", args[1])
    if len(args) != 3:
        return sample_observable
    file_object = sample_observable['0']
    file_object['content_ref'] = '0'
    return {'0': attribute_data_observable(args[2]), '1': file_object}
def pattern_malware_sample(*args):
    """Pattern for malware-sample; appends the artifact clause when data is given."""
    inner = pattern_file_hash("filename|md5", args[1])[1:-1]
    if len(args) == 3:
        inner = "{0} AND {1}".format(inner, attribute_data_pattern(args[2]))
    return "[{0}]".format(inner)
def observable_mutex(_, attribute_value):
    """Observable for a mutex attribute."""
    return {'0': dict(type='mutex', name=attribute_value)}
def pattern_mutex(_, attribute_value):
    """Pattern for a mutex attribute."""
    return "[mutex:name = '{0}']".format(attribute_value)
def observable_port(_, attribute_value):
    """Observable for a port attribute: network-traffic with only a dst_port."""
    return {'0': dict(type='network-traffic', dst_port=attribute_value, protocols=[])}
def pattern_port(_, attribute_value):
    """Pattern for a port attribute."""
    return "[network-traffic:dst_port = '{0}']".format(attribute_value)
def observable_regkey(_, attribute_value):
    """Observable for a regkey attribute; surrounding whitespace is stripped."""
    return {'0': dict(type='windows-registry-key', key=attribute_value.strip())}
def pattern_regkey(_, attribute_value):
    """Pattern for a regkey; single backslashes are doubled for STIX escaping."""
    key = attribute_value
    if '\\\\' not in key:
        key = key.replace('\\', '\\\\')
    return "[windows-registry-key:key = '{0}']".format(key.strip())
def observable_regkey_value(_, attribute_value):
    """Observable for regkey|value: a registry-key object carrying one value entry."""
    from stix2 import WindowsRegistryValueType
    key, value = attribute_value.split('|')
    observable = observable_regkey(_, key)
    observable['0']['values'] = WindowsRegistryValueType(name=value.strip())
    return observable
def pattern_regkey_value(_, attribute_value):
    """Pattern for regkey|value; backslashes in the value are doubled."""
    key, value = attribute_value.split('|')
    if '\\\\' not in value:
        value = value.replace('\\', '\\\\')
    clauses = [pattern_regkey(_, key)[1:-1],
               "windows-registry-key:values = '{0}'".format(value.strip())]
    return "[{0}]".format(" AND ".join(clauses))
def observable_reply_to(_, attribute_value):
    """Observable for reply-to: an address referenced via the Reply-To header field."""
    return {'0': dict(type='email-addr', value=attribute_value),
            '1': {'type': 'email-message',
                  'additional_header_fields': {'Reply-To': '0'},
                  'is_multipart': 'false'}}
def pattern_reply_to(_, attribute_value):
    """Pattern for reply-to, matching the reply_to header field."""
    return "[email-message:additional_header_fields.reply_to = '{0}']".format(attribute_value)
def observable_url(_, attribute_value):
    """Observable for a url attribute."""
    return {'0': dict(type='url', value=attribute_value)}
def pattern_url(_, attribute_value):
    """Pattern for a url attribute."""
    return "[url:value = '{0}']".format(attribute_value)
def observable_x509(_, attribute_value):
    """Observable for an x509 fingerprint attribute (stored as a sha1 hash)."""
    return {'0': dict(type='x509-certificate', hashes={'sha1': attribute_value})}
def pattern_x509(_, attribute_value):
    """Pattern for an x509 fingerprint attribute."""
    return "[x509-certificate:hashes = '{0}']".format(attribute_value)
def return_vulnerability(name):
    # External-reference entry pointing at a CVE identifier.
    return dict(source_name='cve', external_id=name)
# Dispatch table for MISP attribute types: each entry names the export
# handler ('to_call') and, for the "usual" types, the observable / pattern
# builder functions used to translate the attribute into STIX2.
mispTypesMapping = {
    'link': {'to_call': 'handle_link'},
    'vulnerability': {'to_call': 'add_vulnerability', 'vulnerability_args': return_vulnerability},
    'md5': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'sha1': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'sha256': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'filename': {'to_call': 'handle_usual_type', 'observable': observable_file, 'pattern': pattern_file},
    'filename|md5': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'filename|sha1': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'filename|sha256': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'ip-src': {'to_call': 'handle_usual_type', 'observable': observable_ip, 'pattern': pattern_ip},
    'ip-dst': {'to_call': 'handle_usual_type', 'observable': observable_ip, 'pattern': pattern_ip},
    'hostname': {'to_call': 'handle_usual_type', 'observable': observable_domain, 'pattern': pattern_domain},
    'domain': {'to_call': 'handle_usual_type', 'observable': observable_domain, 'pattern': pattern_domain},
    'domain|ip': {'to_call': 'handle_usual_type', 'observable': observable_domain_ip, 'pattern': pattern_domain_ip},
    'email-src': {'to_call': 'handle_usual_type', 'observable': observable_email_address, 'pattern': pattern_email_address},
    'email-dst': {'to_call': 'handle_usual_type', 'observable': observable_email_address, 'pattern': pattern_email_address},
    'email-subject': {'to_call': 'handle_usual_type', 'observable': observable_email_message, 'pattern': pattern_email_message},
    'email-body': {'to_call': 'handle_usual_type', 'observable': observable_email_message, 'pattern': pattern_email_message},
    'email-attachment': {'to_call': 'handle_usual_type', 'observable': observable_email_attachment, 'pattern': pattern_email_attachment},
    'url': {'to_call': 'handle_usual_type', 'observable': observable_url, 'pattern': pattern_url},
    'regkey': {'to_call': 'handle_usual_type', 'observable': observable_regkey, 'pattern': pattern_regkey},
    'regkey|value': {'to_call': 'handle_usual_type', 'observable': observable_regkey_value, 'pattern': pattern_regkey_value},
    'malware-sample': {'to_call': 'handle_usual_type', 'observable': observable_malware_sample, 'pattern': pattern_malware_sample},
    'mutex': {'to_call': 'handle_usual_type', 'observable': observable_mutex, 'pattern': pattern_mutex},
    'uri': {'to_call': 'handle_usual_type', 'observable': observable_url, 'pattern': pattern_url},
    'authentihash': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'ssdeep': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'imphash': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'pehash': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'impfuzzy': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'sha224': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'sha384': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'sha512': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'sha512/224': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'sha512/256': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'tlsh': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'cdhash': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
    'filename|authentihash': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'filename|ssdeep': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'filename|imphash': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'filename|impfuzzy': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'filename|pehash': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'filename|sha224': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'filename|sha384': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'filename|sha512': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'filename|sha512/224': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'filename|sha512/256': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'filename|tlsh': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
    'x509-fingerprint-sha1': {'to_call': 'handle_usual_type', 'observable': observable_x509, 'pattern': pattern_x509},
    'port': {'to_call': 'handle_usual_type', 'observable': observable_port, 'pattern': pattern_port},
    'ip-dst|port': {'to_call': 'handle_usual_type', 'observable': observable_ip_port, 'pattern': pattern_ip_port},
    'ip-src|port': {'to_call': 'handle_usual_type', 'observable': observable_ip_port, 'pattern': pattern_ip_port},
    'hostname|port': {'to_call': 'handle_usual_type', 'observable': observable_hostname_port, 'pattern': pattern_hostname_port},
    'email-reply-to': {'to_call': 'handle_usual_type', 'observable': observable_reply_to, 'pattern': pattern_reply_to},
    'attachment': {'to_call': 'handle_usual_type', 'observable': observable_attachment, 'pattern': pattern_attachment},
    'mac-address': {'to_call': 'handle_usual_type', 'observable': observable_mac_address, 'pattern': pattern_mac_address},
    'AS': {'to_call': 'handle_usual_type', 'observable': observable_as, 'pattern': pattern_as}
    # Display-name attributes are not exported yet:
    #'email-dst-display-name': {'observable': {'0': {'type': 'email-addr', 'display_name': ''}},
    #                           'pattern': 'email-addr:display_name = \'{0}\''},
    #'email-src-display-name': {'observable': {'0': {'type': 'email-addr', 'display_name': ''}},
    #                           'pattern': 'email-addr:display_name = \'{0}\''}
}
# Reusable fragments for building network-traffic STIX2 patterns.
network_traffic_pattern = "network-traffic:{0} = '{1}'"
network_traffic_src_ref = "src_ref.type = '{0}' AND network-traffic:src_ref.value"
network_traffic_dst_ref = "dst_ref.type = '{0}' AND network-traffic:dst_ref.value"

# Dispatch table for MISP object names: handler method plus, where relevant,
# the observable template and/or pattern format string.
objectsMapping = {
    'asn': {'to_call': 'handle_usual_object_name',
            'observable': {'type': 'autonomous-system'},
            'pattern': "autonomous-system:{0} = '{1}' AND "},
    'attack-pattern': {'to_call': 'add_attack_pattern_object'},
    'course-of-action': {'to_call': 'add_course_of_action_from_object'},
    'credential': {'to_call': 'handle_usual_object_name',
                   'observable': {'type': 'user-account'},
                   'pattern': "user-account:{0} = '{1}' AND "},
    'domain-ip': {'to_call': 'handle_usual_object_name',
                  'pattern': "domain-name:{0} = '{1}'"},
    'email': {'to_call': 'handle_usual_object_name',
              'observable': {'0': {'type': 'email-message'}},
              'pattern': "email-{0}:{1} = '{2}' AND "},
    'file': {'to_call': 'handle_usual_object_name',
             'observable': {'0': {'type': 'file', 'hashes': {}}},
             'pattern': "file:{0} = '{1}' AND "},
    'ip-port': {'to_call': 'handle_usual_object_name',
                'pattern': network_traffic_pattern},
    'network-connection': {'to_call': 'handle_usual_object_name',
                           'pattern': network_traffic_pattern},
    'network-socket': {'to_call': 'handle_usual_object_name',
                       'pattern': network_traffic_pattern},
    'pe': {'to_call': 'populate_objects_to_parse'},
    'pe-section': {'to_call': 'populate_objects_to_parse'},
    'process': {'to_call': 'handle_usual_object_name',
                'pattern': "process:{0} = '{1}' AND "},
    'registry-key': {'to_call': 'handle_usual_object_name',
                     'observable': {'0': {'type': 'windows-registry-key'}},
                     'pattern': "windows-registry-key:{0} = '{1}'"},
    'stix2-pattern': {'to_call': 'handle_usual_object_name'},
    'url': {'to_call': 'handle_usual_object_name',
            'observable': {'0': {'type': 'url'}},
            'pattern': "url:{0} = '{1}'"},
    'user-account': {'to_call': 'handle_usual_object_name',
                     'pattern': "user-account:{0} = '{1}'"},
    'vulnerability': {'to_call': 'add_object_vulnerability'},
    'x509': {'to_call': 'handle_usual_object_name',
             'pattern': "x509-certificate:{0} = '{1}' AND "},
}

# Per-object translations from MISP relation names to STIX2 property paths.
asnObjectMapping = {'asn': 'number', 'description': 'name', 'subnet-announced': 'value'}
attackPatternObjectMapping = {'name': 'name', 'summary': 'description'}
credentialObjectMapping = {'password': 'credential', 'username': 'user_id'}
domainIpObjectMapping = {'ip-dst': 'resolves_to_refs[*].value', 'domain': 'value'}
emailObjectMapping = {
    'email-body': {'email_type': 'message', 'stix_type': 'body'},
    'subject': {'email_type': 'message', 'stix_type': 'subject'},
    'to': {'email_type': 'message', 'stix_type': 'to_refs'},
    'cc': {'email_type': 'message', 'stix_type': 'cc_refs'},
    'to-display-name': {'email_type': 'addr', 'stix_type': 'display_name'},
    'from': {'email_type': 'message', 'stix_type': 'from_ref'},
    'from-display-name': {'email_type': 'addr', 'stix_type': 'display_name'},
    'reply-to': {'email_type': 'message', 'stix_type': 'additional_header_fields.reply_to'},
    'attachment': {'email_type': 'message', 'stix_type': 'body_multipart[*].body_raw_ref.name'},
    'send-date': {'email_type': 'message', 'stix_type': 'date'},
    'x-mailer': {'email_type': 'message', 'stix_type': 'additional_header_fields.x_mailer'},
}
fileMapping = {'hashes': "hashes.'{0}'", 'size-in-bytes': 'size', 'filename': 'name', 'mime-type': 'mime_type'}
ipPortObjectMapping = {
    'ip': network_traffic_dst_ref,
    'src-port': 'src_port', 'dst-port': 'dst_port',
    'first-seen': 'start', 'last-seen': 'end',
    'domain': 'value',
}
networkTrafficMapping = {
    'address-family': 'address_family', 'domain-family': 'protocol_family',
    'protocol': 'protocols', 'src-port': 'src_port', 'dst-port': 'dst_port',
    'ip-src': network_traffic_src_ref, 'ip-dst': network_traffic_dst_ref,
    'hostname-src': network_traffic_src_ref, 'hostname-dst': network_traffic_dst_ref,
}
peMapping = {'type': 'pe_type', 'number-sections': 'number_of_sections', 'imphash': 'imphash'}
peSectionMapping = {'name': 'name', 'size-in-bytes': 'size', 'entropy': 'entropy'}
processMapping = {'name': 'name', 'pid': 'pid', 'creation-time': 'created'}
regkeyMapping = {
    'data-type': 'data_type', 'data': 'data', 'name': 'name',
    'last-modified': 'modified', 'key': 'key',
}
urlMapping = {'url': 'value', 'domain': 'value', 'port': 'dst_port'}
userAccountMapping = {
    'account-type': 'account_type', 'can_escalate_privs': 'can_escalate_privs',
    'created': 'account_created', 'disabled': 'is_disabled', 'display-name': 'display_name',
    'expires': 'account_expires', 'first_login': 'account_first_login',
    'is_service_account': 'is_service_account', 'last_login': 'account_last_login',
    'password': 'credential', 'password_last_changed': 'credential_last_changed',
    'privileged': 'is_privileged', 'username': 'account_login', 'user-id': 'user_id',
}
unixAccountExtensionMapping = {'group': 'groups', 'group-id': 'gid', 'home_dir': 'home_dir', 'shell': 'shell'}
x509mapping = {
    'pubkey-info-algorithm': 'subject_public_key_algorithm', 'subject': 'subject',
    'pubkey-info-exponent': 'subject_public_key_exponent', 'issuer': 'issuer',
    'pubkey-info-modulus': 'subject_public_key_modulus', 'serial-number': 'serial_number',
    'validity-not-before': 'validity_not_before', 'validity-not-after': 'validity_not_after',
    'version': 'version',
}
# Well-known ports mapped to a protocol name.
defineProtocols = {'80': 'http', '443': 'https'}
# MISP TLP tags -> names of the predefined STIX2 marking definitions.
tlp_markings = {'tlp:white': 'TLP_WHITE', 'tlp:green': 'TLP_GREEN',
                'tlp:amber': 'TLP_AMBER', 'tlp:red': 'TLP_RED'}
# Relationship name to use between a source SDO type (outer key) and a
# target SDO type (inner key) when exporting MISP references.
relationshipsSpecifications = {
    'attack-pattern': {'vulnerability': 'targets', 'identity': 'targets',
                       'malware': 'uses', 'tool': 'uses'},
    'campaign': {'intrusion-set': 'attributed-to', 'threat-actor': 'attributed-to',
                 'identity': 'targets', 'vulnerability': 'targets',
                 'attack-pattern': 'uses', 'malware': 'uses',
                 'tool': 'uses'},
    'course-of-action': {'attack-pattern': 'mitigates', 'malware': 'mitigates',
                         'tool': 'mitigates', 'vulnerability': 'mitigates'},
    'indicator': {'attack-pattern': 'indicates', 'campaign': 'indicates',
                  'intrusion-set': 'indicates', 'malware': 'indicates',
                  'threat-actor': 'indicates', 'tool': 'indicates'},
    'intrusion-set': {'threat-actor': 'attributed-to', 'identity': 'targets',
                      'vulnerability': 'targets', 'attack-pattern': 'uses',
                      'malware': 'uses', 'tool': 'uses'},
    'malware': {'identity': 'targets', 'vulnerability': 'targets',
                'tool': 'uses', 'malware': 'variant-of'},
    'threat-actor': {'identity': 'attributed-to', 'vulnerability': 'targets',
                     'attack-pattern': 'uses', 'malware': 'uses',
                     'tool': 'uses'},
    'tool': {'identity': 'targets', 'vulnerability': 'targets'},
}
|
FIRSTdotorg/MISP
|
app/files/scripts/stix2/misp2stix2_mapping.py
|
Python
|
agpl-3.0
| 24,281
|
[
"Amber"
] |
0fdc14ee59476cbe4aff3505b87e1aed219c31c688eea5bd8816d580ffff6faa
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

# The docstring must precede the __future__ import so that it is actually
# bound to the module's __doc__ (a string after an import is a no-op).
"""
This package implements modules that test the input and output modules for QChem.
"""

from __future__ import unicode_literals
|
gpetretto/pymatgen
|
pymatgen/io/qchem_io/tests/__init__.py
|
Python
|
mit
| 234
|
[
"pymatgen"
] |
077934ba45dffefae8a750113052458beb3d067eac20600e6657771cad34b796
|
#!/usr/bin/env python
from compliance_checker.cf import CFBaseCheck, BaseCheck, dimless_vertical_coordinates
from compliance_checker.cf.util import is_vertical_coordinate, is_time_variable, units_convertible
from compliance_checker.base import DSPair
from wicken.netcdf_dogma import NetCDFDogma
from netCDF4 import Dataset
from tempfile import gettempdir
from pkg_resources import resource_filename
import unittest
import os
import re
# Paths to the static netCDF fixtures used throughout the CF test suite,
# resolved relative to the installed compliance_checker package.
static_files = {
    'rutgers': resource_filename('compliance_checker', 'tests/data/ru07-20130824T170228_rt0.nc'),
    'conv_multi': resource_filename('compliance_checker', 'tests/data/conv_multi.nc'),
    'conv_bad': resource_filename('compliance_checker', 'tests/data/conv_bad.nc'),
    'example-grid': resource_filename('compliance_checker', 'tests/data/example-grid.nc'),
    'badname': resource_filename('compliance_checker', 'tests/data/non-comp/badname.netcdf'),
    'bad': resource_filename('compliance_checker', 'tests/data/non-comp/bad.nc'),
    'dimensionless': resource_filename('compliance_checker', 'tests/data/dimensionless.nc'),
    '2dim': resource_filename('compliance_checker', 'tests/data/2dim-grid.nc'),
    'bad2dim': resource_filename('compliance_checker', 'tests/data/non-comp/bad2dim.nc'),
    'rhgrid': resource_filename('compliance_checker', 'tests/data/rhgrid.nc'),
    'bad-rhgrid': resource_filename('compliance_checker', 'tests/data/non-comp/bad-rhgrid.nc'),
    'bad_data_type': resource_filename('compliance_checker', 'tests/data/bad_data_type.nc'),
    'mapping': resource_filename('compliance_checker', 'tests/data/mapping.nc'),
    'bad_region': resource_filename('compliance_checker', 'tests/data/bad_region.nc'),
    'featureType': resource_filename('compliance_checker', 'tests/data/example-grid.nc'),
    'cont_ragged': resource_filename('compliance_checker', 'tests/data/cont_ragged.nc'),
    'index_ragged': resource_filename('compliance_checker', 'tests/data/index_ragged.nc'),
    'bad_missing_data': resource_filename('compliance_checker', 'tests/data/bad_missing_data.nc'),
    'self-referencing-var': resource_filename('compliance_checker', 'tests/data/self-referencing-var.nc'),
}
class MockVariable(object):
    '''
    Stand-in for a netCDF dataset variable: tests attach arbitrary
    attributes (standard_name, axis, units, ...) to bare instances.
    '''
class TestCF(unittest.TestCase):
# @see
# http://www.saltycrane.com/blog/2012/07/how-prevent-nose-unittest-using-docstring-when-verbosity-2/
    def shortDescription(self):
        # Return None so verbose test runners print the test name rather
        # than the first line of the docstring.
        return None
# override __str__ and __repr__ behavior to show a copy-pastable nosetest name for ion tests
# ion.module:TestClassName.test_function_name
def __repr__(self):
name = self.id()
name = name.split('.')
if name[0] not in ["ion", "pyon"]:
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
else:
return "%s ( %s )" % (name[-1], '.'.join(name[:-2]) + ":" + '.'.join(name[-2:]))
__str__ = __repr__
    def setUp(self):
        '''
        Create a fresh CFBaseCheck instance for every test.
        '''
        self.cf = CFBaseCheck()
#--------------------------------------------------------------------------------
# Helper Methods
#--------------------------------------------------------------------------------
def new_nc_file(self):
'''
Make a new temporary netCDF file for the scope of the test
'''
nc_file_path = os.path.join(gettempdir(), 'example.nc')
if os.path.exists(nc_file_path):
raise IOError('File Exists: %s' % nc_file_path)
nc = Dataset(nc_file_path, 'w')
self.addCleanup(os.remove, nc_file_path)
self.addCleanup(nc.close)
return nc
def get_pair(self, nc_dataset):
'''
Return a pairwise object for the dataset
'''
if isinstance(nc_dataset, basestring):
nc_dataset = Dataset(nc_dataset, 'r')
self.addCleanup(nc_dataset.close)
dogma = NetCDFDogma('nc', self.cf.beliefs(), nc_dataset)
pair = DSPair(nc_dataset, dogma)
return pair
#--------------------------------------------------------------------------------
# Compliance Tests
#--------------------------------------------------------------------------------
def test_check_data_types(self):
"""
2.2 The netCDF data types char, byte, short, int, float or real, and double are all acceptable
"""
dataset = self.get_pair(static_files['rutgers'])
result = self.cf.check_data_types(dataset)
self.assertTrue(result.value)
dpair = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_data_types(dpair)
assert result.value == (5, 6)
def test_naming_conventions(self):
'''
Section 2.3 Naming Conventions
Variable, dimension and attribute names should begin with a letter and be composed of letters, digits, and underscores.
'''
dataset = self.get_pair(static_files['rutgers'])
result = self.cf.check_naming_conventions(dataset)
num_var = len(dataset.dataset.variables)
expected = (num_var,) * 2
self.assertEquals(result.value, expected)
dataset = self.get_pair(static_files['bad'])
result = self.cf.check_naming_conventions(dataset)
num_var = len(dataset.dataset.variables)
expected = (num_var-1, num_var)
self.assertEquals(result.value, expected)
assert '_poor_dim' in result.msgs [0]
def test_check_names_unique(self):
"""
2.3 names should not be distinguished purely by case, i.e., if case is disregarded, no two names should be the same.
"""
dataset = self.get_pair(static_files['rutgers'])
result = self.cf.check_names_unique(dataset)
num_var = len(dataset.dataset.variables)
expected = (num_var,) * 2
self.assertEquals(result.value, expected)
#TODO: Add bad unique names to bad.nc
def test_check_dimension_names(self):
"""
2.4 A variable may have any number of dimensions, including zero, and the dimensions must all have different names.
"""
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_dimension_names(dataset)
assert result.value == (5, 6)
def test_check_dimension_order(self):
"""
2.4 If any or all of the dimensions of a variable have the interpretations of "date or time" (T), "height or depth" (Z),
"latitude" (Y), or "longitude" (X) then we recommend, those dimensions to appear in the relative order T, then Z, then Y,
then X in the CDL definition corresponding to the file. All other dimensions should, whenever possible, be placed to the
left of the spatiotemporal dimensions.
"""
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_dimension_order(dataset)
assert result.value == (11, 12)
def test_check_fill_value_outside_valid_range(self):
"""
2.5.1 The _FillValue should be outside the range specified by valid_range (if used) for a variable.
"""
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_fill_value_outside_valid_range(dataset)
assert result.value == (1, 2)
def test_check_conventions_are_cf_16(self):
"""
2.6.1 the NUG defined global attribute Conventions to the string value "CF-1.6"
"""
# :Conventions = "CF-1.6"
dataset = self.get_pair(static_files['rutgers'])
result = self.cf.check_conventions_are_cf_16(dataset)
self.assertTrue(result.value)
# :Conventions = "CF-1.6 ,ACDD" ;
dataset = self.get_pair(static_files['conv_multi'])
result = self.cf.check_conventions_are_cf_16(dataset)
self.assertTrue(result.value)
# :Conventions = "NoConvention"
dataset = self.get_pair(static_files['conv_bad'])
result = self.cf.check_conventions_are_cf_16(dataset)
self.assertFalse(result.value)
def test_check_convention_globals(self):
"""
2.6.2 title/history global attributes, must be strings. Do not need to exist.
"""
#check for pass
dataset = self.get_pair(static_files['rutgers'])
result = self.cf.check_convention_globals(dataset)
for each in result:
self.assertTrue(each.value)
#check if it doesn't exist that we pass
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_convention_globals(dataset)
for each in result:
self.assertTrue(each.value)
    def test_check_convention_possibly_var_attrs(self):
        """
        3.1 The units attribute is required for all variables that represent dimensional quantities
        (except for boundary variables defined in Section 7.1, "Cell Boundaries" and climatology variables
        defined in Section 7.4, "Climatological Statistics").
        Units are not required for dimensionless quantities. A variable with no units attribute is assumed
        to be dimensionless. However, a units attribute specifying a dimensionless unit may optionally be
        included.
        - units required
        - type must be recognized by udunits
        - if std name specified, must be consistent with standard name table, must also be consistent with a
        specified cell_methods attribute if present
        """
        # Compliant fixture: every reported result should pass.
        dataset = self.get_pair(static_files['rutgers'])
        result = self.cf.check_convention_possibly_var_attrs(dataset)
        for each in result:
            self.assertTrue(each.value)
        # Non-compliant fixture: every reported result should fail.
        dataset = self.get_pair(static_files['bad_data_type'])
        result = self.cf.check_convention_possibly_var_attrs(dataset)
        for each in result:
            self.assertFalse(each.value)
def test_check_standard_name(self):
"""
3.3 A standard name is associated with a variable via the attribute standard_name which takes a
string value comprised of a standard name optionally followed by one or more blanks and a
standard name modifier
"""
dataset = self.get_pair(static_files['2dim'])
result = self.cf.check_standard_name(dataset)
for each in result:
self.assertTrue(each.value)
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_standard_name(dataset)
for each in result:
self.assertFalse(each.value)
def test_check_units(self):
dataset = self.get_pair(static_files['2dim'])
result = self.cf.check_units(dataset)
for each in result:
self.assertTrue(each.value)
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_units(dataset)
for each in result:
self.assertFalse(each.value)
def test_coordinate_types(self):
'''
Section 4 Coordinate Types
We strongly recommend that coordinate variables be used for all coordinate types whenever they are applicable.
'''
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_coordinate_vars_for_all_coordinate_types(dataset)
for each in result:
print each
self.assertTrue(each.value)
def test_check_coordinate_axis_attr(self):
dataset = self.get_pair(static_files['2dim'])
result = self.cf.check_coordinate_axis_attr(dataset)
for each in result:
self.assertTrue(each.value)
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_coordinate_axis_attr(dataset)
for each in result:
print each
if each.name[1] in ['time', 'latitude']:
self.assertTrue(each.value)
if each.name[1] in ['salinity']:
if each.name[2] not in ['does_not_depend_on_mult_coord_vars']:
self.assertFalse(each.value)
def test_latitude(self):
'''
Section 4.1 Latitude Coordinate
'''
# Check compliance
dataset = self.get_pair(static_files['example-grid'])
results = self.cf.check_latitude(dataset)
for r in results:
if isinstance(r.value, tuple):
self.assertEquals(r.value[0], r.value[1])
else:
self.assertTrue(r.value)
# Verify non-compliance
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_latitude(dataset)
# Store the results in a dict
rd = {}
for r in results:
rd[r.name[1:]] = r.value
# ('lat', 'has_units') should be False
self.assertFalse(rd[('lat', 'has_units')])
# ('lat', 'correct_units') should be (0,3)
self.assertEquals(rd[('lat', 'correct_units')], (0,3))
# ('lat_uv', 'has_units') should be True
self.assertTrue(rd[('lat_uv', 'has_units')])
# ('lat_uv', 'correct_units') should be (2,3)
self.assertEquals(rd[('lat_uv', 'correct_units')], (2,3))
# ('lat_like', 'has_units') should be True
self.assertTrue(rd[('lat_like', 'has_units')])
# ('lat_like', 'correct_units') should be (1,3)
self.assertEquals(rd[('lat_like', 'correct_units')], (1,3))
def test_longitude(self):
'''
Section 4.2 Longitude Coordinate
'''
# Check compliance
dataset = self.get_pair(static_files['example-grid'])
results = self.cf.check_longitude(dataset)
for r in results:
if isinstance(r.value, tuple):
self.assertEquals(r.value[0], r.value[1])
else:
self.assertTrue(r.value)
# Verify non-compliance
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_longitude(dataset)
# Store the results in a dict
rd = {}
for r in results:
rd[r.name[1:]] = r.value
# ('lon', 'has_units') should be False
self.assertFalse(rd[('lon', 'has_units')])
# ('lon', 'correct_units') should be (0,3)
self.assertEquals(rd[('lon', 'correct_units')], (0,3))
# ('lon_uv', 'has_units') should be True
self.assertTrue(rd[('lon_uv', 'has_units')])
# ('lon_uv', 'correct_units') should be (2,3)
self.assertEquals(rd[('lon_uv', 'correct_units')], (2,3))
# ('lon_like', 'has_units') should be True
self.assertTrue(rd[('lon_like', 'has_units')])
# ('lon_like', 'correct_units') should be (1,3)
self.assertEquals(rd[('lon_like', 'correct_units')], (1,3))
def test_is_vertical_coordinate(self):
'''
Section 4.3 Qualifiers for Vertical Coordinate
NOTE: The standard doesn't explicitly say that vertical coordinates must be a
coordinate type.
'''
# Make something that I can attach attrs to
mock_variable = MockVariable
# Proper name/standard_name
known_name = mock_variable()
known_name.standard_name = 'depth'
self.assertTrue(is_vertical_coordinate('not_known', known_name))
# Proper Axis
axis_set = mock_variable()
axis_set.axis = 'Z'
self.assertTrue(is_vertical_coordinate('not_known', axis_set))
# Proper units
units_set = mock_variable()
units_set.units = 'dbar'
self.assertTrue(is_vertical_coordinate('not_known', units_set))
# Proper units/positive
positive = mock_variable()
positive.units = 'm'
positive.positive = 'up'
self.assertTrue(is_vertical_coordinate('not_known', positive))
def test_vertical_coordinate(self):
'''
Section 4.3 Vertical (Height or Depth) coordinate
'''
# Check compliance
dataset = self.get_pair(static_files['example-grid'])
results = self.cf.check_vertical_coordinate(dataset)
for r in results:
self.assertTrue(r.value)
# Check non-compliance
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_vertical_coordinate(dataset)
# Store the results by the tuple
rd = { r.name[1:] : r.value for r in results }
# ('height', 'has_units') should be False
self.assertFalse(rd[('height', 'has_units')])
# ('height', 'correct_units') should be False
self.assertFalse(rd[('height', 'correct_units')])
# ('depth', 'has_units') should be True
self.assertTrue(rd[('depth', 'has_units')])
# ('depth', 'correct_units') should be False
self.assertFalse(rd[('depth', 'correct_units')])
# ('depth2', 'has_units') should be False
self.assertTrue(rd[('depth2', 'has_units')])
# ('depth2', 'correct_units') should be False
self.assertFalse(rd[('depth2', 'correct_units')])
def test_vertical_dimension(self):
'''
Section 4.3.1 Dimensional Vertical Coordinate
'''
# Check for compliance
dataset = self.get_pair(static_files['example-grid'])
results = self.cf.check_dimensional_vertical_coordinate(dataset)
for r in results:
self.assertTrue(r.value)
# Check for non-compliance
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_dimensional_vertical_coordinate(dataset)
for r in results:
self.assertFalse(r.value)
def test_appendix_d(self):
'''
CF 1.6
Appendix D
The definitions given here allow an application to compute dimensional
coordinate values from the dimensionless ones and associated variables.
The formulas are expressed for a gridpoint (n,k,j,i) where i and j are
the horizontal indices, k is the vertical index and n is the time index.
A coordinate variable is associated with its definition by the value of
the standard_name attribute. The terms in the definition are associated
with file variables by the formula_terms attribute. The formula_terms
attribute takes a string value, the string being comprised of
blank-separated elements of the form "term: variable", where term is a
keyword that represents one of the terms in the definition, and variable
is the name of the variable in a netCDF file that contains the values
for that term. The order of elements is not significant.
'''
dimless = dict(dimless_vertical_coordinates)
def verify(std_name, test_str):
regex_matches = re.match(dimless[std_name], test_str)
self.assertIsNotNone(regex_matches)
# For each of the listed dimensionless vertical coordinates,
# verify that the formula_terms match the provided regex
verify('atmosphere_ln_pressure_coordinate',
"p0: var1 lev: var2")
verify('atmosphere_sigma_coordinate',
"sigma: var1 ps: var2 ptop: var3")
verify('atmosphere_hybrid_sigma_pressure_coordinate',
"a: var1 b: var2 ps: var3 p0: var4")
verify('atmosphere_hybrid_height_coordinate',
"a: var1 b: var2 orog: var3")
verify('atmosphere_sleve_coordinate',
"a: var1 b1: var2 b2: var3 ztop: var4 zsurf1: var5 zsurf2: var6")
verify('ocean_sigma_coordinate',
"sigma: var1 eta: var2 depth: var3")
verify('ocean_s_coordinate',
"s: var1 eta: var2 depth: var3 a: var4 b: var5 depth_c: var6")
verify('ocean_sigma_z_coordinate',
"sigma: var1 eta: var2 depth: var3 depth_c: var4 nsigma: var5 zlev: var6")
verify('ocean_double_sigma_coordinate',
"sigma: var1 depth: var2 z1: var3 z2: var4 a: var5 href: var6 k_c: var7")
def test_dimensionless_vertical(self):
'''
Section 4.3.2
'''
# Check affirmative compliance
dataset = self.get_pair(static_files['dimensionless'])
results = self.cf.check_dimensionless_vertical_coordinate(dataset)
for r in results:
self.assertTrue(r.value)
# Check negative compliance
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_dimensionless_vertical_coordinate(dataset)
rd = { r.name[1:] : r.value for r in results }
# ('lev1', 'formula_terms') should be False
self.assertFalse(rd[('lev1', 'formula_terms')])
# ('lev2', 'formula_terms') should be True
self.assertTrue(rd[('lev2', 'formula_terms')])
# ('lev2', 'terms_exist') should be False
self.assertFalse(rd[('lev2', 'terms_exist')])
def test_is_time_variable(self):
var1 = MockVariable()
var1.standard_name = 'time'
self.assertTrue(is_time_variable('not_time', var1))
var2 = MockVariable()
self.assertTrue(is_time_variable('time', var2))
self.assertFalse(is_time_variable('not_time', var2))
var3 = MockVariable()
var3.axis = 'T'
self.assertTrue(is_time_variable('maybe_time', var3))
var4 = MockVariable()
var4.units = 'seconds since 1900-01-01'
self.assertTrue(is_time_variable('maybe_time', var4))
def test_check_time_coordinate(self):
    """Compliant time coordinates pass; bad fixture fails on units."""
    dataset = self.get_pair(static_files['example-grid'])
    for result in self.cf.check_time_coordinate(dataset):
        self.assertTrue(result.value)

    dataset = self.get_pair(static_files['bad'])
    outcomes = {result.name[1:]: result.value
                for result in self.cf.check_time_coordinate(dataset)}
    self.assertFalse(outcomes[('bad_time_1', 'has_units')])
    self.assertTrue(outcomes[('bad_time_2', 'has_units')])
    self.assertFalse(outcomes[('bad_time_2', 'correct_units')])

def test_check_calendar(self):
    """Compliant calendars pass; bad fixture fails presence/validity."""
    dataset = self.get_pair(static_files['example-grid'])
    for result in self.cf.check_calendar(dataset):
        self.assertTrue(result.value)

    dataset = self.get_pair(static_files['bad'])
    outcomes = {result.name[1:]: result.value
                for result in self.cf.check_calendar(dataset)}
    self.assertFalse(outcomes[('bad_time_1', 'has_calendar')])
    self.assertFalse(outcomes[('bad_time_1', 'valid_calendar')])
    self.assertTrue(outcomes[('bad_time_2', 'has_calendar')])
    self.assertFalse(outcomes[('bad_time_2', 'valid_calendar')])
def test_check_independent_axis_dimensions(self):
    """Only the known-bad variables fail the axis-dimension check."""
    dataset = self.get_pair(static_files['example-grid'])
    for result in self.cf.check_independent_axis_dimensions(dataset):
        self.assertTrue(result.value)

    dataset = self.get_pair(static_files['bad'])
    expected_failures = {'lev1', 'lev2', 'bad_time_1', 'bad_time_2',
                         'column_temp'}
    for result in self.cf.check_independent_axis_dimensions(dataset):
        if result.name[1] in expected_failures:
            self.assertFalse(result.value)
        else:
            self.assertTrue(result.value)
def test_check_two_dimensional(self):
    """2-D coordinate variables: good fixture passes, bad fixture has a
    known pass/fail pattern, and self-referencing variables must not
    crash the check."""
    dataset = self.get_pair(static_files['2dim'])
    for result in self.cf.check_two_dimensional(dataset):
        self.assertTrue(result.value)

    # Known pass/fail pattern for the intentionally broken fixture.
    dataset = self.get_pair(static_files['bad2dim'])
    results = self.cf.check_two_dimensional(dataset)
    expected = [True, False, False, True, False, True]
    self.assertTrue(len(results) >= len(expected))
    for result, value in zip(results, expected):
        self.assertEqual(result.value, value)

    # Self-referencing variables should be handled gracefully, not raise.
    # Fix: the original wrapped this in a bare `except:` that swallowed
    # every exception (including the assertion itself) and reported via
    # assertTrue(False), hiding the real error.  Catch Exception and
    # report it with self.fail instead.
    dataset = self.get_pair(static_files['self-referencing-var'])
    try:
        results = self.cf.check_two_dimensional(dataset)
    except Exception as err:
        self.fail('check_two_dimensional raised on a self-referencing '
                  'variable: %s' % err)
    self.assertFalse(results[0].value)
def test_check_reduced_horizontal_grid(self):
    """Reduced horizontal grids: good fixture passes; the bad fixture
    fails with specific diagnostic messages."""
    dataset = self.get_pair(static_files['rhgrid'])
    results = self.cf.check_reduced_horizontal_grid(dataset)
    rd = {result.name[1]: result.value for result in results}
    self.assertTrue(rd['PS'])

    dataset = self.get_pair(static_files['bad-rhgrid'])
    results = self.cf.check_reduced_horizontal_grid(dataset)
    rd = {result.name[1]: (result.value, result.msgs) for result in results}
    # Fix: .items() instead of the Python-2-only .iteritems().
    for name, (value, msg) in rd.items():
        self.assertFalse(value)
    self.assertIn('Coordinate longitude is not a proper variable',
                  rd['PSa'][1])
    self.assertIn("Coordinate latitude's dimension, latdim, is not a "
                  "dimension of PSb", rd['PSb'][1])
    # No result at all should be emitted for PSc.
    self.assertNotIn('PSc', rd)
def test_check_horz_crs_grid_mappings_projections(self):
    """Grid-mapping score tuples for the mapping fixture."""
    dataset = self.get_pair(static_files['mapping'])
    results = self.cf.check_horz_crs_grid_mappings_projections(dataset)
    scores = {result.name[1]: result.value for result in results}
    self.assertEqual(scores['wgs84'], (3, 3))
    self.assertEqual(scores['epsg'], (7, 8))
def test_check_scalar_coordinate_system(self):
    """Scalar coordinate check scores 1 of 2 on the bad fixture."""
    dataset = self.get_pair(static_files['bad_data_type'])
    results = self.cf.check_scalar_coordinate_system(dataset)
    self.assertEqual(results[0].value, (1, 2))

def test_check_geographic_region(self):
    """First region in the fixture is invalid, the second is valid."""
    dataset = self.get_pair(static_files['bad_region'])
    results = self.cf.check_geographic_region(dataset)
    self.assertFalse(results[0].value)
    self.assertTrue(results[1].value)

def test_check_alternative_coordinates(self):
    """Alternative coordinates pass on the bad_data_type fixture."""
    dataset = self.get_pair(static_files['bad_data_type'])
    results = self.cf.check_alternative_coordinates(dataset)
    self.assertTrue(results[0].value)
#def test_check_cell_boundaries(self):
# dataset = self.get_pair(static_files['bad_data_type'])
# results = self.cf.check_cell_boundaries(dataset)
# print results
# self.assertTrue(results[0].value)
def test_check_packed_data(self):
    """First packed variable fails; the second passes."""
    dataset = self.get_pair(static_files['bad_data_type'])
    results = self.cf.check_packed_data(dataset)
    self.assertFalse(results[0].value)
    self.assertTrue(results[1].value)

def test_check_compression(self):
    """Compression-by-gathering scores on the bad_data_type fixture."""
    dataset = self.get_pair(static_files['bad_data_type'])
    results = self.cf.check_compression(dataset)
    self.assertEqual(results[0].value, (2, 2))
    self.assertEqual(results[1].value, (0, 2))
def test_check_all_features_are_same_type(self):
    """Feature-type homogeneity: the rutgers fixture yields no result;
    homogeneous features pass; mixed features fail."""
    dataset = self.get_pair(static_files['rutgers'])
    results = self.cf.check_all_features_are_same_type(dataset)
    # Fix: assertIsNone instead of 'results == None' (PEP 8: comparisons
    # to None should use 'is', and the unittest helper reports better).
    self.assertIsNone(results)

    dataset = self.get_pair(static_files['featureType'])
    results = self.cf.check_all_features_are_same_type(dataset)
    self.assertTrue(results.value)

    dataset = self.get_pair(static_files['bad_data_type'])
    results = self.cf.check_all_features_are_same_type(dataset)
    self.assertFalse(results.value)
def test_check_orthogonal_multidim_array(self):
    """All orthogonal multidimensional array results pass."""
    dataset = self.get_pair(static_files['rutgers'])
    for result in self.cf.check_orthogonal_multidim_array(dataset):
        self.assertTrue(result.value)

def test_check_incomplete_multidim_array(self):
    """All incomplete multidimensional array results pass."""
    dataset = self.get_pair(static_files['bad_data_type'])
    for result in self.cf.check_incomplete_multidim_array(dataset):
        self.assertTrue(result.value)

def test_check_contiguous_ragged_array(self):
    """All contiguous ragged array results pass."""
    dataset = self.get_pair(static_files['cont_ragged'])
    for result in self.cf.check_contiguous_ragged_array(dataset):
        self.assertTrue(result.value)

def test_check_indexed_ragged_array(self):
    """All indexed ragged array results pass."""
    dataset = self.get_pair(static_files['index_ragged'])
    for result in self.cf.check_indexed_ragged_array(dataset):
        self.assertTrue(result.value)
def test_check_feature_type(self):
    """featureType: valid in index_ragged, invalid in bad_data_type."""
    dataset = self.get_pair(static_files['index_ragged'])
    self.assertTrue(self.cf.check_feature_type(dataset).value)

    dataset = self.get_pair(static_files['bad_data_type'])
    self.assertFalse(self.cf.check_feature_type(dataset).value)

def test_check_coordinates_and_metadata(self):
    """Coordinate/metadata checks match the known fixture pattern."""
    dataset = self.get_pair(static_files['bad_data_type'])
    results = self.cf.check_coordinates_and_metadata(dataset)
    self.assertFalse(results[0].value)
    self.assertTrue(results[1].value)
    self.assertFalse(results[2].value)

    dataset = self.get_pair(static_files['index_ragged'])
    results = self.cf.check_coordinates_and_metadata(dataset)
    self.assertTrue(results[-1].value)

def test_check_missing_data(self):
    """Missing-data conventions: index_ragged passes entirely,
    bad_missing_data fails entirely."""
    dataset = self.get_pair(static_files['index_ragged'])
    for result in self.cf.check_missing_data(dataset):
        self.assertTrue(result.value)

    dataset = self.get_pair(static_files['bad_missing_data'])
    for result in self.cf.check_missing_data(dataset):
        self.assertFalse(result.value)
#--------------------------------------------------------------------------------
# Utility Method Tests
#--------------------------------------------------------------------------------
def test_temporal_unit_conversion(self):
    """Plain durations convert; datum-anchored time units do not
    convert to plain durations."""
    self.assertTrue(units_convertible('hours', 'seconds'))
    self.assertFalse(units_convertible('hours', 'hours since 2000-01-01'))
def breakpoint(scope=None, global_scope=None):
    """Drop into an embedded IPython shell at the call site.

    scope / global_scope: optional dicts merged into the local/global
    namespaces so the shell can see the caller's variables.

    NOTE(review): this helper shadows the Python 3.7+ builtin
    `breakpoint()`; harmless in Python 2 code but worth renaming on a
    Python 3 migration.
    """
    import traceback
    from IPython.config.loader import Config
    ipy_config = Config()
    # Custom prompt strings for the embedded shell.
    ipy_config.PromptManager.in_template = '><> '
    ipy_config.PromptManager.in2_template = '... '
    ipy_config.PromptManager.out_template = '--> '
    ipy_config.InteractiveShellEmbed.confirm_exit = False
    # First import the embeddable shell class
    from IPython.frontend.terminal.embed import InteractiveShellEmbed
    from mock import patch
    # NOTE(review): locals().update(...) does not reliably inject names
    # into a function's local scope in CPython -- confirm this actually
    # exposes `scope` to the shell.
    if scope is not None:
        locals().update(scope)
    if global_scope is not None:
        globals().update(global_scope)
    # Update namespace of interactive shell
    # TODO: Cleanup namespace even further
    # Now create an instance of the embeddable shell. The first argument is a
    # string with options exactly as you would type them if you were starting
    # IPython at the system command line. Any parameters you want to define for
    # configuration can thus be specified here.
    # The patch suppresses IPython's virtualenv detection during init.
    with patch("IPython.core.interactiveshell.InteractiveShell.init_virtualenv"):
        ipshell = InteractiveShellEmbed(config=ipy_config,
                                        banner1="Entering Breakpoint Shell",
                                        exit_msg = 'Returning...')
    # Identify the call site (file, line, function) for the banner.
    stack = traceback.extract_stack(limit=2)
    message = 'File %s, line %s, in %s' % stack[0][:-1]
    try:
        # Optional desktop notification; ignored when growl is absent.
        import growl
        growl.growl('breakpoint', 'Ready')
    except:
        pass
    ipshell('(%s) Breakpoint @ %s' % ('breakpoint', message))
|
webtrike/compliance-checker
|
compliance_checker/tests/test_cf.py
|
Python
|
apache-2.0
| 31,772
|
[
"NetCDF"
] |
405f6c785f598b247729afb7093bc50b41b4cb0978a4f09f8aba76bbd4daf030
|
import requests
"""
Communications library for commercial (COTS) USB weather stations:
https://github.com/jim-easterbrook/pywws/
Docs on adding your weather station to openweathermap.org:
http://openweathermap.org/stations
"""
def fetch_IP_geodata():
    """Retrieve geo data keyed by our IP address from ip-api.com.

    WARNING: the API blocks any IP doing 150+ requests/minute.
    If blocked, visit http://ip-api.com/docs/unban
    Documentation: http://ip-api.com/docs/api:json

    Returns a dict decoded from the API's JSON response, or None if
    ANY errors occur or the service is unavailable.
    """
    api_url = 'http://ip-api.com/json'
    response = requests.get(api_url)
    if not response:
        return None
    return response.json()
def fetch_local_weather(lat, lon, API_key=""):
    """Retrieve current weather from api.openweathermap.org.

    WARNING: the free API allows a maximum of 60 requests/minute.
    Documentation: http://openweathermap.org/current#geo

    lat, lon -- coordinates of the location to query.
    API_key  -- optional OpenWeatherMap API key; when empty, a built-in
                default key is used.

    Returns a dict decoded from the API's JSON response, or None if ANY
    errors occur or the service is unavailable.
    """
    API_URL = 'http://api.openweathermap.org/data/2.5/weather'
    DEFAULT_API_KEY = 'e5591c0f23cf2f87b7854d06192b36af'
    # Bug fix: honor a caller-supplied key; previously the API_key
    # parameter was accepted but silently ignored.
    local_params = dict(lat=lat, lon=lon, APPID=API_key or DEFAULT_API_KEY)
    response = requests.get(API_URL, params=local_params)
    if response:
        local_weather = response.json()
        # The API reports errors in-band via the 'cod' field.
        if local_weather['cod'] == 200:
            return local_weather
    return None
def fetch_local_forecast(lat, lon, us_zip="", API_key=""):
    """Retrieve the 5 day forecast from api.openweathermap.org.

    Documentation: http://openweathermap.org/forecast5

    lat, lon -- coordinates of the location to query.
    us_zip   -- currently unused; kept for interface compatibility.
    API_key  -- optional OpenWeatherMap API key; when empty, a built-in
                default key is used.

    Returns a dict decoded from the API's JSON response, or None if ANY
    errors occur or the service is unavailable.
    """
    API_URL = 'http://api.openweathermap.org/data/2.5/forecast'
    DEFAULT_API_KEY = 'e5591c0f23cf2f87b7854d06192b36af'
    # Bug fix: honor a caller-supplied key; previously the API_key
    # parameter was accepted but silently ignored.
    local_params = dict(lat=lat, lon=lon, APPID=API_key or DEFAULT_API_KEY)
    response = requests.get(API_URL, params=local_params)
    if response:
        local_forecast = response.json()
        # TODO: find the schema for this data and test 'cod' == 200,
        # mirroring fetch_local_weather.
        if local_forecast:
            return local_forecast
    return None
def to_fahrenheit(celsius_temp):
    """Convert a temperature in degrees Celsius to degrees Fahrenheit."""
    fahrenheit = 32 + 1.8 * celsius_temp
    return fahrenheit


def to_celsius(kelvin_temp):
    """Convert a temperature in kelvin to degrees Celsius."""
    ABSOLUTE_ZERO_OFFSET = 273.15
    return kelvin_temp - ABSOLUTE_ZERO_OFFSET
if __name__ == '__main__':
    # Smoke-test driver: geolocate by IP, then fetch current weather and
    # the 5-day forecast for that location.
    # Do some Tests:
    # Grab our network connection's geographic data:
    geodata = fetch_IP_geodata()
    if (geodata):
        print("Geo Data: ")
        print(geodata)
    else:
        print("ERROR: Bad Geo Data Request: None")
    # Now lets use our geodata to get some local weather information:
    # NOTE(review): if fetch_IP_geodata returned None, the subscript
    # below raises TypeError before the error branch can run -- consider
    # exiting early when geodata is None.
    local_weather = fetch_local_weather(geodata['lat'], geodata['lon'])
    if (local_weather):
        print("Local Weather Data: ")
        print(local_weather)
    else:
        print("ERROR: Bad Local Weather Request: None")
    # Next lets use our geodata to get the 5-day forecast:
    local_forecast = fetch_local_forecast(geodata['lat'], geodata['lon'])
    if (local_forecast):
        print("Local Forecast Data: ")
        print(local_forecast)
    else:
        print("ERROR: Bad Forecast Request: None")
|
Sonophoto/PythonNotes
|
weatherLookup/weatherLookup.py
|
Python
|
bsd-2-clause
| 3,359
|
[
"VisIt"
] |
552d5df6f6ac3543a79dcb0619132d0697c1d64cc3ef5e195615ab9c6a916061
|
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
import itk
import module_kits.itk_kit as itk_kit
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
class levelSetMotionRegistration(ScriptedConfigModuleMixin, ModuleBase):
    """DeVIDE module wrapping ITK's LevelSetMotionRegistrationFilter.

    Takes a fixed and a moving 3D float image; the moving image is first
    histogram-matched to the fixed image, then registered.  The output
    is the resulting deformation field.
    """

    def __init__(self, module_manager):
        ModuleBase.__init__(self, module_manager)

        # User-tunable defaults, exposed through the scripted config UI.
        self._config.numberOfIterations = 50
        self._config.gradSmoothStd = 1.0
        self._config.alpha = 0.1
        self._config.idiffThresh = 0.001

        configList = [
            ('Number of iterations:', 'numberOfIterations',
             'base:int', 'text',
             'Number of iterations for the Demons registration to run.'),
            ('Gradient smoothing standard deviation:', 'gradSmoothStd',
             'base:float', 'text',
             'The standard deviation of the Gaussian kernel in physical '
             'units that will be '
             'used to smooth the images before calculating gradients.'),
            ('Stability parameter alpha:', 'alpha', 'base:float', 'text',
             'Used to stabilise small gradient magnitude values. Set to '
             'approximately 0.04% of intensity range of input images.'),
            ('Intensity difference threshold:', 'idiffThresh',
             'base:float', 'text',
             'Voxels differing with less than this threshold are considered '
             'equal')]

        # input 1 is fixed, input 2 is moving
        # matcher.SetInput(moving)
        # matcher.SetReferenceImage(fixed)

        # Histogram-match the moving image to the fixed image so the
        # registration compares comparable intensity ranges.
        if3 = itk.Image.F3
        self._matcher = itk.HistogramMatchingImageFilter[if3,if3].New()
        self._matcher.SetNumberOfHistogramLevels(1024)
        self._matcher.SetNumberOfMatchPoints(7)
        self._matcher.ThresholdAtMeanIntensityOn()

        ivf3 = itk.Image.VF33
        ls = itk.LevelSetMotionRegistrationFilter[if3, if3, ivf3].New()
        self._levelSetMotion = ls
        # The registration consumes the matched moving image.
        self._levelSetMotion.SetMovingImage(self._matcher.GetOutput())

        # we should get a hold of GetElapsedIterations...
        # DenseFiniteDifference -> PDEDeformableRegistration -> LevelSetMotion
        # Dense still has it, PDE onwards doesn't. Dense is templated on
        # input and output, PDE on two image types and a deformation field...

        # Report registration progress (current metric) to the UI.
        itk_kit.utils.setupITKObjectProgress(
            self, self._levelSetMotion, 'LevelSetMotionRegistrationFilter',
            'Performing registration, metric = %.2f',
            ('GetMetric()',))

        ScriptedConfigModuleMixin.__init__(
            self, configList,
            {'Module (self)' : self,
             'LevelSetMotionRegistrationFilter' : self._levelSetMotion,
             'itkHistogramMatchingImageFilter' : self._matcher})

        self.sync_module_logic_with_config()

    def close(self):
        # we play it safe... (the graph_editor/module_manager should have
        # disconnected us by now)
        for input_idx in range(len(self.get_input_descriptions())):
            self.set_input(input_idx, None)

        # this will take care of all display thingies
        ScriptedConfigModuleMixin.close(self)
        # and the baseclass close
        ModuleBase.close(self)

        # remove all bindings
        del self._levelSetMotion
        del self._matcher

    def execute_module(self):
        # Pulling the output drives the whole ITK pipeline.
        self.get_output(0).Update()

    def get_input_descriptions(self):
        return ('Fixed image (ITK 3D Float)', 'Moving image (ITK 3D Float)')

    def set_input(self, idx, inputStream):
        # Input 0: fixed image (also the histogram reference).
        # Input 1: moving image, routed through the matcher.
        if idx == 0:
            self._matcher.SetReferenceImage(inputStream)
            self._levelSetMotion.SetFixedImage(inputStream)
        else:
            self._matcher.SetInput(inputStream)

    def get_output_descriptions(self):
        return ('Deformation field (ITK 3D Float vectors)',)

    def get_output(self, idx):
        return self._levelSetMotion.GetOutput()

    def config_to_logic(self):
        # Push UI config values into the ITK filter.
        self._levelSetMotion.SetNumberOfIterations(
            self._config.numberOfIterations)
        self._levelSetMotion.SetGradientSmoothingStandardDeviations(
            self._config.gradSmoothStd)
        self._levelSetMotion.SetAlpha(self._config.alpha)
        self._levelSetMotion.SetIntensityDifferenceThreshold(
            self._config.idiffThresh)

    def logic_to_config(self):
        # Pull the current filter state back into the UI config.
        self._config.numberOfIterations = self._levelSetMotion.\
                                          GetNumberOfIterations()
        self._config.gradSmoothStd = \
            self._levelSetMotion.\
            GetGradientSmoothingStandardDeviations()
        self._config.alpha = \
            self._levelSetMotion.GetAlpha()
        self._config.idiffThresh = \
            self._levelSetMotion.\
            GetIntensityDifferenceThreshold()
|
nagyistoce/devide
|
modules/insight/levelSetMotionRegistration.py
|
Python
|
bsd-3-clause
| 5,044
|
[
"Gaussian"
] |
513b0bbb5adda810739edfb0bf40d7a5fb610b76b574bb89f3fd2015d9755da7
|
"""Reads the GloVe word embeddings
Visit: https://nlp.stanford.edu/projects/glove/"""
import os
import numpy as np
import anna.data.utils as utils
DESTINATION = "glove"
NAME = "glove.840B.300d"
TXT_NAME = NAME + ".txt"
ZIP_NAME = NAME + ".zip"
URL = "http://nlp.stanford.edu/data/" + ZIP_NAME
def fetch_and_parse(data_dir, voc_size=None):
    """Fetch and parse the GloVe word embeddings dataset.

    Args:
        data_dir (str): absolute path to the dir where datasets are stored
        voc_size (int): maximum size of the vocabulary, None for no limit

    Returns:
        voc (list[str]): list of words, matching the index in `emb`
        emb (numpy.array): array of embeddings for each word in `voc`
    """
    glove_dir = fetch(data_dir)
    return parse(glove_dir, voc_size)
def parse(glove_dir, voc_size):
    """Parse the GloVe word embeddings.

    Args:
        glove_dir (str): absolute path to the extracted word embeddings
        voc_size (int): maximum size of the vocabulary, None for no limit

    Returns:
        voc (list[str]): list of words, matching the index in `emb`
        emb (numpy.array): array of embeddings for each word in `voc`
    """
    voc = []
    emb = []
    words = set()
    glove_path = os.path.join(glove_dir, TXT_NAME)
    with open(glove_path) as f:
        for line in f:
            parts = line.split(" ")
            word = parts[0]
            # Keep only the first embedding seen for each token.
            if word not in words:
                words.add(word)
                voc.append(word)
                emb.append([float(n) for n in parts[1:]])
                # Bug fix: guard against voc_size=None ("no limit" per the
                # docstring).  The previous `len(words) >= voc_size` broke
                # that contract (TypeError on Python 3; truncation after
                # one word on Python 2, where int >= None is True).
                if voc_size is not None and len(words) >= voc_size:
                    break
    return utils.add_special_tokens(voc, np.array(emb))
def fetch(data_dir):
    """Fetch and extract the pre-trained GloVe word vectors.

    Args:
        data_dir (str): absolute path to the folder where datasets are stored

    Returns:
        glove_dir (str): absolute path to the folder where glove is stored
    """
    zip_path = os.path.join(data_dir, DESTINATION, ZIP_NAME)
    extracted_path = os.path.join(data_dir, DESTINATION, TXT_NAME)
    return utils.fetch(URL, zip_path, extracted_path)
|
jpbottaro/anna
|
anna/data/dataset/glove.py
|
Python
|
mit
| 2,177
|
[
"VisIt"
] |
cadc7ec1638433fa16550677ad9ab1be51f80874521e9d1f77c77fb5b7d766d0
|
"""
example script for running a RNA-seq analysis
python rnaseq_pipeline.py rnaseq_pipeline.yaml
you will have to write a couple of functions to group the input
data in useful ways
"""
from cluster_helper.cluster import cluster_view
import sys
import yaml
from bipy.log import setup_logging, logger
from bcbio.utils import safe_makedir, file_exists
from bipy.utils import (combine_pairs, flatten, append_stem,
prepare_ref_file, replace_suffix)
from bipy.toolbox import (htseq_count, deseq, annotate, rseqc, sam)
from bcbio.broad import BroadRunner, picardrun
from bipy.toolbox.trim import Cutadapt
from bipy.toolbox.fastqc import FastQC
from bipy.toolbox.fastq import HardClipper
from bipy.toolbox.tophat import Tophat
from bipy.toolbox.rseqc import RNASeqMetrics
from bipy.plugins import StageRepository
import glob
from itertools import product, repeat, islice
import sh
import os, fnmatch
def locate(pattern, root=os.curdir):
    """Yield absolute paths of every file at or below `root` whose
    basename matches the glob `pattern`."""
    top = os.path.abspath(root)
    for dirpath, _dirnames, filenames in os.walk(top):
        matches = fnmatch.filter(filenames, pattern)
        for name in matches:
            yield os.path.join(dirpath, name)
def make_test(in_file, config, lines=1000000):
    """Write a truncated copy of `in_file` for test-pipeline runs.

    Only makes sense for text files where `lines` gives an appropriate
    number of records; for example, FASTQ files should use a multiple
    of 4.  Returns the path of the copy under results/test/data.
    """
    out_dir = os.path.join(config["dir"]["results"], "test", "data")
    safe_makedir(out_dir)
    test_name = append_stem(os.path.basename(in_file), "test")
    out_file = os.path.join(out_dir, test_name)
    with open(in_file) as src, open(out_file, "w") as dst:
        dst.writelines(islice(src, lines))
    return out_file
def _get_stage_config(config, stage):
return config["stage"][stage]
def _get_program(config, stage):
return config["stage"][stage]["program"]
def _emit_stage_message(stage, curr_files):
logger.info("Running %s on %s" % (stage, curr_files))
def main(config_file, view):
    """Run the configured RNA-seq stages over every FASTQ file found in
    the data directory, farming work out through the cluster `view`.

    config_file: path to the YAML pipeline configuration.
    view: cluster view providing a parallel `map`.
    """
    with open(config_file) as in_handle:
        config = yaml.load(in_handle)

    # make the needed directories
    # NOTE(review): relies on Python 2's eager map() for the side effect;
    # under Python 3 this map is lazy and no directories would be made.
    map(safe_makedir, config["dir"].values())

    # specific for project
    input_dir = config["dir"]["data"]
    logger.info("Loading files from %s" % (input_dir))
    input_files = list(locate("*.fq", input_dir))
    input_files += list(locate("*.fastq", input_dir))
    logger.info("Input files: %s" % (input_files))

    results_dir = config["dir"]["results"]
    safe_makedir(results_dir)

    # make the stage repository
    repository = StageRepository(config)
    logger.info("Stages found: %s" % (repository.plugins))

    # Optional fast mode: run on truncated copies of the inputs and keep
    # the results in a separate subdirectory.
    if config.get("test_pipeline", False):
        logger.info("Running a test pipeline on a subset of the reads.")
        results_dir = os.path.join(results_dir, "test_pipeline")
        config["dir"]["results"] = results_dir
        safe_makedir(results_dir)
        curr_files = map(make_test, input_files, [config] * len(input_files))
        logger.info("Converted %s to %s. " % (input_files, curr_files))
    else:
        curr_files = input_files

    # Stages run in the order listed under config["run"]; curr_files is
    # threaded from one stage to the next.
    logger.info("Running RNASeq alignment pipeline on %s." % (curr_files))
    for stage in config["run"]:
        if stage == "fastqc":
            # Quality control report; does not change curr_files.
            logger.info("Running fastqc on %s." % (curr_files))
            stage_runner = FastQC(config)
            view.map(stage_runner, curr_files)

        if stage == "cutadapt":
            # Adapter trimming on paired-up files.
            curr_files = combine_pairs(curr_files)
            logger.info("Running cutadapt on %s." % (curr_files))
            stage_runner = Cutadapt(config)
            curr_files = view.map(stage_runner, curr_files)

        if stage == "tophat":
            # Alignment, then sort/convert/index the resulting BAMs.
            logger.info("Running Tophat on %s." % (curr_files))
            #tophat = repository["tophat"](config)
            tophat = Tophat(config)
            tophat_outputs = view.map(tophat, curr_files)
            sortsam = view.map(sam.coordinate_sort_sam, tophat_outputs,
                               [config] * len(tophat_outputs))
            bamfiles = view.map(sam.sam2bam, sortsam)
            bamsort = view.map(sam.bamsort, bamfiles)
            view.map(sam.bamindex, bamsort)
            final_bamfiles = bamsort
            curr_files = tophat_outputs

        if stage == "disambiguate":
            logger.info("Disambiguating %s." % (curr_files))
            disambiguate = repository[stage](config)
            view.map(disambiguate, curr_files)

        if stage == "htseq-count":
            # NOTE(review): uses `bamfiles`, which is only defined when the
            # "tophat" stage ran earlier in config["run"] -- NameError
            # otherwise.
            logger.info("Running htseq-count on %s." % (bamfiles))
            name_sorted = view.map(sam.bam_name_sort, bamfiles)
            curr_files = view.map(sam.bam2sam, name_sorted)
            htseq_args = zip(*product(curr_files, [config], [stage]))
            htseq_outputs = view.map(htseq_count.run_with_config,
                                     *htseq_args)
            htseq_count.combine_counts(htseq_outputs)

        if stage == "rnaseq_metrics":
            logger.info("Calculating RNASeq metrics on %s." % (curr_files))
            #coverage = repository[stage](config)
            coverage = RNASeqMetrics(config)
            view.map(coverage, curr_files)

        if stage == "hard_clip":
            logger.info("Trimming from the beginning of reads on %s." % (curr_files))
            hard_clipper = HardClipper(config)
            curr_files = view.map(hard_clipper, curr_files)

        if stage == "rseqc":
            # NOTE(review): also depends on `final_bamfiles` and
            # `tophat_outputs` from the "tophat" stage.
            logger.info("Running rseqc on %s." % (curr_files))
            #rseq_args = zip(*product(curr_files, [config]))
            rseq_args = zip(*product(final_bamfiles, [config]))
            view.map(rseqc.bam_stat, *rseq_args)
            # Downsample before the expensive gene-body coverage plot.
            down_args = zip(*product(final_bamfiles, [40000000]))
            down_bam = view.map(sam.downsample_bam, *down_args)
            view.map(rseqc.genebody_coverage, down_bam,
                     [config] * len(down_bam))
            view.map(rseqc.junction_annotation, *rseq_args)
            view.map(rseqc.junction_saturation, *rseq_args)
            RPKM_args = zip(*product(final_bamfiles, [config]))
            RPKM_count_out = view.map(rseqc.RPKM_count, *RPKM_args)
            RPKM_count_fixed = view.map(rseqc.fix_RPKM_count_file,
                                        RPKM_count_out)
            """
            annotate_args = zip(*product(RPKM_count_fixed,
                                         ["gene_id"],
                                         ["ensembl_gene_id"],
                                         ["human"]))
            view.map(annotate.annotate_table_with_biomart,
                     *annotate_args)
            """
            view.map(rseqc.RPKM_saturation, *rseq_args)
            curr_files = tophat_outputs
if __name__ == "__main__":
    # Entry point: the first positional argument is the YAML config file.
    main_config_file = sys.argv[1]
    with open(main_config_file) as config_in_handle:
        startup_config = yaml.load(config_in_handle)
    # Configure logging before any pipeline work happens.
    setup_logging(startup_config)
    cluster_config = startup_config["cluster"]
    cores_per_job = cluster_config.get("cores_per_job", 1)
    # Hold a cluster view open for the duration of the whole pipeline.
    with cluster_view(cluster_config["scheduler"],
                      cluster_config["queue"],
                      cluster_config["cores"],
                      cores_per_job) as view:
        main(main_config_file, view)
|
roryk/bipy
|
examples/rnaseq/rnaseq_pipeline.py
|
Python
|
mit
| 7,478
|
[
"HTSeq"
] |
2c3cac3920085b4a89156c1ac835253655fd73cc2df7cd4ff1ef3a6be215388f
|
from __future__ import division, print_function, absolute_import
import numpy as np
import itertools as itr
import amitgroup as ag
from pnet.layer import Layer
import pnet
import pnet.matrix
from scipy import linalg
from sklearn.utils.extmath import logsumexp, pinvh
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
    """Log-density under spherical Gaussians: broadcast each component's
    scalar variance across all dimensions and defer to the diagonal
    implementation."""
    cv = covars.copy()
    if cv.ndim == 1:
        cv = cv[:, np.newaxis]
    if cv.shape[1] == 1:
        cv = np.tile(cv, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
n_samples, n_dim = X.shape
icv = pinvh(covars)
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 1e-5)
+ np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
- 2 * np.dot(np.dot(X, icv), means.T)
+ np.sum(means * np.dot(means, icv), 1))
return lpr
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
assert len(means) == len(covars)
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def score_samples(X, means, weights, covars, covariance_type='diag'):
    """Per-sample log-likelihood and per-component log-responsibilities.

    Returns (logprob, log_resp): logprob has shape (n_samples,) and
    log_resp has shape (n_samples, n_components); exponentiating
    log_resp gives the posterior responsibilities.
    """
    weighted = (log_multivariate_normal_density(X, means, covars,
                                                covariance_type)
                + np.log(weights))
    logprob = logsumexp(weighted, axis=1)
    #responsibilities = np.exp(lpr - logprob[:, np.newaxis])
    log_resp = weighted - logprob[:, np.newaxis]
    return logprob, log_resp
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Compute the log probability under a multivariate Gaussian distribution.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        Data points, one per row.
    means : array_like, shape (n_components, n_features)
        Mean vectors, one per component.
    covars : array_like
        Covariance parameters; the expected shape depends on
        `covariance_type`:
            (n_components, n_features)             if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'.
    covariance_type : string
        One of 'spherical', 'tied', 'diag', 'full'.  Defaults to 'diag'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Log probability of each data point under each component.
    """
    dispatch = {
        'spherical': _log_multivariate_normal_density_spherical,
        'tied': _log_multivariate_normal_density_tied,
        'diag': _log_multivariate_normal_density_diag,
        'full': _log_multivariate_normal_density_full,
    }
    return dispatch[covariance_type](X, means, covars)
@Layer.register('oriented-gaussian-parts-layer')
class OrientedGaussianPartsLayer(Layer):
def __init__(self, n_parts=1, n_orientations=1, part_shape=(6, 6),
             settings={}):
    """Layer that codes image patches against rotated Gaussian parts.

    n_parts: number of distinct (true) parts to learn.
    n_orientations: rotations per part.
    part_shape: (rows, cols) of each part patch.
    settings: overrides for the defaults below; unknown keys raise.

    NOTE(review): the mutable default `settings={}` is safe here because
    it is only iterated, never mutated.
    """
    self._num_true_parts = n_parts
    self._n_orientations = n_orientations
    self._part_shape = part_shape
    # Default hyperparameters; individually overridable via `settings`.
    self._settings = dict(polarities=1,
                          n_iter=10,
                          n_init=1,
                          seed=0,
                          standardize=True,
                          standardization_epsilon=0.001,
                          samples_per_image=40,
                          max_samples=np.inf,
                          max_covariance_samples=None,
                          logratio_thresh=-np.inf,
                          std_thresh=0.002,
                          std_thresh_frame=0,
                          covariance_type='tied',
                          min_covariance=1e-3,
                          uniform_weights=True,
                          channel_mode='separate',
                          normalize_globally=False,
                          code_bkg=False,
                          whitening_epsilon=None,
                          min_count=5,
                          coding='hard',
                          )

    # Floor used by 'soft' coding for log-responsibilities.
    self._min_log_prob = np.log(0.0005)
    self._extra = {}

    # Reject unknown settings keys early with a clear error.
    for k, v in settings.items():
        if k not in self._settings:
            raise ValueError("Unknown settings: {}".format(k))
        else:
            self._settings[k] = v

    # Learned state; filled in during training.
    self._train_info = None
    self._keypoints = None
    self._means = None
    self._covar = None
    self._weights = None
    self._visparts = None
    self._whitening_matrix = None

    self.w_epsilon = self._settings['whitening_epsilon']
@property
def num_parts(self):
    """Total number of parts: true parts times orientations."""
    return self._n_orientations * self._num_true_parts

@property
def part_shape(self):
    """Spatial shape (rows, cols) of a part patch."""
    return self._part_shape
def _prepare_covariance(self):
cov_type = self._settings['covariance_type']
if cov_type == 'tied':
covar = self._covar
elif cov_type == 'full-perm':
covar = np.concatenate([
np.tile(cov, (self._num_true_parts, 1, 1))
for cov in self._covar
])
elif cov_type == 'diag':
covar = self._covar.reshape((-1,) + self._covar.shape[-1:])
elif cov_type == 'diag-perm':
covar = np.concatenate([
np.tile(cov, (self._num_true_parts, 1))
for cov in self._covar
])
else:
covar = self._covar.reshape((-1,) + self._covar.shape[-2:])
return covar
def preprocess(self):
    """Hook run before extraction; currently only resets the
    log-likelihood threshold."""
    self._extra['loglike_thresh'] = -np.inf

    # Dead code (`if 0:`): an experimental probe that scored an all-zero
    # patch against the parts and the background model.  Never executed.
    if 0:
        X0 = 0.0 * np.ones((1, np.prod(self._part_shape)))
        covar = self._prepare_covariance()
        K = self._means.shape[0]
        logprob, _ = score_samples(X0,
                                   self._means.reshape((K, -1)),
                                   self._weights.ravel(),
                                   covar,
                                   self.gmm_cov_type,
                                   )

        bkg_means = self._extra['bkg_mean'].ravel()[np.newaxis]
        bkg_covars = self._extra['bkg_covar'][np.newaxis]
        bkg_weights = np.ones(1)
        bkg_logprob, _ = score_samples(X0,
                                       bkg_means,
                                       bkg_weights,
                                       bkg_covars,
                                       'full',
                                       )

@property
def pos_matrix(self):
    # Position matrix for this layer's patch shape.  conv_pos_matrix is
    # defined outside this view -- presumably on the Layer base class;
    # confirm before relying on its semantics.
    return self.conv_pos_matrix(self._part_shape)
def __extract(self, X, covar, img_stds):
    """Code one batch of patches against the learned parts.

    X: batch of patches (first axis is the batch); covar: covariances
    pre-flattened by _prepare_covariance; img_stds: per-image standard
    deviations, used only when 'normalize_globally' is set.
    Returns part indices for 'hard' coding, or per-part float
    activations for 'triangle'/'soft' coding.
    """
    flatXij_patch = X.reshape((X.shape[0], -1))

    # Flag low-contrast patches; they are coded as background below.
    if self._settings['normalize_globally']:
        not_ok = (flatXij_patch.std(-1) / img_stds <= self._settings['std_thresh'])
    else:
        not_ok = (flatXij_patch.std(-1) <= self._settings['std_thresh'])

    if self._settings['standardize']:
        flatXij_patch = self._standardize_patches(flatXij_patch)

    if self._settings['whitening_epsilon'] is not None:
        flatXij_patch = self.whiten_patches(flatXij_patch)

    K = self._means.shape[0]
    logprob, log_resp = score_samples(flatXij_patch,
                                      self._means.reshape((K, -1)),
                                      self._weights.ravel(),
                                      covar,
                                      self.gmm_cov_type,
                                      )
    coding = self._settings['coding']
    if coding == 'hard':
        # Winner-take-all: the most responsible component wins;
        # low-contrast patches get the background code.
        C = log_resp.argmax(-1)
        if self._settings['code_bkg']:
            bkg_part = self.num_parts
        else:
            bkg_part = -1
        C[not_ok] = bkg_part
        return C
    elif coding == 'triangle':
        #means = ag.apply_once(np.mean, resp, [1])
        # Triangle coding: activation is how far below the mean distance
        # each component lies, clipped at zero.
        dist = -log_resp
        means = ag.apply_once(np.mean, dist, [1])
        f = np.maximum(means - dist, 0)
        f[not_ok] = 0.0
        return f
    elif coding == 'soft':
        # Log-responsibilities floored at _min_log_prob.
        f = np.maximum(self._min_log_prob, log_resp)
        f[not_ok] = self._min_log_prob
        return f
def extract(self, X):
    """Slide over every valid patch position of X and code each patch.

    Returns (feature_map, num_features).  The feature map layout depends
    on the configured coding: 'hard' yields integer part indices (with
    -1 or a dedicated index for background); 'triangle'/'soft' yield a
    float activation per part, flattened over channels.
    """
    assert self.trained, "Must be trained before calling extract"

    channel_mode = self._settings['channel_mode']

    ps = self._part_shape
    if channel_mode == 'together':
        C = 1
    elif channel_mode == 'separate':
        C = X.shape[-1]

    # Valid-convolution output size for the patch shape.
    dim = (X.shape[1]-ps[0]+1, X.shape[2]-ps[1]+1, C)

    EX_N = min(10, len(X))
    #ex_log_probs = np.zeros((EX_N,) + dim)

    #ex_log_probs2 = []

    covar = self._prepare_covariance()

    img_stds = ag.apply_once(np.std, X, [1, 2, 3], keepdims=False)
    #img_stds = None

    coding = self._settings['coding']
    if coding == 'hard':
        # -1 marks background / not-yet-filled positions.
        feature_map = -np.ones((X.shape[0],) + dim, dtype=np.int64)
    elif coding == 'triangle':
        feature_map = np.zeros((X.shape[0],) + dim + (self.num_parts,), dtype=np.float32)
    elif coding == 'soft':
        feature_map = np.empty((X.shape[0],) + dim + (self.num_parts,), dtype=np.float32)
        feature_map[:] = self._min_log_prob

    for i, j in itr.product(range(dim[0]), range(dim[1])):
        Xij_patch = X[:, i:i+ps[0], j:j+ps[1]]
        if channel_mode == 'together':
            feature_map[:, i, j, 0] = self.__extract(Xij_patch, covar,
                                                     img_stds)
        elif channel_mode == 'separate':
            # NOTE(review): the dtype assert below means 'separate' mode
            # only supports 'hard' (integer) coding -- confirm intended.
            for c in range(C):
                f = self.__extract(Xij_patch[...,c], covar, img_stds)
                #print(f.shape)
                #print(f[:10])
                assert f.dtype in [np.int64, np.int32]
                # Offset part ids so each channel occupies its own band;
                # -1 stays the shared background code.
                f[f != -1] += c * self.num_parts
                feature_map[:, i, j, c] = f

    #self.__TEMP_ex_log_probs = ex_log_probs
    #self.__TEMP_ex_log_probs2 = np.concatenate(ex_log_probs2)

    if coding == 'hard':
        num_features = self.num_parts * C
        if self._settings['code_bkg']:
            # Background gets its own feature id.
            num_features += 1
        return (feature_map, num_features)
    else:
        # Fold the channel axis into the feature axis.
        return (feature_map.reshape(feature_map.shape[:3] + (-1,)), self.num_parts * C)
@property
def trained(self):
return self._means is not None
# TODO: Needs work
def _standardize_patches(self, flat_patches):
means = np.apply_over_axes(np.mean, flat_patches, [1])
variances = np.apply_over_axes(np.var, flat_patches, [1])
epsilon = self._settings['standardization_epsilon']
return (flat_patches - means) / np.sqrt(variances + epsilon)
def whiten_patches(self, flat_patches):
return np.dot(self._whitening_matrix, flat_patches.T).T
    def train(self, X, Y=None, OriginalX=None):
        """Sample patches from ``X``, optionally standardize/whiten them, and
        fit the rotation-invariant part model.

        ``Y`` and ``OriginalX`` are accepted for interface compatibility only.
        """
        raw_originals, the_rest = self._get_patches(X)
        self._train_info = {}
        self._train_info['example_patches2'] = raw_originals[:10]
        print(raw_originals.shape)
        # Standardize them
        old_raw_originals = raw_originals.copy()
        if self._settings['standardize']:
            # Per-patch standardization over all axes except sample index
            # and orientation.
            mu = ag.apply_once(np.mean, raw_originals, [1, 2, 3, 4])
            variances = ag.apply_once(np.var, raw_originals, [1, 2, 3, 4])
            epsilon = self._settings['standardization_epsilon']
            raw_originals = (raw_originals - mu) / np.sqrt(variances + epsilon)
        # Estimate the patch covariance for (optional) ZCA whitening.
        pp = raw_originals.reshape((np.prod(raw_originals.shape[:2]), -1))
        sigma = np.dot(pp.T, pp) / len(pp)
        self._extra['sigma'] = sigma
        if self.w_epsilon is not None:
            U, S, _ = np.linalg.svd(sigma)
            shrinker = np.diag(1 / np.sqrt(S + self.w_epsilon))
            #self._whitening_matrix = U @ shrinker @ U.T
            self._whitening_matrix = np.dot(U, np.dot(shrinker, U.T))
        else:
            # No whitening requested: keep the identity transform.
            self._whitening_matrix = np.eye(sigma.shape[0])
        pp = self.whiten_patches(pp)
        raw_originals = pp.reshape(raw_originals.shape)
        print(raw_originals.shape)
        self.train_from_samples(raw_originals, the_rest)
        # TODO
        if 0:
            f = self.extract(lambda x: x, old_raw_originals[:,0])
            feat = f[0].ravel()
            ag.info('bincounts', np.bincount(feat[feat!=-1], minlength=f[1]))
        self.preprocess()
@property
def gmm_cov_type(self):
"""The covariance type that should be used for sklearn's GMM"""
c = self._settings['covariance_type']
if c in ['full-full', 'full-perm']:
return 'full'
elif c in ['diag-perm', 'ones']:
return 'diag'
else:
return c
    def rotate_indices(self, mm, ORI):
        """Roll each component's orientation entries so the orientation with a
        canonical dominant gradient direction comes first.

        ``mm`` is the fitted permutation GMM (mutated in place); ``ORI`` is
        the number of orientations.
        """
        import scipy.signal
        # Central-difference kernel for gradient estimation.
        kern = np.array([[-1, 0, 1]]) / np.sqrt(2)
        orientations = 8
        for p in range(self._num_true_parts):
            main_rot = None
            for i, part_ in enumerate(mm.means_[p]):
                part = part_.reshape(self._part_shape)
                gr_x = scipy.signal.convolve(part, kern, mode='same')
                gr_y = scipy.signal.convolve(part, kern.T, mode='same')
                # Gradient angles on the interior (border is unreliable).
                a = np.arctan2(gr_y[1:-1, 1:-1], gr_x[1:-1, 1:-1])
                # Quantize angles into `orientations` bins.
                ori_index = orientations * (a + 1.5*np.pi) / (2 * np.pi)
                indices = np.round(ori_index).astype(np.int64)
                theta = (orientations - indices) % orientations
                # This is not the most robust way, but it works
                counts = np.bincount(theta.ravel(), minlength=8)
                if counts.argmax() == 0:
                    main_rot = i
                    break
            # NOTE(review): if no orientation wins bin 0, main_rot stays None
            # and np.roll below raises -- confirm this cannot happen.
            II = np.roll(np.arange(ORI), -main_rot)
            mm.means_[p, :] = mm.means_[p, II]
            mm.covars_[p, :] = mm.covars_[p, II]
            mm.weights_[p, :] = mm.weights_[p, II]
    def train_from_samples(self, raw_originals, the_rest):
        """Fit the permutation GMM on sampled patches and store the learned
        means/covariances/weights, visual parts and a background model.

        ``raw_originals`` has shape (samples, ORI*POL, ...); ``the_rest``
        holds rejected (low-contrast) patches used for the background model.
        """
        ORI = self._n_orientations
        POL = self._settings['polarities']
        P = ORI * POL
        # All cyclic shifts of X, stacked as rows.
        def cycles(X):
            c = [np.concatenate([X[i:], X[:i]]) for i in range(len(X))]
            return np.asarray(c)
        RR = np.arange(ORI)
        PP = np.arange(POL)
        # Index tuples for every (polarity, rotation) cyclic combination.
        II = [list(itr.product(PPi, RRi))
              for PPi in cycles(PP)
              for RRi in cycles(RR)]
        lookup = dict(zip(itr.product(PP, RR), itr.count()))
        n_init = self._settings['n_init']
        n_iter = self._settings['n_iter']
        seed = self._settings['seed']
        covar_limit = self._settings['max_covariance_samples']
        num_angle = ORI
        d = np.prod(raw_originals.shape[2:])
        # NOTE(review): `permutation` is never used after this loop, and for
        # a == 0 it reads uninitialized memory (np.empty + roll of row -1);
        # the condition looks like it was meant to be `a == 0`. Dead code --
        # confirm and remove.
        permutation = np.empty((num_angle, num_angle * d), dtype=np.int_)
        for a in range(num_angle):
            if a == 2:
                permutation[a] = np.arange(num_angle * d)
            else:
                permutation[a] = np.roll(permutation[a-1], d)
        permutations = np.asarray([[lookup[ii] for ii in rows] for rows in II])
        from pnet.permutation_gmm import PermutationGMM
        mm = PermutationGMM(n_components=self._num_true_parts,
                            permutations=permutations,
                            n_iter=n_iter,
                            n_init=n_init,
                            random_state=seed,
                            thresh=1e-5,
                            covariance_type=self._settings['covariance_type'],
                            covar_limit=covar_limit,
                            min_covar=self._settings['min_covariance'],
                            params='wmc',
                            )
        Xflat = raw_originals.reshape(raw_originals.shape[:2] + (-1,))
        print(Xflat.shape)
        mm.fit(Xflat)
        comps = mm.predict(Xflat)
        # Floating counts
        # NOTE(review): scipy.misc.logsumexp was removed in newer SciPy;
        # scipy.special.logsumexp is the current location.
        from scipy.misc import logsumexp
        logprob, log_resp = mm.score_block_samples(Xflat)
        fcounts = np.exp(logsumexp(log_resp[...,0], axis=0))
        # Mean over axis 0 that tolerates an empty selection.
        def mean0(x):
            if x.shape[0] == 0:
                return np.zeros(x.shape[1:])
            else:
                return np.mean(x, axis=0)
        # Average the (rotation-aligned) patches assigned to each component.
        visparts = np.asarray([
            mean0(raw_originals[comps[:, 0] == k,
                                comps[comps[:, 0] == k][:, 1]])
            for k in range(self._num_true_parts)
        ])
        if 1:
            # Keep up to EX_N example patches per part for inspection.
            EX_N = 50
            ex_shape = ((self._num_true_parts, EX_N) + self._part_shape +
                        raw_originals.shape[4:])
            ex_patches = np.empty(ex_shape)
            ex_patches[:] = np.nan
            for k in range(self._num_true_parts):
                XX = raw_originals[comps[:, 0] == k]
                rot = comps[comps[:, 0] == k, 1]
                for n in range(min(EX_N, len(XX))):
                    ex_patches[k, n] = XX[n, rot[n]]
        # Sort components by popularity and drop under-populated ones.
        counts = np.bincount(comps[:, 0])
        II = np.argsort(counts)[::-1]
        ok = counts >= self._settings['min_count']
        II = np.asarray([ii for ii in II if ok[ii]])
        counts = counts[II]
        fcounts = fcounts[II]
        means = mm.means_[II]
        weights = mm.weights_[II]
        self._visparts = visparts[II]
        self._train_info['example_patches'] = ex_patches[II]
        np.seterr(all='raise')
        n_components = len(II)
        self._num_true_parts = len(II)
        print('Kept', n_components, 'out of', mm.n_components)
        # Covariance types that will need resorting
        covtypes = ['diag', 'diag-perm', 'full', 'full-full']
        if self._settings['covariance_type'] in covtypes:
            covars = mm.covars_[II]
        else:
            covars = mm.covars_
        ag.info(':counts', list(zip(counts, fcounts)))
        # Example patches - initialize to NaN if component doesn't fill it up
        # Rotate the parts into the canonical rotation
        #if POL == 1 and False:
        #self.rotate_indices(mm, ORI)
        means_shape = (n_components * P,) + raw_originals.shape[2:]
        ag.info('means_shape', means_shape)
        self._means = means.reshape(means_shape)
        self._covar = covars
        if self._settings['uniform_weights']:
            # Set weights to uniform
            self._weights = np.ones(weights.shape)
            self._weights /= np.sum(self._weights)
        else:
            self._weights = weights
        self._train_info['counts'] = counts
        # Flatten (sample, orientation) into one leading axis.
        def collapse12(Z):
            return Z.reshape((-1,) + Z.shape[2:])
        if len(the_rest) > 0:
            XX = np.concatenate([collapse12(the_rest),
                                 collapse12(raw_originals)])
        else:
            XX = collapse12(raw_originals)
        # Clamp the diagonal of a covariance matrix from below.
        def regularize_covar(cov, min_covar=0.001):
            dd = np.diag(cov)
            return cov + np.diag(dd.clip(min=min_covar) - dd)
        # Background model: diagonal Gaussian over all collected patches.
        self._extra['bkg_mean'] = XX.mean(0)
        sample_cov = np.cov(XX.reshape((XX.shape[0], -1)).T)
        self._extra['bkg_covar'] = np.diag(np.diag(sample_cov))
        min_cov = self._settings['min_covariance']
        self._extra['bkg_covar'] = regularize_covar(self._extra['bkg_covar'],
                                                    min_cov)
    def _get_patches(self, X):
        """Randomly sample rotated (and optionally polarity-flipped) patches
        from every image in ``X``.

        Returns ``(accepted, rejected)`` arrays of shape
        (samples, ORI*POL, ...); rejection is based on a patch-contrast
        (standard deviation) threshold.
        """
        samples_per_image = self._settings['samples_per_image']
        the_originals = []
        the_rest = []
        ag.info("Extracting patches from")
        ps = self._part_shape
        channel_mode = self._settings['channel_mode']
        ORI = self._n_orientations
        POL = self._settings['polarities']
        assert POL in (1, 2), "Polarities must be 1 or 2"
        # LEAVE-BEHIND
        # Make it square, to accommodate all types of rotations
        size = X.shape[1:3]
        new_side = np.max(size)
        new_size = [new_side + (new_side - X.shape[1]) % 2,
                    new_side + (new_side - X.shape[2]) % 2]
        from skimage import transform
        for n, img in enumerate(X):
            #print(X.shape)
            #print(img.shape, n)
            img_padded = ag.util.pad_to_size(img, (new_size[0], new_size[1],) + X.shape[3:])
            #print(img_padded.shape)
            pad = [(new_size[i]-size[i])//2 for i in range(2)]
            # Pre-rotate the whole image once per orientation.
            angles = np.arange(0, 360, 360 / ORI)
            radians = angles*np.pi/180
            all_img = np.asarray([
                transform.rotate(img_padded,
                                 angle,
                                 resize=False,
                                 mode='nearest')
                for angle in angles])
            # Add inverted polarity too
            if POL == 2:
                all_img = np.concatenate([all_img, 1-all_img])
            rs = np.random.RandomState(0)
            # Set up matrices that will translate a position in the canonical
            # image to the rotated images. This way, we're not rotating each
            # patch on demand, which will end up slower.
            center_adjusts = [ps[0] % 2,
                              ps[1] % 2]
            offset = (np.asarray(new_size) - center_adjusts) / 2
            matrices = [pnet.matrix.translation(offset[0], offset[1]) *
                        pnet.matrix.rotation(a) *
                        pnet.matrix.translation(-offset[0], -offset[1])
                        for a in radians]
            # Add matrices for the polarity flips too, if applicable
            matrices *= POL
            # This avoids hitting the outside of patches, even after rotating.
            # The 15 here is fairly arbitrary
            avoid_edge = int(1 + np.max(ps)*np.sqrt(2))
            # These indices represent the center of patches
            range_x = range(pad[0]+avoid_edge, pad[0]+img.shape[0]-avoid_edge)
            range_y = range(pad[1]+avoid_edge, pad[1]+img.shape[1]-avoid_edge)
            indices = list(itr.product(range_x, range_y))
            rs.shuffle(indices)
            i_iter = itr.cycle(iter(indices))
            minus_ps = [-(ps[i]//2) for i in range(2)]
            plus_ps = [minus_ps[i] + ps[i] for i in range(2)]
            max_samples = self._settings['max_samples']
            consecutive_failures = 0
            # We want rotation of 90 deg to have exactly the same pixels. For
            # this, we need adjust the center of the patch a bit before
            # rotating.
            std_thresh = self._settings['std_thresh']
            img_std = np.std(img_padded)
            ag.info('Image #{}, collected {} patches and rejected {} (std={}'.format(
                n, len(the_originals), len(the_rest), img_std))
            for sample in range(samples_per_image):
                TRIES = 10000
                for tries in range(TRIES):
                    x, y = next(i_iter)
                    fr = self._settings['std_thresh_frame']
                    # Inner window (frame trimmed) used for the std test.
                    sel0_inner = [0,
                                  slice(x+minus_ps[0]+fr, x+plus_ps[0]-fr),
                                  slice(y+minus_ps[1]+fr, y+plus_ps[1]-fr)]
                    if channel_mode == 'separate':
                        ii = rs.randint(X.shape[3])
                        sel0_inner += [ii]
                    from copy import copy
                    sel1_inner = copy(sel0_inner)
                    sel1_inner[0] = slice(None)
                    XY = np.array([x, y, 1])[:, np.newaxis]
                    # Now, let's explore all orientations
                    if channel_mode == 'together':
                        vispatch = np.zeros((ORI * POL,) + ps + X.shape[3:])
                    elif channel_mode == 'separate':
                        vispatch = np.zeros((ORI * POL,) + ps + (1,))
                    br = False
                    for ori in range(ORI * POL):
                        # Map the canonical center into the rotated image.
                        p = np.dot(matrices[ori], XY)
                        # The epsilon makes the truncation safer
                        ip = [int(round(float(p[i]))) for i in range(2)]
                        selection = [ori,
                                     slice(ip[0] + minus_ps[0],
                                           ip[0] + plus_ps[0]),
                                     slice(ip[1] + minus_ps[1],
                                           ip[1] + plus_ps[1])]
                        if channel_mode == 'separate':
                            selection += [slice(ii, ii+1)]
                        orig = all_img[selection]
                        try:
                            vispatch[ori] = orig
                        except ValueError:
                            # Window fell outside the image: retry position.
                            br = True
                            break
                    if br:
                        continue
                    # Randomly rotate this patch, so that we don't bias
                    # the unrotated (and possibly unblurred) image
                    shift = rs.randint(ORI)
                    vispatch[:ORI] = np.roll(vispatch[:ORI], shift, axis=0)
                    if POL == 2:
                        vispatch[ORI:] = np.roll(vispatch[ORI:], shift, axis=0)
                    #if all_img[sel0_inner].std() > std_thresh:
                    all_stds = ag.apply_once(np.std,
                                             all_img[sel1_inner],
                                             [1, 2],
                                             keepdims=False)
                    #if np.median(all_stds) > std_thresh:
                    #if np.median(all_stds) > std_thresh:
                    if self._settings['normalize_globally']:
                        ok = np.median(all_stds) / img_std > std_thresh
                    else:
                        ok = np.median(all_stds) > std_thresh
                    if ok:
                        the_originals.append(vispatch)
                        if len(the_originals) % 500 == 0:
                            ag.info('Samples {}/{}'.format(len(the_originals),
                                                           max_samples))
                        if len(the_originals) >= max_samples:
                            return (np.asarray(the_originals),
                                    np.asarray(the_rest))
                        consecutive_failures = 0
                        break
                    else:
                        the_rest.append(vispatch)
                    if tries == TRIES-1:
                        ag.info('WARNING: {} tries'.format(TRIES))
                        ag.info('cons', consecutive_failures)
                        consecutive_failures += 1
                        if consecutive_failures >= 10:
                            # Just give up.
                            raise ValueError('FATAL ERROR: Threshold is '
                                             'probably too high (in {})'
                                             .format(self.__class__.__name__))
        return np.asarray(the_originals), np.asarray(the_rest)
    def _vzlog_output_(self, vz):
        """Hook for vzlog visualization output; intentionally a no-op here."""
        pass
def save_to_dict(self):
d = {}
d['num_true_parts'] = self._num_true_parts
d['num_orientations'] = self._n_orientations
d['part_shape'] = self._part_shape
d['settings'] = self._settings
d['train_info'] = self._train_info
d['extra'] = self._extra
d['whitening_matrix'] = self._whitening_matrix
d['means'] = self._means
d['covar'] = self._covar
d['weights'] = self._weights
d['visparts'] = self._visparts
return d
@classmethod
def load_from_dict(cls, d):
# This code is just temporary {
num_true_parts = d.get('num_true_parts')
if num_true_parts is None:
num_true_parts = int(d['num_parts'] // d['num_orientations'])
# }
obj = cls(num_true_parts,
d['num_orientations'],
d['part_shape'],
settings=d['settings'])
obj._means = d['means']
obj._covar = d['covar']
obj._weights = d.get('weights')
obj._visparts = d.get('visparts')
obj._train_info = d.get('train_info')
obj._extra = d.get('extra', {})
obj._whitening_matrix = d['whitening_matrix']
#
obj.preprocess()
return obj
def __repr__(self):
return ('OrientedPartsLayer(num_true_parts={num_parts}, '
'n_orientations={n_orientations}, '
'part_shape={part_shape}, '
'settings={settings})'
).format(num_parts=self._num_true_parts,
n_orientations=self._n_orientations,
part_shape=self._part_shape,
settings=self._settings)
|
jiajunshen/partsNet
|
pnet/oriented_gaussian_parts_layer.py
|
Python
|
bsd-3-clause
| 30,760
|
[
"Gaussian"
] |
447516162813791702ed693bb53a709bcbb56cf45ce75c1a409f6ed8ac39c6a9
|
# -*- coding: latin-1 -*-
#
# Copyright (c) 2015-2016 Paul Bomke
# Distributed under the GNU GPL v2.
#
# This file is part of monkeyprint.
#
# monkeyprint is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# monkeyprint is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with monkeyprint. If not, see <http://www.gnu.org/licenses/>.
import monkeyprintModelViewer
import monkeyprintGuiHelper
import monkeyprintSerial
import monkeyprintPrintProcess
import monkeyprintSocketCommunication
import subprocess # Needed to call avrdude.
import vtk
import threading
import Queue
import time
import signal
import zmq
import os
import shutil
# New imports for QT.
import signal
import sys
import PyQt4
from PyQt4 import QtGui, QtCore, Qt
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
################################################################################
# Define a class for standalone without main GUI. ##############################
################################################################################
# Inherit from projector display window.
'''
class noGui(monkeyprintGuiHelper.projectorDisplay):
# Override init function. #################################################
def __init__(self, programSettings, modelCollection):
# Initialise base class gtk window.********************
monkeyprintGuiHelper.projectorDisplay.__init__(self, programSettings, modelCollection)
# Set function for window close event.
self.connect("delete-event", self.on_closing, None)
# Show the window.
self.show()
# Internalise parameters.******************************
self.modelCollection = modelCollection
self.programSettings = programSettings
# Create queues for inter-thread communication.********
# Queue for setting print progess bar.
self.queueSliceOut = Queue.Queue(maxsize=1)
self.queueSliceIn = Queue.Queue(maxsize=1)
# Queue for status infos displayed above the status bar.
self.queueStatus = Queue.Queue()
# Queue for console messages.
self.queueConsole = Queue.Queue()
# Queue list.
self.queues = [ self.queueSliceOut,
self.queueStatus ]
# Allow background threads.****************************
# Very important, otherwise threads will be
# blocked by gui main thread.
gtk.gdk.threads_init()
# Add thread listener functions to run every n ms.****
# Check if the slicer threads have finished.
# slicerListenerId = gobject.timeout_add(100, self.modelCollection.checkSlicerThreads)
# Update the progress bar, projector image and 3d view. during prints.
pollPrintQueuesId = gobject.timeout_add(50, self.pollPrintQueues)
# Create additional variables.*************************
# Flag to set during print process.
self.printRunning = True
self.programSettings['printOnRaspberry'].value = True
# Create the print window.
# self.projectorDisplay = monkeyprintGuiHelper.projectorDisplay(self.programSettings, self.modelCollection)
# Create the print process thread.
self.printProcess = monkeyprintPrintProcess.printProcess(self.modelCollection, self.programSettings, self.queueSliceOut, self.queueSliceIn, self.queueStatus, self.queueConsole)
# Start the print process.
self.printProcess.start()
# Start main loop.
self.main()
def pollPrintQueues(self):
# If slice number queue has slice number...
if self.queueSliceOut.qsize():
sliceNumber = self.queueSliceOut.get()
# Set slice view to given slice. If sliceNumber is -1 black is displayed.
#if self.projectorDisplay != None:
# self.projectorDisplay.updateImage(sliceNumber)
self.updateImage(sliceNumber)
# Set slice in queue to true as a signal to print process thread that it can start waiting.
self.queueSliceIn.put(True)
# If print info queue has info...
if self.queueStatus.qsize():
#self.progressBar.setText(self.queueStatus.get())
message = self.queueStatus.get()
if message == "destroy":
self.printRunning = False
del self.printProcess
gtk.main_quit()
self.destroy()
del self
return False
else:
return True
# Return true, otherwise function won't run again.
return True
def on_closing(self, widget, event, data):
# Get all threads.
runningThreads = threading.enumerate()
# End kill threads. Main gui thread is the first...
for i in range(len(runningThreads)):
if i != 0:
runningThreads[-1].join(timeout=10000) # Timeout in ms.
print "Slicer thread " + str(i) + " finished."
del runningThreads[-1]
# Save settings to file.
self.programSettings.saveFile()
# Remove temp directory.on_closing(
shutil.rmtree(self.programSettings['tmpDir'].value, ignore_errors=True)
# Terminate the gui.
gtk.main_quit()
return False # returning False makes "destroy-event" be signalled to the window
# Gui main function. ######################################################
def main(self):
# All PyGTK applications must have a gtk.main(). Control ends here
# and waits for an event to occur (like a key press or mouse event).
gtk.main()
'''
################################################################################
# Define a class for the main GUI. #############################################
################################################################################
class gui(QtGui.QApplication):
# *************************************************************************
# Override init function. *************************************************
# *************************************************************************
	def __init__(self, modelCollection, programSettings, console=None, filename=None, *args, **kwargs):
		"""Build the main Monkeyprint Qt application.

		Shows a splash screen (unless debugging), creates the inter-thread
		queues, starts the periodic queue-polling timers and assembles the
		main window (render view + settings column).
		"""
		# Call super class function.
		super(gui, self).__init__(sys.argv)
		# Show splash.
		if not programSettings['debug'].getValue():
			t = 3
			versionString = "Monkeyprint version " + str(programSettings['versionMajor'].getValue()) + "." + str(programSettings['versionMinor'].getValue()) + "." + str(programSettings['revision'].getValue())
			splash = monkeyprintGuiHelper.splashScreen(programSettings['installDir'].getValue() + '/logo.png', duration=t, infoString=versionString)
			self.processEvents()
			time.sleep(t)
		# Internalise parameters.
		self.modelCollection = modelCollection
		self.programSettings = programSettings
		self.console = console
		# Create signal for Strg+C TODO: check out why this works.
		signal.signal(signal.SIGINT, signal.SIG_DFL)
		# Flag to set during print process.
		self.printRunning = False
		self.slicerRunning = False
		# Create queues for inter-thread communication.
		# Queue for setting print progess bar.
		self.queueSliceOut  = Queue.Queue(maxsize=1)
		self.queueSliceIn = Queue.Queue(maxsize=1)
		self.slicerFinished = False
		# Queues for controlling the file transmission thread.
		self.queueFileTransferIn = Queue.Queue(maxsize=1)
		self.queueFileTransferOut = Queue.Queue(maxsize=1)
		# Queue for print process commands.
		# Queue for status infos displayed above the status bar.
		self.queueStatus = Queue.Queue()
		# Queue for commands sent to print process.
		self.queueCommands = Queue.Queue(maxsize=1)
		# Queue for console messages.
		self.queueConsole = Queue.Queue()
		# Is this running from Raspberry Pi or from PC?
		self.runningOnRasPi = False
		# TODO: Use this flag to combine this class and server class.
		# Get current working directory and set paths.
		self.cwd = os.getcwd()
		self.programSettings['localMkpPath'].value = self.cwd + "/currentPrint.mkp"
		# ********************************************************************
		# Add thread listener functions to run every n ms.********************
		# ********************************************************************
		# Check if the slicer threads and/or slice combiner thread have finished. Needed in model collection.
		#self.timerSlicerListener = QtCore.QTimer()
		#self.timerSlicerListener.timeout.connect(self.modelCollection.checkSlicer)
		#self.timerSlicerListener.start(100)
		# Check if slicer has finished. Needed to update slice preview.
		self.timerSlicePreviewUpdater = QtCore.QTimer()
		self.timerSlicePreviewUpdater.timeout.connect(self.checkSlicer)#self.updateSlicerStatus)
		self.timerSlicePreviewUpdater.start(100)
		# Update the progress bar, projector image and 3d view during prints.
		self.timerPrintQueueListener = QtCore.QTimer()
		self.timerPrintQueueListener.timeout.connect(self.checkPrintQueues)
		self.timerPrintQueueListener.start(50)
		# Request status info from raspberry pi.
		# pollRasPiConnectionId = gobject.timeout_add(500, self.pollRasPiConnection)
		# Request status info from slicer.
		#pollSlicerStatusId = gobject.timeout_add(100, self.pollSlicerStatus)
		# TODO: combine this with slicerListener.
		# ********************************************************************
		# Create the main GUI. ***********************************************
		# ********************************************************************
		#The Main window
		self.mainWindow = monkeyprintGuiHelper.mainWindow(self)
		self.mainWindow.showMaximized()
		self.centralWidget = QtGui.QWidget()
		self.mainWindow.setCentralWidget(self.centralWidget)
		# Create main box inside of window. This will hold the menu bar and the rest.
		self.boxMain = QtGui.QHBoxLayout()
		self.centralWidget.setLayout(self.boxMain)
		# Create menu bar and pack inside main box at top.
		self.menuBar = self.createMenuBar()#menuBar(self.programSettings, self.on_closing)
		#self.boxMain.addWidget(self.menuBar)
		# self.boxMain.pack_start(self.menuBar, expand=False, fill=False)
		# self.menuBar.show()
		# Create render box and pack inside work area box.
		self.renderView = monkeyprintModelViewer.renderView(self.programSettings)
		#self.renderView.show()
		self.boxMain.addWidget(self.renderView)
		self.renderView.renderWindowInteractor.Initialize()
		# Create settings box.
		# Make widget first to be able to control the width.
		widgetSettings = QtGui.QWidget()
		widgetSettings.setFixedWidth(250)
		self.boxMain.addWidget(widgetSettings)
		self.boxSettings =self.createSettingsBox()
		widgetSettings.setLayout(self.boxSettings)
		# Update.
		self.updateAllEntries()
		# self.createButtons()
		self.mainWindow.show()
		self.mainWindow.raise_()
	def closeNicely(self):
		"""Persist settings, stop background threads and remove temporary
		files before the application exits."""
		# Save settings to file.
		self.programSettings.saveFile('./')
		# Get all threads.
		runningThreads = threading.enumerate()
		# End threads except for main gui thread.
		for i in range(len(runningThreads)):
			if runningThreads[i].getName() != "MainThread":
				# NOTE(review): stop() is assumed to exist on the project's
				# custom thread classes; threading.Thread has no stop().
				runningThreads[i].stop()
				try:
					# NOTE(review): Thread.join takes seconds, not ms --
					# confirm that a 1000 s timeout is intended here.
					runningThreads[i].join(timeout=1000)	# Timeout in ms.
				except RuntimeError:
					print "Failed to join background thread."
				else:
					print "Background thread " + str(i) + " finished."
		# Clean up files.
		if os.path.isfile(self.programSettings['localMkpPath'].value):
			os.remove(self.programSettings['localMkpPath'].value)
		# Remove temp directory.
		shutil.rmtree(self.programSettings['tmpDir'].value, ignore_errors=True)
	def checkPrinterRunning(self):
		"""Return the flag indicating whether a print job is in progress."""
		return self.printRunning
# *************************************************************************
# Function that updates all relevant GUI elements during prints. **********
# *************************************************************************
# This runs every 100 ms as a gobject timeout function.
# Updates 3d view and projector view. Also forwards status info.
	def checkPrintQueues(self):
		"""Timer callback (every 50 ms) that drains the inter-thread queues.

		Updates the 3d view, projector display and slice preview from the
		slice queue, forwards status/command/console messages, and returns
		True so the periodic callback keeps running.
		"""
		# Check the queues...
		# If slice number queue has slice number...
		if self.queueSliceOut.qsize():
			# ... get it from the queue.
			sliceNumber = self.queueSliceOut.get()
			# If it's an actual slice number...
			if sliceNumber >=0:
				# Set 3d view to given slice.
				self.modelCollection.updateAllSlices3d(sliceNumber)
				self.renderView.render()
			# Set slice view to given slice. If sliceNumber is -1 black is displayed.
			# Only if not printing from raspberry. In this case the projector display will not exist.
			if self.projectorDisplay != None:
				self.projectorDisplay.updateImage(sliceNumber)
			# Update slice preview in the gui.
			self.sliceView.updateImage(sliceNumber)
			# Signal to print process that slice image is set and exposure time can begin.
			if self.queueSliceIn.empty():
				self.queueSliceIn.put(True)
		# If status queue has info...
		if self.queueStatus.qsize():
			# ... get the status.
			message = self.queueStatus.get()
			#print message
			# Check if this is the destroy message for terminating the print window.
			if message == "destroy":
				# If running on Raspberry, destroy projector display and clean up files.
				if self.runningOnRasPi:
					print "Print process finished! Idling..."
					self.printRunning = False
					del self.printProcess
					self.projectorDisplay.destroy()
					del self.projectorDisplay
					# Remove print file.
					if os.path.isfile(self.localPath + self.localFilename):
						os.remove(self.localPath + self.localFilename)
				# If not running on Raspberry Pi, destroy projector display and reset GUI.
				else:
					self.buttonPrintStart.setEnabled(True)
					self.buttonPrintStop.setEnabled(False)
					self.modelCollection.updateAllSlices3d(0)
					self.renderView.render()
					self.progressBar.updateValue(0)
					self.printRunning = False
					del self.printProcess
					self.projectorDisplay.destroy()
					del self.projectorDisplay
			else:
				# If running on Raspberry forward the message to the socket connection.
				if self.runningOnRasPi:
					self.socket.sendMulti("status", message)
				# If not, update the GUI.
				else:
					self.processStatusMessage(message)
				# print message
		# Poll the command queue.
		# Only do this when running on Raspberry Pi.
		# If command queue has info...
		if self.queueCommands.qsize():
			# ... get the command.
			command = self.queueCommands.get()
			self.processCommandMessage(command)
		# If console queue has info...
		if self.queueConsole.qsize():
			if self.console != None:
				self.console.addLine(self.queueConsole.get())
		# Return true, otherwise function won't run again.
		return True
# *************************************************************************
# Function to process output of commandQueue and control print process. ***
# *************************************************************************
	def processCommandMessage(self, message):
		"""Dispatch a 'command:parameter' string to start, stop or pause the
		print process; unknown commands are ignored."""
		# Only needed on Rasperry Pi.
		#if self.runningOnRasPi:
		# Split the string.
		command, parameter = message.split(":")
		if command == "start":
			if self.printRunning:
				pass
				# TODO: Send error message.
				#zmq_socket.send_multipart(["error", "Print running already."])
			else:
				# Start the print.
				self.printProcessStart()
		elif command == "stop":
			print "command: stop"
			if self.printRunning:
				self.printProcessStop()
				#self.printProcess.stop()
		elif command == "pause":
			if self.printRunning:
				self.printProcess.pause()
# *************************************************************************
# Function to process the output of statusQueue and update the GUI. *******
# *************************************************************************
def processStatusMessage(self, message):
# Split the string.
status, param, value = message.split(":")
# Check the status and retreive other data.
printRunning = True
if status == "slicing":
if param == "nSlices":
# Set number of slices for status bar.
self.progressBar.setLimit(int(value))
elif param == "slice":
# Set current slice in status bar.
currentSlice = int(value)
# TODO get current slice, this will work once slicer thread returns single slices.
if not self.queueSliceOut.qsize():
self.queueSliceOut.put(int(currentSlice))
self.progressBar.setText("Slicing.")
elif status == "preparing":
if param == "nSlices":
self.progressBar.setLimit(int(value))
if param == "homing":
self.progressBar.setText("Homing build platform.")
if param == "bubbles":
self.progressBar.setText("Removing bubbles.")
elif status == "printing":
if param == "nSlices":
# Set number of slices for status bar.
self.progressBar.setLimit(int(value))
if param == "slice":
# Set current slice in status bar.
self.progressBar.updateValue(int(value))
self.progressBar.setText("Printing slice " + value + ".")
if not self.queueSliceOut.qsize():
self.queueSliceOut.put(int(value))
elif status == "stopping":
self.progressBar.setText("Stopping print.")
elif status == "paused":
self.progressBar.setText("Print paused.")
elif status == "stopped":
if param == "slice":
if value == 1:
self.progressBar.setText("Print stopped after " + value + " slice.")
else:
self.progressBar.setText("Print stopped after " + value + " slices.")
else:
self.progressBar.setText("Print stopped.")
# Reset stop button to insensitive.
self.buttonPrintStart.setEnabled(True)
self.buttonPrintStop.setEnabled(False)
elif status == "idle":
if param == "slice":
self.progressBar.updateValue(int(value))
printRunning = False
self.progressBar.setText("Idle.")
# Reset gui sensitivities.
self.setGuiState(3)
self.printRunning = printRunning
# **************************************************************************
# Gui main function. *******************************************************
# **************************************************************************
	def main(self):
		"""Enter the Qt event loop; blocks until the application quits."""
		# Run the QT event loop.
		self.exec_()
# **************************************************************************
# Gui setup functions. *****************************************************
# **************************************************************************
# Create the top menu (files, settings etc). *******************************
def createMenuBar(self):
# TODO: check shortcuts.
# Create menu.
bar = self.mainWindow.menuBar()
# Create file menu.
menuFile = bar.addMenu("File")
# Item load.
menuItemLoad = QtGui.QAction("Load project",self)
menuFile.addAction(menuItemLoad)
menuItemLoad.setEnabled(False)
# Item save.
menuItemSave = QtGui.QAction("Save project",self)
menuFile.addAction(menuItemSave)
menuItemSave.setEnabled(False)
# Item close.
menuItemClose = QtGui.QAction("Close project",self)
menuFile.addAction(menuItemClose)
menuItemClose.setEnabled(False)
# Item quit.
menuItemQuit = QtGui.QAction("Quit",self)
menuFile.addAction(menuItemQuit)
menuItemQuit.triggered.connect(self.callbackMenuQuit)
# Create options menu.
menuOptions = bar.addMenu("Options")
# Item settings.
menuItemSettings = QtGui.QAction("Settings",self)
menuOptions.addAction(menuItemSettings)
menuItemSettings.triggered.connect(self.callbackMenuSettings)
# Item firmware.
#menuItemFirmware = QtGui.QAction("Flash firmware",self)
#menuOptions.addAction(menuItemFirmware)
# Item manual control.
menuItemManualControl = QtGui.QAction("Manual control",self)
menuOptions.addAction(menuItemManualControl)
menuItemManualControl.setEnabled(False)
return bar
	def callbackMenuQuit(self):
		"""Menu handler: close the main window (triggers application exit)."""
		self.mainWindow.close()
	def callbackMenuSettings(self):
		"""Menu handler: open the settings dialog."""
		dialogSettings(self.programSettings, parent=self)
# Create the main settings box next to the render view. ********************
	def createSettingsBox(self):
		"""Build the settings column (model list, settings notebook, console).

		Returns the assembled QVBoxLayout.  Also sets the initial gui state
		to 0 so only the model tab is clickable at start-up.
		"""
		# Create settings box.
		boxSettings = QtGui.QVBoxLayout()
		boxSettings.setSpacing(5)
		boxSettings.setContentsMargins(0,0,0,0)
		# Create model list view.
		# Label.
		labelModelTableView = QtGui.QLabel('<b>Models</b>')
		boxSettings.addWidget(labelModelTableView, 0, QtCore.Qt.AlignTop)
		# Table view.
		self.modelTableView = monkeyprintGuiHelper.modelTableView(self.programSettings, self.modelCollection, self.console, self)
		boxSettings.addWidget(self.modelTableView, 0)
		#boxSettings.addWidget(self.modelTableView)
		# Create settings notebook with one tab per workflow stage.
		self.notebookSettings = monkeyprintGuiHelper.notebook()
		self.notebookSettings.addTab(self.createSettingsModel(), 'Model')
		self.notebookSettings.addTab(self.createSettingsSupports(), 'Supports')
		self.notebookSettings.addTab(self.createSettingsSlicing(), 'Slicing')
		self.notebookSettings.addTab(self.createSettingsPrint(),'Print')
		# Add notebook to gui.
		boxSettings.addWidget(self.notebookSettings)
		# Set tab switch functions.
		self.notebookSettings.setCustomFunction(0, self.tabSwitchModelUpdate)
		self.notebookSettings.setCustomFunction(1, self.tabSwitchSupportsUpdate)
		self.notebookSettings.setCustomFunction(2, self.tabSwitchSlicesUpdate)
		self.notebookSettings.setCustomFunction(3, self.tabSwitchPrintUpdate)
		# Set gui state. This controls which tabs are clickable.**************
		# 0: Model modifications active.
		# 1: Model modifications, supports and slicing active.
		# 2: All active.
		# Use setGuiState function to set the state. Do not set manually.
		self.setGuiState(0)
		# Create output log.
		self.consoleView = monkeyprintGuiHelper.consoleView(self.console)
		boxSettings.addLayout(self.consoleView)
		return boxSettings
# Create the model, supports, slicer and print page for the notebook. ******
	def createSettingsModel(self):
		"""Build the 'Model' notebook page (scale/rotate/position entries).

		Each entry writes through to the current model in the model
		collection and triggers a model update plus a re-render.
		Returns the page widget.
		"""
		# Create widget.
		tabSettingsModel = QtGui.QWidget()
		# Create main tab box.
		boxSettingsModel = QtGui.QVBoxLayout()
		tabSettingsModel.setLayout(boxSettingsModel)
		# Create model modifications frame.
		frameSettingsModel = QtGui.QGroupBox("Model modifications")
		frameSettingsModel.setFlat(False)
		boxSettingsModel.addWidget(frameSettingsModel)
		# Create model modifications box.
		boxModelModifications = QtGui.QVBoxLayout()
		frameSettingsModel.setLayout(boxModelModifications)
		# Create entries.
		# Scaling.
		self.entryScaling = monkeyprintGuiHelper.entry('scaling', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries])
		boxModelModifications.addWidget(self.entryScaling)
		# Rotation.
		self.entryRotationX = monkeyprintGuiHelper.entry('rotationX', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries] )
		boxModelModifications.addWidget(self.entryRotationX)
		self.entryRotationY = monkeyprintGuiHelper.entry('rotationY', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries] )
		boxModelModifications.addWidget(self.entryRotationY)
		self.entryRotationZ = monkeyprintGuiHelper.entry('rotationZ', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries] )
		boxModelModifications.addWidget(self.entryRotationZ)
		# Position.
		self.entryPositionX = monkeyprintGuiHelper.entry('positionX', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries] )
		boxModelModifications.addWidget(self.entryPositionX)
		self.entryPositionY = monkeyprintGuiHelper.entry('positionY', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries] )
		boxModelModifications.addWidget(self.entryPositionY)
		# Bottom clearance.
		self.entryBottomClearance = monkeyprintGuiHelper.entry('bottomClearance', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries] )
		boxModelModifications.addWidget(self.entryBottomClearance)
		# Push widgets to the top of the page.
		boxSettingsModel.addStretch()
		return tabSettingsModel
	def createSettingsSupports(self):
		"""Build the 'Supports' notebook page (pattern and geometry entries).

		Entries write through to the current model and trigger model update
		plus re-render.  Returns the page widget.
		"""
		# Create widget.
		tabSettingsSupports = QtGui.QWidget()
		# Create main tab box.
		boxSettingsSupports = QtGui.QVBoxLayout()
		#boxSettingsSupports.setContentsMargins(0,0,0,0)
		tabSettingsSupports.setLayout(boxSettingsSupports)
		# Create support pattern frame.
		frameSettingsSupportPattern = QtGui.QGroupBox("Support pattern")
		boxSettingsSupports.addWidget(frameSettingsSupportPattern)
		# Create support pattern box.
		boxSupportPattern = QtGui.QVBoxLayout()
		frameSettingsSupportPattern.setLayout(boxSupportPattern)
		# Create entries.
		# Overhang angle.
		self.entryOverhangAngle = monkeyprintGuiHelper.entry('overhangAngle', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries] )
		boxSupportPattern.addWidget(self.entryOverhangAngle, 0)
		# Spacing.
		self.entrySupportSpacingX = monkeyprintGuiHelper.entry('spacingX', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries] )
		boxSupportPattern.addWidget(self.entrySupportSpacingX)
		self.entrySupportSpacingY = monkeyprintGuiHelper.entry('spacingY', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries] )
		boxSupportPattern.addWidget(self.entrySupportSpacingY, 0)
		# Max height.
		self.entrySupportMaxHeight = monkeyprintGuiHelper.entry('maximumHeight', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries] )
		boxSupportPattern.addWidget(self.entrySupportMaxHeight, 0)
		# Bottom plate thickness.
		self.entrySupportBottomPlateThickness = monkeyprintGuiHelper.entry('bottomPlateThickness', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries] )
		boxSupportPattern.addWidget(self.entrySupportBottomPlateThickness, 0)
		# Create support geometry frame.
		frameSettingsSupportGeo = QtGui.QGroupBox("Support geometry")
		boxSettingsSupports.addWidget(frameSettingsSupportGeo)
		# Create support geometry box.
		boxSupportGeo = QtGui.QVBoxLayout()
		frameSettingsSupportGeo.setLayout(boxSupportGeo)
		# Cone base and tip diameters.
		self.entrySupportBaseDiameter = monkeyprintGuiHelper.entry('baseDiameter', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries])
		boxSupportGeo.addWidget(self.entrySupportBaseDiameter)
		self.entrySupportTipDiameter = monkeyprintGuiHelper.entry('tipDiameter', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries])
		boxSupportGeo.addWidget(self.entrySupportTipDiameter)
		# Cone height.
		self.entrySupportTipHeight = monkeyprintGuiHelper.entry('coneHeight', modelCollection=self.modelCollection, customFunctions=[self.updateCurrentModel, self.renderView.render, self.updateAllEntries])
		boxSupportGeo.addWidget(self.entrySupportTipHeight)
		return tabSettingsSupports
	def createSettingsSlicing(self):
		"""Build the 'Slicing' notebook page.

		Contains layer height, hollow/fill toggles and fill geometry entries
		(all of which re-run the slicer), the slice preview slider, and the
		save-slice-images button (disabled until a slice stack exists).
		Returns the page widget.
		"""
		# Create widget.
		tabSettingsSlicing = QtGui.QWidget()
		# Create main tab box.
		boxSettingsSlicing = QtGui.QVBoxLayout()
		boxSettingsSlicing.setContentsMargins(3,3,3,3)
		tabSettingsSlicing.setLayout(boxSettingsSlicing)
		# Create slicing parameters frame.
		frameSettingsSlicingParameters = QtGui.QGroupBox("Slicing parameters")
		boxSettingsSlicing.addWidget(frameSettingsSlicingParameters)
		# Create slicing parameters box.
		boxSlicingParameters = QtGui.QVBoxLayout()
		boxSlicingParameters.setContentsMargins(0,3,0,3)
		frameSettingsSlicingParameters.setLayout(boxSlicingParameters)
		# Create entries.
		# Layer height.
		self.entryLayerHeight = monkeyprintGuiHelper.entry('layerHeight', settings=self.programSettings, customFunctions=[self.modelCollection.updateSliceStack, self.updateSlider, self.renderView.render, self.updateAllEntries])
		boxSlicingParameters.addWidget(self.entryLayerHeight)
		# Create fill parameters frame.
		frameSettingsFillParameters = QtGui.QGroupBox("Fill parameters")
		boxSettingsSlicing.addWidget(frameSettingsFillParameters)
		# Create fill parameters box.
		boxFillParameters = QtGui.QVBoxLayout()
		boxFillParameters.setContentsMargins(0,3,0,3)
		frameSettingsFillParameters.setLayout(boxFillParameters)
		# Create fill parameters checkbox box.
		boxFillParameterToggles = QtGui.QHBoxLayout()
		boxFillParameterToggles.setContentsMargins(0,0,0,0)
		boxFillParameters.addLayout(boxFillParameterToggles)
		# Create toggle buttons.
		# Hollow.
		self.toggleButtonHollow = monkeyprintGuiHelper.toggleButton('printHollow', modelCollection=self.modelCollection, customFunctions=[self.modelCollection.updateSliceStack])#, self.updateSlider, self.renderView.render, self.updateAllEntries, self.updateSlicingEntries])
		boxFillParameterToggles.addWidget(self.toggleButtonHollow, 0, QtCore.Qt.AlignLeft)
		# Fill.
		self.toggleButtonFill = monkeyprintGuiHelper.toggleButton('fill', modelCollection=self.modelCollection, customFunctions=[self.modelCollection.updateSliceStack])#, self.updateSlider, self.renderView.render, self.updateAllEntries, self.updateSlicingEntries])
		boxFillParameterToggles.addWidget(self.toggleButtonFill, 0, QtCore.Qt.AlignLeft)
		# Create entries.
		# Shell wall thickness.
		self.entryFillShellThickness = monkeyprintGuiHelper.entry('fillShellWallThickness', modelCollection=self.modelCollection, customFunctions=[self.modelCollection.updateSliceStack, self.updateSlider, self.renderView.render, self.updateAllEntries])
		boxFillParameters.addWidget(self.entryFillShellThickness)
		self.entryFillSpacing = monkeyprintGuiHelper.entry('fillSpacing', modelCollection=self.modelCollection, customFunctions=[self.modelCollection.updateSliceStack, self.updateSlider, self.renderView.render, self.updateAllEntries])
		boxFillParameters.addWidget(self.entryFillSpacing)
		# Fill pattern wall thickness.
		self.entryFillThickness = monkeyprintGuiHelper.entry('fillPatternWallThickness', modelCollection=self.modelCollection, customFunctions=[self.modelCollection.updateSliceStack, self.updateSlider, self.renderView.render, self.updateAllEntries])
		boxFillParameters.addWidget(self.entryFillThickness)
		# Create preview frame.
		frameSlicePreview = QtGui.QGroupBox("Slice preview")
		boxSettingsSlicing.addWidget(frameSlicePreview)
		# Create fill parameters box.
		boxSlicePreview = QtGui.QVBoxLayout()
		frameSlicePreview.setLayout(boxSlicePreview)
		self.sliceSlider = monkeyprintGuiHelper.imageSlider(modelCollection=self.modelCollection, programSettings=self.programSettings, width=200, console=self.console, customFunctions=[self.modelCollection.updateAllSlices3d, self.renderView.render])
		boxSlicePreview.addLayout(self.sliceSlider)
		# Create save image stack frame.
		frameSaveSlices = QtGui.QGroupBox("Save slice images")
		boxSettingsSlicing.addWidget(frameSaveSlices)
		boxSaveSlices = QtGui.QVBoxLayout()
		boxSettingsSlicing.addLayout(boxSaveSlices)
		self.buttonSaveSlices = QtGui.QPushButton("Save")
		self.buttonSaveSlices.clicked.connect(self.callbackSaveSlices)
		# Disabled until checkSlicer() reports a finished slice stack.
		self.buttonSaveSlices.setEnabled(False)
		boxSaveSlices.addWidget(self.buttonSaveSlices)
		return tabSettingsSlicing
	def createSettingsPrint(self):
		"""Build the 'Print' notebook page.

		Contains exposure parameters, the resin volume label, the print
		start/stop buttons with progress bar, and the projector preview.
		Returns the page widget.
		"""
		# Create widget.
		tabSettingsPrint = QtGui.QWidget()
		# Create main tab box.
		boxSettingsPrint = QtGui.QVBoxLayout()
		#boxSettingsPrint.setContentsMargins(0,0,0,0)
		tabSettingsPrint.setLayout(boxSettingsPrint)
		# Create slicing parameters frame.
		frameSettingsPrintParameters = QtGui.QGroupBox("Print parameters")
		boxSettingsPrint.addWidget(frameSettingsPrintParameters)
		# Create print parameters box.
		boxPrintParameters = QtGui.QVBoxLayout()
		boxPrintParameters.setContentsMargins(0,3,0,3)
		frameSettingsPrintParameters.setLayout(boxPrintParameters)
		# Create entries.
		self.entryExposure = monkeyprintGuiHelper.entry('exposureTime', settings=self.programSettings)
		boxPrintParameters.addWidget(self.entryExposure)
		self.entryExposureBase = monkeyprintGuiHelper.entry('exposureTimeBase', settings=self.programSettings)
		boxPrintParameters.addWidget(self.entryExposureBase)
		self.entryNumberOfBaseLayers = monkeyprintGuiHelper.entry('numberOfBaseLayers', settings=self.programSettings)
		boxPrintParameters.addWidget(self.entryNumberOfBaseLayers)
	#	self.entrySettleTime = monkeyprintGuiHelper.entry('Resin settle time', settings=self.programSettings)
	#	self.boxPrintParameters.pack_start(self.entrySettleTime, expand=True, fill=True)
		# Create model volume frame.
		frameResinVolume = QtGui.QGroupBox("Resin volume")
		boxSettingsPrint.addWidget(frameResinVolume)
		# Create print parameters box.
		boxResinVolume = QtGui.QVBoxLayout()
		boxResinVolume.setContentsMargins(0,3,0,3)
		frameResinVolume.setLayout(boxResinVolume)
		# Resin volume label; text is refreshed by updateVolume().
		self.resinVolumeLabel = QtGui.QLabel("Volume: ")
		boxResinVolume.addWidget(self.resinVolumeLabel, 0, QtCore.Qt.AlignHCenter)
		# Create model volume frame.
		framePrintControl = QtGui.QGroupBox("Print control")
		boxSettingsPrint.addWidget(framePrintControl)
		# Create print control box.
		boxPrintControl = QtGui.QHBoxLayout()
		boxPrintControl.setContentsMargins(0,3,0,3)
		boxPrintControl.setSpacing(0)
		framePrintControl.setLayout(boxPrintControl)
		boxPrintControl.setSpacing(0)
		# Create print control buttons.
		self.buttonPrintStart = QtGui.QPushButton('Print')
		self.buttonPrintStart.setMaximumSize(QtCore.QSize(40,23))
		self.buttonPrintStart.clicked.connect(self.callbackStartPrintProcess)
		boxPrintControl.addWidget(self.buttonPrintStart)
		self.buttonPrintStop = QtGui.QPushButton('Stop')
		self.buttonPrintStop.setMaximumSize(QtCore.QSize(40,23))
		self.buttonPrintStop.clicked.connect(self.callbackStopPrintProcess)
		# Stop is only enabled while a print is running.
		self.buttonPrintStop.setEnabled(False)
		boxPrintControl.addWidget(self.buttonPrintStop)
		# Create progress bar.
		self.progressBar = monkeyprintGuiHelper.printProgressBar()
		boxPrintControl.addWidget(self.progressBar)
		# Create preview frame.
		frameProjectorView = QtGui.QGroupBox("Projector view")
		boxSettingsPrint.addWidget(frameProjectorView)
		boxProjectorView = QtGui.QHBoxLayout()
		frameProjectorView.setLayout(boxProjectorView)
		# Create slice image.
		self.sliceView = monkeyprintGuiHelper.imageView(settings=self.programSettings, modelCollection=self.modelCollection, mode='full', width=self.programSettings['previewSliceWidth'].value)
		boxProjectorView.addWidget(self.sliceView)
		return tabSettingsPrint
# **************************************************************************
# Gui update functions. *****************************************************
# **************************************************************************
# *************************************************************************
# Function that checks if one of the slicer threads is running. ***********
# *************************************************************************
	def checkSlicer(self):
		"""Poll the background slicer and sync gui state.

		Meant to be called periodically.  Enables the slice-save button and
		toggles availability of the print tab (gui state 3) depending on
		whether the slice combiner has finished and any models are active.
		"""
		# Call the model collections internal checker methods.
		self.modelCollection.checkSlicer()
		# Enable slice stack save button only when slicing is done and at
		# least one model is active.
		self.buttonSaveSlices.setEnabled(self.modelCollection.sliceCombinerFinished and self.modelCollection.getNumberOfActiveModels() > 0)
		# Slicer has just finished...
		if self.slicerRunning and self.modelCollection.sliceCombinerFinished:
			# update the slider including the image and make print tab available.
			self.updateSlider()
			self.setGuiState(3)
			self.slicerRunning = False
		# If slicer is running...
		# NOTE: by Python operator precedence this condition groups as
		# (slicerRunning and not finished) or (no active models).
		elif self.slicerRunning and not self.modelCollection.sliceCombinerFinished or self.modelCollection.getNumberOfActiveModels() == 0:
			# disable the print tab.
			if self.getGuiState() == 3:
				self.setGuiState(2)
			self.slicerRunning = not self.modelCollection.sliceCombinerFinished
def updateSlicingEntries(self):
print self.toggleButtonHollow.isChecked()
print self.toggleButtonFill.isChecked()
self.entryFillShellThickness.setEnabled(self.toggleButtonHollow.isChecked())
enableFill = self.toggleButtonHollow.isChecked and self.toggleButtonFill.isChecked()
self.entryFillSpacing.setEnabled(enableFill)
self.entryFillThickness.setEnabled(enableFill)
	def updateVolume(self):
		"""Refresh the resin volume label from the model collection's total volume (ml)."""
		self.resinVolumeLabel.setText("Volume: " + str(self.modelCollection.getTotalVolume()) + " ml.")
def setGuiState(self, state):
# State 4 is for printing.
if state == 4:
# Disable the model, supports and slicer tabs.
for i in range(self.notebookSettings.count()):
if i < 3:
self.notebookSettings.setTabEnabled(i, False)
else:
self.notebookSettings.setTabEnabled(i, True)
# Disable model list.
self.modelTableView.setEnabled(False)
# Loop through tabs and enable the ones up to the state.
else:
for i in range(self.notebookSettings.count()):
if i<=state:
self.notebookSettings.setTabEnabled(i, True)
else:
self.notebookSettings.setTabEnabled(i, False)
self.modelTableView.setEnabled(True)
def getGuiState(self):
tab = 0
for i in range(self.notebookSettings.count()):
if self.notebookSettings.isTabEnabled(i):
tab = i
return tab
# Function to update the current model after a change was made.
# Updates model supports or slicing dependent on
# the current page of the settings notebook.
def updateCurrentModel(self):
# Update model.
if self.notebookSettings.getCurrentPage() == 0:
changed = self.modelCollection.getCurrentModel().updateModel()
# If model has changed, set gui state to supports.
if changed:
self.setGuiState(1)
# Update supports
elif self.notebookSettings.getCurrentPage() == 1:
changed = self.modelCollection.getCurrentModel().updateSupports()
# If supports have changed, set gui state to slicer.
if changed:
self.setGuiState(2)
elif self.notebookSettings.getCurrentPage() == 2:
self.modelCollection.getCurrentModel().updateSliceStack()
# Don't set gui state, this will be done by a timeout method
# that polls the slicer thread.
def updateAllModels(self):
if self.notebookSettings.getCurrentPage() == 2:
self.modelCollection.updateSliceStack()
elif self.notebookSettings.getCurrentPage() == 1:
self.modelCollection.updateAllSupports()
elif self.notebookSettings.getCurrentPage() == 0:
self.modelCollection.updateAllModels()
# Update all the settings if the current model has changed.
def updateAllEntries(self, state=None, render=None):
#print self.modelCollection.getCurrentModel()
if not self.modelCollection.getCurrentModel().isActive() or self.modelCollection.getCurrentModelId() == 'default':
self.entryScaling.setEnabled(False)
self.entryRotationX.setEnabled(False)
self.entryRotationY.setEnabled(False)
self.entryRotationZ.setEnabled(False)
self.entryPositionX.setEnabled(False)
self.entryPositionY.setEnabled(False)
self.entryBottomClearance.setEnabled(False)
self.entryOverhangAngle.setEnabled(False)
self.entrySupportSpacingX.setEnabled(False)
self.entrySupportSpacingY.setEnabled(False)
self.entrySupportMaxHeight.setEnabled(False)
self.entrySupportBaseDiameter.setEnabled(False)
self.entrySupportTipDiameter.setEnabled(False)
self.entrySupportTipHeight.setEnabled(False)
self.entrySupportBottomPlateThickness.setEnabled(False)
self.entryFillSpacing.setEnabled(False)
self.entryFillThickness.setEnabled(False)
self.entryFillShellThickness.setEnabled(False)
#self.checkboxFill.setEnabled(False)
#self.checkboxHollow.setEnabled(False)
else:
self.entryScaling.setEnabled(True)
self.entryRotationX.setEnabled(True)
self.entryRotationY.setEnabled(True)
self.entryRotationZ.setEnabled(True)
self.entryPositionX.setEnabled(True)
self.entryPositionY.setEnabled(True)
self.entryBottomClearance.setEnabled(True)
self.entryOverhangAngle.setEnabled(True)
self.entrySupportSpacingX.setEnabled(True)
self.entrySupportSpacingY.setEnabled(True)
self.entrySupportMaxHeight.setEnabled(True)
self.entrySupportBaseDiameter.setEnabled(True)
self.entrySupportTipDiameter.setEnabled(True)
self.entrySupportTipHeight.setEnabled(True)
self.entrySupportBottomPlateThickness.setEnabled(True)
self.entryFillSpacing.setEnabled(True)
self.entryFillThickness.setEnabled(True)
self.entryFillShellThickness.setEnabled(True)
#self.checkboxFill.setEnabled(True)
#self.checkboxHollow.setEnabled(True)
self.entryScaling.update()
self.entryRotationX.update()
self.entryRotationY.update()
self.entryRotationZ.update()
self.entryPositionX.update()
self.entryPositionY.update()
self.entryBottomClearance.update()
self.entryOverhangAngle.update()
self.entrySupportSpacingX.update()
self.entrySupportSpacingY.update()
self.entrySupportMaxHeight.update()
self.entrySupportBaseDiameter.update()
self.entrySupportTipDiameter.update()
self.entrySupportTipHeight.update()
self.entrySupportBottomPlateThickness.update()
self.entryFillSpacing.update()
self.entryFillThickness.update()
self.entryFillShellThickness.update()
#self.checkboxFill.update()
#self.checkboxHollow.update()
# Update job settings.
self.entryLayerHeight.update()
# self.entryExposure.update()
# self.entryExposureBase.update()
# Update menu sensitivities.
self.updateMenu()
if state != None:
self.setGuiState(state)
if state == 0:
self.notebookSettings.setCurrentPage(0)
# Update the volume label in the print tab.
if self.notebookSettings.getCurrentPage() == 3:
self.updateVolume()
# Update model visibilities.
if render == True:
for model in self.modelCollection:
self.modelCollection[model].updateAllActors(self.notebookSettings.getCurrentPage())
self.renderView.render()
	def updateSlider(self):
		"""Refresh the slice preview slider range and its displayed image."""
		self.sliceSlider.updateSlider()
		self.sliceSlider.updateImage()
	def updateMenu(self):
		"""Update menu item sensitivities; currently a no-op (disabled code kept below)."""
		pass
	'''
		self.menuItemSave.set_sensitive(self.modelCollection.modelsLoaded())
		self.menuItemClose.set_sensitive(self.modelCollection.modelsLoaded())
	'''
def callbackSaveSlices(self, widget, data=None):
# Open a file chooser dialog.
fileChooser = QtGui.QFileDialog()
fileChooser.setFileMode(QtGui.QFileDialog.AnyFile)
fileChooser.setFilter("Image files (*.png)")
fileChooser.setWindowTitle("Save slices")
fileChooser.setDirectory(self.programSettings['currentFolder'].getValue())
filenames = QtCore.QStringList()
# Only continue if OK was clicked.
if fileChooser.exec_() == QtGui.QDialog.Accepted:
# Get path.
filepath = str(fileChooser.selectedFiles()[0])
fileChooser.destroy()
# Add *.png file extension if necessary.
if filepath.lower()[-3:] != "png":
filepath += '.png'
# Console message.
if self.console:
self.console.addLine("Saving slice images to \"" + filepath.split('/')[-1] + "\".")
# Save path without project name for next use.
self.programSettings['currentFolder'].value = filepath[:-len(filepath.split('/')[-1])]
# Create info window with progress bar.
# This will call the save slice stack method internally.
infoWindow = monkeyprintGuiHelper.messageWindowSaveSlices(self.mainWindow, self.modelCollection, filepath)
#
self.console.addLine("Slice stack saved.")
infoWindow.close()
	def callbackStartPrintProcess(self, data=None):
		"""Show the print start dialog; on accept, queue a 'start:' command.

		The command is only queued when the command queue is empty, so a
		pending command is never overwritten.
		"""
		# Create a print start dialog.
		self.dialogStart = monkeyprintGuiHelper.dialogStartPrint(parent = self.mainWindow)
		# Run the dialog and get the result.
		if self.dialogStart.exec_() == QtGui.QDialog.Accepted:
			#self.printProcessStart()
			if not self.queueCommands.qsize():
				self.queueCommands.put("start:")
	def callbackStopPrintProcess(self, data=None):
		"""Ask for confirmation; on 'Yes', queue a 'stop:' command.

		The command is only queued when the command queue is empty.
		"""
		# Create a dialog window with yes/no buttons.
		reply = QtGui.QMessageBox.question(	self.mainWindow,
									'Message',
									"Do you really want to cancel the print?",
									QtGui.QMessageBox.Yes,
									QtGui.QMessageBox.No)
		# Stop if desired.
		if reply == QtGui.QMessageBox.Yes:
			if not self.queueCommands.qsize():
				self.queueCommands.put("stop:")
	def printProcessStart(self):
		"""Start a local print: lock the gui, open the projector window and
		launch the print process thread.

		Only runs when printing on the PC; the Raspberry Pi path is handled
		elsewhere (see the 'printOnRaspberry' setting).
		"""
		# If starting print process on PC...
		if not self.programSettings['printOnRaspberry'].value:
			#... create the projector window and start the print process.
			self.console.addLine("Starting print")
			# Disable window close event.
			self.printRunning = True
			# Set gui sensitivity: state 4 locks everything but the print tab.
			self.setGuiState(4)
			# Set progressbar limit according to number of slices.
			self.progressBar.setLimit(self.modelCollection.getNumberOfSlices())
			# Create the projector window.2
			self.projectorDisplay = monkeyprintGuiHelper.projectorDisplay(self.programSettings, self.modelCollection)
			# Start the print.
			self.printProcess = monkeyprintPrintProcess.printProcess(self.modelCollection, self.programSettings, self.queueSliceOut, self.queueSliceIn, self.queueStatus, self.queueConsole)
			self.printProcess.start()
			# Set button sensitivities.
			self.buttonPrintStart.setEnabled(False)
			self.buttonPrintStop.setEnabled(True)
	def printProcessStop(self, data=None):
		"""Stop the running print: via socket when printing on the Pi,
		otherwise by stopping the local print process thread."""
		# Stop the print process.
		# If print is running on Pi...
		if self.programSettings['printOnRaspberry'].value:
			# ... send the stop command.
			command = "stop"
			path = ""
			self.socket.sendMulti(command, path)
		else:
			self.printProcess.stop()
		# Reset stop button to insensitive.
		self.buttonPrintStop.setEnabled(False)
# Notebook tab switch callback functions. ##################################
# Model page.
	def tabSwitchModelUpdate(self):
		"""Tab-switch hook for the model page: show model-stage actors and
		unlock the load/remove buttons."""
		# Set render actor visibilities.
		self.modelCollection.viewState(0)
		self.renderView.render()
		# Enable model management load and remove buttons.
		self.modelTableView.setButtonsSensitive(load=True, remove=self.modelCollection.modelsLoaded())
# Supports page.
	def tabSwitchSupportsUpdate(self):
		"""Tab-switch hook for the supports page: rebuild supports, show
		support-stage actors and lock the model list buttons."""
		# Update supports.
		self.modelCollection.updateAllSupports()
		# Set render actor visibilities.
		self.modelCollection.viewState(1)
		self.renderView.render()
		# Activate slice tab if not already activated.
		if self.getGuiState() == 1:
			self.setGuiState(2)
		# Disable model management load and remove buttons.
		self.modelTableView.setButtonsSensitive(False,False)
# Slicing page.
	def tabSwitchSlicesUpdate(self):
		"""Tab-switch hook for the slicing page: re-run the slicer, show
		slice-stage actors and refresh the preview slider."""
		# Update slice stack height.
		self.modelCollection.updateSliceStack()
		# Set render actor visibilites.
		self.modelCollection.viewState(2)
		self.renderView.render()
		# Activate print tab if not already activated.
		if self.getGuiState() == 2:
			pass
			#self.setGuiState(3) # Is activated or deactivated in slicer status poll method.
		# Disable model management load and remove buttons.
		self.modelTableView.setButtonsSensitive(False,False)
		# Update slider.
		self.updateSlider()
# Print page.
	def tabSwitchPrintUpdate(self):
		"""Tab-switch hook for the print page: show print-stage actors,
		reset the 3d slice preview to slice 0 and refresh the volume label."""
		# Set render actor visibilites.
		self.modelCollection.viewState(3)
		# Set current slice to 0.
		self.modelCollection.updateAllSlices3d(0)
		self.renderView.render()
		# Update the model volume.
		self.updateVolume()
		# Disable model management load and remove buttons.
		self.modelTableView.setButtonsSensitive(False, False)
##### ##### ###### ###### ###### ## ## ##### ##### ##### ###### #### ## #### #####
## ## ## ## ## ### ## ## ## ## ## ## ## ## ## ## ## ##
#### #### ## ## ## ###### ## #### ## ## ## ## ## ## ## ## ##
## ## ## ## ## ## ### ## ### ## ## ## ## ###### ## ## ## ## ###
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
##### ##### ## ## ###### ## ## #### ##### ##### ###### ## ## ###### #### ####
# Settings window. #############################################################
# Define a window for all the settings that are related to the printer.
class dialogSettings(QtGui.QDialog):
# Override init function.
	# Override init function.
	def __init__(self, settings, parent):
		"""Build and show the modal printer settings dialog.

		Parameters:
		settings: the program settings container; entries write through to it.
		parent: the gui object owning the main window (parent.mainWindow is
		        used as the Qt parent).
		"""
		# Call super class init function.
		QtGui.QDialog.__init__(self, parent.mainWindow)
		# Set title.
		self.setWindowTitle("Monkeyprint settings")
		# Set modal.
		self.setModal(True)
		self.show()
		# Internalise settings.
		self.settings = settings
		self.parent = parent
		# Save settings in case of cancelling.
		#self.settingsBackup = settings
		# Flags recording which follow-up actions a changed setting requires.
		self.reslice = False
		self.restartMonkeyprint = False
		self.resetGui = False
		# Tooltips object.
		#TODO
		# Vertical box for settings and bottom buttons.
		self.boxMain = QtGui.QVBoxLayout()
		self.setLayout(self.boxMain)
		# Create notebook.
		self.notebookSettings = monkeyprintGuiHelper.notebook()
		self.boxMain.addWidget(self.notebookSettings)
		# Create notebook pages.
		# Main settings.
		self.tabMainSettings = self.createMainSettingsTab()
		self.notebookSettings.addTab(self.tabMainSettings, 'Main settings')
		# Set slicer memory label.
		self.updateSlicerMemoryUsage()
		# Communication settings.
		self.tabCommunicationSettings = self.createProjectorTab()
		self.notebookSettings.addTab(self.tabCommunicationSettings, 'Projector')
		# Print process settings.
		self.tabPrintProcessSettings = self.createPrintProcessTab()
		self.notebookSettings.addTab(self.tabPrintProcessSettings, 'Print process')
		'''
		# Set sensitivities according to toggle buttons in main settings tab.
		self.callbackRaspiToggle(None, None)
		'''
		# Create bottom buttons.
		# Horizontal box for buttons.
		boxButtons = QtGui.QHBoxLayout()
		self.boxMain.addLayout(boxButtons)
		# Close button.
		self.buttonClose = QtGui.QPushButton("Close")
		boxButtons.addWidget(self.buttonClose)
		self.buttonClose.clicked.connect(self.callbackClose)
		# Restore defaults button.
		self.buttonDefaults = QtGui.QPushButton("Load defaults")
		boxButtons.addWidget(self.buttonDefaults)
		self.buttonDefaults.clicked.connect(self.callbackDefaults)
# Main settings tab.
	# Main settings tab.
	def createMainSettingsTab(self):
		"""Build the 'Main settings' tab: build volume, serial connection,
		slicer and debug options.  Returns the tab widget."""
		# Create widget.
		tabSettingsMain = QtGui.QWidget()
		# Create main tab box.
		boxSettingsMain = QtGui.QVBoxLayout()
		tabSettingsMain.setLayout(boxSettingsMain)
		# Frame for build space.
		frameBuildVolume = QtGui.QGroupBox("Build space")
		frameBuildVolume.setFlat(False)
		boxSettingsMain.addWidget(frameBuildVolume)
		boxBuildVolume= QtGui.QVBoxLayout()
		frameBuildVolume.setLayout(boxBuildVolume)
		# Add entries.  Build size changes require a gui reset.
		self.entryBuildSizeX= monkeyprintGuiHelper.entry('buildSizeX', self.settings, customFunctions=[self.setResetGuiFlag])
		boxBuildVolume.addWidget(self.entryBuildSizeX)
		self.entryBuildSizeY= monkeyprintGuiHelper.entry('buildSizeY', self.settings, customFunctions=[self.setResetGuiFlag])
		boxBuildVolume.addWidget(self.entryBuildSizeY)
		self.entryBuildSizeZ= monkeyprintGuiHelper.entry('buildSizeZ', self.settings, customFunctions=[self.setResetGuiFlag])
		boxBuildVolume.addWidget(self.entryBuildSizeZ)
		# Frame for build space.
		frameSerial = QtGui.QGroupBox("Serial connection")
		frameSerial.setFlat(False)
		boxSettingsMain.addWidget(frameSerial)
		boxSerial= QtGui.QVBoxLayout()
		frameSerial.setLayout(boxSerial)
		# Add entries.
		self.entryPort = monkeyprintGuiHelper.entry('port', self.settings)
		boxSerial.addWidget(self.entryPort)
		self.entryBaud = monkeyprintGuiHelper.entry('baudrate', self.settings)
		boxSerial.addWidget(self.entryBaud)
		# Frame for slicer settings.
		frameSlicer = QtGui.QGroupBox("Slicer")
		frameSlicer.setFlat(False)
		boxSettingsMain.addWidget(frameSlicer)
		boxSlicer= QtGui.QVBoxLayout()
		frameSlicer.setLayout(boxSlicer)
		# Add entries.  Slicer option changes require a reslice.
		self.entryNumberOfPreviewSlices = monkeyprintGuiHelper.entry('previewSlicesMax', settings=self.settings, customFunctions=[self.updateSlicerMemoryUsage, self.setResliceFlag])
		boxSlicer.addWidget(self.entryNumberOfPreviewSlices)
		self.labelSlicerMemory = QtGui.QLabel()
		boxSlicer.addWidget(self.labelSlicerMemory)
		self.checkbuttonMultiBodySlicing = monkeyprintGuiHelper.toggleButton('multiBodySlicing', settings=self.settings, customFunctions=[self.setResliceFlag])
		boxSlicer.addWidget(self.checkbuttonMultiBodySlicing)
		# Frame for debug settings.
		frameDebug = QtGui.QGroupBox("Debug")
		frameDebug.setFlat(False)
		boxSettingsMain.addWidget(frameDebug)
		boxDebug= QtGui.QVBoxLayout()
		frameDebug.setLayout(boxDebug)
		# Add entry.
		self.checkbuttonDebug = monkeyprintGuiHelper.toggleButton('debug', settings=self.settings)
		boxDebug.addWidget(self.checkbuttonDebug)
		return tabSettingsMain
# Communication tab.
def createProjectorTab(self):
    """Assemble and return the projector settings tab.

    The tab stacks three group boxes: projector geometry (size and
    position entries -- size changes force a reslice), the serial
    commands used to switch the projector on and off, and the
    calibration image loader.
    """
    tab = QtGui.QWidget()
    tabLayout = QtGui.QVBoxLayout()
    tab.setLayout(tabLayout)

    # Projector geometry group.
    geometryGroup = QtGui.QGroupBox("Projector")
    tabLayout.addWidget(geometryGroup)
    geometryLayout = QtGui.QVBoxLayout()
    geometryGroup.setLayout(geometryLayout)
    # Size entries invalidate the slice stack, hence the reslice callback.
    self.entryProjectorSizeX = monkeyprintGuiHelper.entry('projectorSizeX', self.settings, customFunctions=[self.setResliceFlag])
    geometryLayout.addWidget(self.entryProjectorSizeX)
    self.entryProjectorSizeY = monkeyprintGuiHelper.entry('projectorSizeY', self.settings, customFunctions=[self.setResliceFlag])
    geometryLayout.addWidget(self.entryProjectorSizeY)
    self.entryProjectorPositionX = monkeyprintGuiHelper.entry('projectorPositionX', self.settings)
    geometryLayout.addWidget(self.entryProjectorPositionX)
    self.entryProjectorPositionY = monkeyprintGuiHelper.entry('projectorPositionY', self.settings)
    geometryLayout.addWidget(self.entryProjectorPositionY)

    # Projector on/off serial command group.
    controlGroup = QtGui.QGroupBox("Projector control")
    controlGroup.setFlat(False)
    tabLayout.addWidget(controlGroup)
    controlLayout = QtGui.QVBoxLayout()
    controlGroup.setLayout(controlLayout)
    self.entryProjectorOnCommand = monkeyprintGuiHelper.entry('projectorOnCommand', self.settings)
    controlLayout.addWidget(self.entryProjectorOnCommand)
    self.entryProjectorOffCommand = monkeyprintGuiHelper.entry('projectorOffCommand', self.settings)
    controlLayout.addWidget(self.entryProjectorOffCommand)
    self.entryProjectorPort = monkeyprintGuiHelper.entry('projectorPort', self.settings)
    controlLayout.addWidget(self.entryProjectorPort)
    self.entryProjectorBaud = monkeyprintGuiHelper.entry('projectorBaudrate', self.settings)
    controlLayout.addWidget(self.entryProjectorBaud)

    # Calibration image group with a file-backed image preview.
    calibrationGroup = QtGui.QGroupBox("Calibration image")
    calibrationGroup.setFlat(False)
    tabLayout.addWidget(calibrationGroup)
    calibrationLayout = QtGui.QVBoxLayout()
    calibrationGroup.setLayout(calibrationLayout)
    self.imageContainer = monkeyprintGuiHelper.imageFromFile(self.settings, 200, customFunctions=[self.setResliceFlag])
    calibrationLayout.addWidget(self.imageContainer)

    return tab
# Print process tab.
def createPrintProcessTab(self):
    """Build the print process settings tab.

    Returns the print process table view widget supplied by the gui
    helper module; all editing logic lives inside that widget.
    """
    # An older hand-built tab implementation used to follow the return
    # statement as an unreachable string literal; it was dead code and
    # has been removed.
    return monkeyprintGuiHelper.printProcessTableView(self.settings, self.parent)
# Recalculate the approximate memory useage due to preview slice stack.
def updateSlicerMemoryUsage(self):
    """Estimate the RAM taken by the preview slice stack and display it.

    A preview slice is previewSliceWidth pixels wide and scaled in
    height by the projector aspect ratio; each slice additionally
    carries the fixed 112 byte numpy array overhead.  The estimate is
    written into the memory label and returned in megabytes (float).
    """
    sliceWidth = self.settings['previewSliceWidth'].value
    aspectRatio = float(self.settings['projectorSizeY'].value) / float(self.settings['projectorSizeX'].value)
    sliceHeight = sliceWidth * aspectRatio
    # One byte per pixel plus numpy overhead, converted to MB.
    megabytesPerSlice = (sliceWidth * sliceHeight + 112) / 1000000.
    stackMegabytes = self.settings['previewSlicesMax'].value * megabytesPerSlice
    self.labelSlicerMemory.setText('Memory usage: ~' + str(int(stackMegabytes)) + " MB.")
    return stackMegabytes
def setRestartFlag(self):
# Hooked into settings entries whose change requires restarting monkeyprint.
self.restartMonkeyprint = True
def setResetGuiFlag(self):
# Hooked into settings entries whose change requires rebuilding the gui
# (e.g. build volume dimensions); evaluated in the close handler.
self.resetGui = True
def setResliceFlag(self):
# Hooked into settings entries whose change invalidates the preview
# slice stack (e.g. projector size); evaluated in the close handler.
self.reslice = True
# Serial connect function.
def callbackSerialTest(self, widget, data=None):
# Test the printer serial connection by sending a ping command and
# polling the reply queue from a periodic listener.
# NOTE(review): set_sensitive/set_label and gobject.timeout_add are
# GTK-era APIs while the rest of this file uses Qt -- confirm this
# legacy path is still wired up before relying on it.
# Create communication queues.
self.queueSerial = Queue.Queue()
queueSerialCommands = Queue.Queue()
self.command = ["ping", None, True, None] # No value, retry, don't wait.
# Make button insensitive.
self.buttonSerialTest.set_sensitive(False)
self.buttonSerialTest.set_label(" Wait... ")
self.consoleSerial.addLine("Connecting...")
# Start queue listener.
# Poll every 500 ms; the listener removes itself by returning False.
listenerIdSerial = gobject.timeout_add(500, self.listenerSerialThread)
self.serial = monkeyprintSerial.printer(self.settings, self.queueSerial, queueSerialCommands)
# Send ping.
# Only send if the port actually opened (serial is None on failure).
if self.serial.serial != None:
self.serial.send(self.command)
def listenerSerialThread(self):
# Periodic callback that drains the serial reply queue into the console.
# Returning True keeps the timeout alive, returning False removes it.
# If a message is in the queue...
if self.queueSerial.qsize():
# Get the message and display it.
message = self.queueSerial.get()
self.consoleSerial.addLine(message)
self.consoleSerial.addLine("")
# Check if the message was the end message.
# Either the ping succeeded or the printer gave up retrying.
if message == "Command \"" + self.command[0] + "\" sent successfully." or message == "Printer not responding. Giving up...":
# Restore send button.
self.buttonSerialTest.set_sensitive(True)
self.buttonSerialTest.set_label("Test serial")
# Close and delete serial.
self.serial.stop()
self.serial.close()
del self.serial
# Return False to remove listener from timeout.
return False
else:
return True
else:
# Add a dot to the console to let people know the program is not blocked...
self.consoleSerial.addString(".")
# Return True to keep listener in timeout.
return True
# Defaults function.
def callbackDefaults(self, widget, data=None):
# Restore every setting to its default value and refresh the
# calibration image preview to match.
# Load default settings.
self.settings.loadDefaults()
self.imageContainer.updateImage()
# Destroy function.
def callbackClose(self, widget, data=None):
# Apply the dialog's settings on close: persist the print process list,
# recompute the pixel-per-mm print resolution, and refresh/reset the
# parent window depending on which change flags were set while editing.
# NOTE(review): the 'pass' below is a redundant no-op left in the code.
pass
# Delete the calibration image in case it was just added.
if (self.settings['calibrationImage'].value == False):
self.imageContainer.deleteImageFile()
'''
# Restart the file transmission thread.
if self.settings['printOnRaspberry'].value:
ipFileClient = self.settings['ipAddressRaspi'].value
portFileClient = self.settings['fileTransmissionPortRaspi'].value
if self.parentWindow.threadFileTransmission != None:
self.parentWindow.threadFileTransmission.join(100)
self.parentWindow.threadFileTransmission.reset(ipFileClient, portFileClient)
self.parentWindow.threadFileTransmission.run()
# Restart the communication socket.
if self.settings['printOnRaspberry'].value:
ipCommClient = self.settings['ipAddressRaspi'].value
portCommClient = self.settings['networkPortRaspi'].value
self.parentWindow.socket.reset(ipCommClient, portCommClient)
'''
# Set print process modules to settings.
self.settings.setPrintProcessList(self.tabPrintProcessSettings.getPrintProcessList())
# Set print resolution.
# self.settings['pxPerMm'].value = self.settings['projectorSizeX'].value / self.settings['buildSizeX'].value
# Resolution is derived from projector pixels per build volume mm.
self.settings['pxPerMmX'].setValue(self.settings['projectorSizeX'].value / self.settings['buildSizeX'].value)
self.settings['pxPerMmY'].setValue(self.settings['projectorSizeY'].value / self.settings['buildSizeY'].value)
# Update parent window in response to changing boards.
self.parent.updateAllEntries(render=True)
# resetGui takes precedence over reslice: a full gui reset implies
# the models get re-processed anyway.
if self.resetGui:
# Reset build volume box.
self.parent.renderView.buildVolume.resize((self.settings['buildSizeX'].value, self.settings['buildSizeY'].value, self.settings['buildSizeZ'].value))
self.parent.notebookSettings.setCurrentPage(0)
if len(self.parent.modelCollection) > 1:
self.parent.setGuiState(1)
else:
self.parent.setGuiState(0)
self.parent.updateAllModels()
elif self.reslice:
# Set to slicer page if currently in print page.
if self.parent.notebookSettings.getCurrentPage() == 3:
self.parent.notebookSettings.setCurrentPage(2)
# Set gui state if more than the empty default model exists.
if len(self.parent.modelCollection) > 1:
self.parent.setGuiState(2)
self.parent.updateAllModels()
self.parent.updateSlider()
# Close.
self.close()
|
robotsinthesun/monkeyprint
|
monkeyprintGui.py
|
Python
|
gpl-2.0
| 60,767
|
[
"VTK"
] |
57f6549c9d22a712df65ff65206ed4b7f67d0a934b676010d060bf40351d66ff
|
# This file is part of the Fluggo Media Library for high-quality
# video and audio processing.
#
# Copyright 2010 Brian J. Crowell <brian@fluggo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from fluggo import sortlist, signal
from fluggo.editor import model, plugins
from fluggo.media import process
class SpaceAudioManager(plugins.AudioStream):
# Audio stream for a canvas space: mirrors every audio item of the
# space into a process.AudioWorkspace and keeps the two in sync as
# items are added, removed, or edited.
class ItemWatcher(object):
# Bridges one canvas item to its workspace item: subscribes to the
# canvas item's updated signal and forwards geometry changes
# (x/length/offset) to the workspace representation.
def __init__(self, owner, canvas_item, workspace_item, stream):
self.owner = owner
self.canvas_item = canvas_item
self.workspace_item = workspace_item
self.canvas_item.updated.connect(self.handle_updated)
self.stream = stream
def handle_updated(self, **kw):
# Raise the frames_updated signal if the content of frames changed
if 'x' in kw or 'length' in kw or 'offset' in kw:
# Capture old geometry before applying the new values so the
# changed regions can be computed below.
old_x, old_length, old_offset = self.workspace_item.x, self.workspace_item.length, self.workspace_item.offset
new_x, new_length, new_offset = kw.get('x', old_x), kw.get('length', old_length), kw.get('offset', old_offset)
old_right, new_right = old_x + old_length, new_x + new_length
self.workspace_item.update(
x=kw.get('x', old_x),
length=kw.get('length', old_length),
offset=kw.get('offset', old_offset)
)
# Update the currently displayed frame if it's in a changed region
# Left edge moved: frames between the old and new left edges changed.
if old_x != new_x:
self.owner.frames_updated(min(old_x, new_x), max(old_x, new_x) - 1)
# Right edge moved: frames between the old and new right edges changed.
if old_right != new_right:
self.owner.frames_updated(min(old_right, new_right), max(old_right, new_right) - 1)
# Source alignment (x - offset) changed: the overlapping interior
# region now shows different source material.
if old_x - old_offset != new_x - new_offset:
self.owner.frames_updated(max(old_x, new_x), min(old_right, new_right) - 1)
def unwatch(self):
# Disconnect from the canvas item so this watcher can be dropped.
self.canvas_item.updated.disconnect(self.handle_updated)
def __init__(self, canvas_space, source_list):
# Build the workspace, wire up add/remove notifications from the
# space, and register a watcher for every pre-existing audio item.
self.workspace = process.AudioWorkspace()
format = canvas_space.audio_format
plugins.AudioStream.__init__(self, self.workspace, format)
self.canvas_space = canvas_space
self.canvas_space.item_added.connect(self.handle_item_added)
self.canvas_space.item_removed.connect(self.handle_item_removed)
self.source_list = source_list
# Maps id(canvas item) -> ItemWatcher for items currently tracked.
self.watchers = {}
for item in canvas_space:
if item.type() == 'audio':
self.handle_item_added(item)
def handle_item_added(self, item):
# Create a workspace item plus watcher for a newly added audio item.
if not isinstance(item, model.Item):
return
if item.type() != 'audio':
return
stream = None
offset = 0
if isinstance(item, model.Sequence):
raise NotImplementedError('Need a SequenceAudioManager here')
elif hasattr(item, 'source'):
stream = model.AudioSourceRefConnector(self.source_list, item.source, model_obj=item)
offset = item.offset
# Propagate alerts from the item's stream to this manager.
self.follow_alerts(stream)
workspace_item = self.workspace.add(x=item.x, length=item.length, offset=offset, source=stream)
watcher = self.ItemWatcher(self, item, workspace_item, stream)
self.watchers[id(item)] = watcher
def handle_item_removed(self, item):
# Tear down the watcher and workspace item for a removed audio item.
if item.type() != 'audio':
return
watcher = self.watchers.pop(id(item))
watcher.unwatch()
self.unfollow_alerts(watcher.stream)
self.workspace.remove(watcher.workspace_item)
|
fluggo/Canvas
|
fluggo/editor/graph/audio.py
|
Python
|
gpl-3.0
| 4,204
|
[
"Brian"
] |
19e6c72802db6291494511b750bc3b4bf9db00a486a3b346f0c50c4be777b322
|
# SCDEwrapper/SCDEwrapper.py - a self annotated version of rgToolFactory.py generated by running rgToolFactory.py
# to make a new Galaxy tool called SCDEwrapper
# User admin@galaxy.org at 16/04/2015 10:09:46
# rgToolFactory.py
# see https://bitbucket.org/fubar/galaxytoolfactory/wiki/Home
#
# copyright ross lazarus (ross stop lazarus at gmail stop com) May 2012
#
# all rights reserved
# Licensed under the LGPL
# suggestions for improvement and bug fixes welcome at https://bitbucket.org/fubar/galaxytoolfactory/wiki/Home
#
# August 2014
# merged John Chilton's citation addition and ideas from Marius van den Beek to enable arbitrary
# data types for input and output - thanks!
#
# march 2014
# had to remove dependencies because cross toolshed dependencies are not possible - can't pre-specify a toolshed url for graphicsmagick and ghostscript
# grrrrr - night before a demo
# added dependencies to a tool_dependencies.xml if html page generated so generated tool is properly portable
#
# added ghostscript and graphicsmagick as dependencies
# fixed a wierd problem where gs was trying to use the new_files_path from universe (database/tmp) as ./database/tmp
# errors ensued
#
# august 2013
# found a problem with GS if $TMP or $TEMP missing - now inject /tmp and warn
#
# july 2013
# added ability to combine images and individual log files into html output
# just make sure there's a log file foo.log and it will be output
# together with all images named like "foo_*.pdf
# otherwise old format for html
#
# January 2013
# problem pointed out by Carlos Borroto
# added escaping for <>$ - thought I did that ages ago...
#
# August 11 2012
# changed to use shell=False and cl as a sequence
# This is a Galaxy tool factory for simple scripts in python, R or whatever ails ye.
# It also serves as the wrapper for the new tool.
#
# you paste and run your script
# Only works for simple scripts that read one input from the history.
# Optionally can write one new history dataset,
# and optionally collect any number of outputs into links on an autogenerated HTML page.
# DO NOT install on a public or important site - please.
# installed generated tools are fine if the script is safe.
# They just run normally and their user cannot do anything unusually insecure
# but please, practice safe toolshed.
# Read the fucking code before you install any tool
# especially this one
# After you get the script working on some test data, you can
# optionally generate a toolshed compatible gzip file
# containing your script safely wrapped as an ordinary Galaxy script in your local toolshed for
# safe and largely automated installation in a production Galaxy.
# If you opt for an HTML output, you get all the script outputs arranged
# as a single Html history item - all output files are linked, thumbnails for all the pdfs.
# Ugly but really inexpensive.
#
# Patches appreciated please.
#
#
# long route to June 2012 product
# Behold the awesome power of Galaxy and the toolshed with the tool factory to bind them
# derived from an integrated script model
# called rgBaseScriptWrapper.py
# Note to the unwary:
# This tool allows arbitrary scripting on your Galaxy as the Galaxy user
# There is nothing stopping a malicious user doing whatever they choose
# Extremely dangerous!!
# Totally insecure. So, trusted users only
#
# preferred model is a developer using their throw away workstation instance - ie a private site.
# no real risk. The universe_wsgi.ini admin_users string is checked - only admin users are permitted to run this tool.
#
import sys
import shutil
import subprocess
import os
import time
import tempfile
import optparse
import tarfile
import re
import shutil
import math
# Module-level constants for the tool factory script.
# Name this script was invoked as (used in generated html output).
progname = os.path.split(sys.argv[0])[1]
myversion = 'V001.1 March 2014'
verbose = False
debug = False
# Upstream project page, cited in generated tool help text.
toolFactoryURL = 'https://bitbucket.org/fubar/galaxytoolfactory'
# if we do html we need these dependencies specified in a tool_dependencies.xml file and referred to in the generated
# tool xml
# Skeleton for tool_dependencies.xml; %s receives the readme/help text.
toolhtmldepskel = """<?xml version="1.0"?>
<tool_dependency>
<package name="ghostscript" version="9.10">
<repository name="package_ghostscript_9_10" owner="devteam" prior_installation_required="True" />
</package>
<package name="graphicsmagick" version="1.3.18">
<repository name="package_graphicsmagick_1_3" owner="iuc" prior_installation_required="True" />
</package>
<readme>
%s
</readme>
</tool_dependency>
"""
# <requirements> block injected into generated tool xml when html output
# is enabled (ghostscript/graphicsmagick are needed for pdf thumbnails).
protorequirements = """<requirements>
<requirement type="package" version="9.10">ghostscript</requirement>
<requirement type="package" version="1.3.18">graphicsmagick</requirement>
</requirements>"""
def timenow():
    """Return the current local time as a 'dd/mm/YYYY HH:MM:SS' string."""
    now = time.localtime(time.time())
    return time.strftime('%d/%m/%Y %H:%M:%S', now)
# Characters that must be replaced before script text is embedded in the
# generated tool XML <configfile> section: standard XML entities plus a
# backslash-escaped dollar sign so Mako templating leaves it alone.
# (The table had been corrupted into identity mappings, which made
# html_escape a no-op for markup characters; entities restored.)
html_escape_table = {
    "&": "&amp;",
    ">": "&gt;",
    "<": "&lt;",
    "$": "\\$"
}

def html_escape(text):
    """Return *text* with XML/Mako special characters escaped as entities."""
    return "".join(html_escape_table.get(c, c) for c in text)
def cmd_exists(cmd):
    """Return True when *cmd* resolves to a runnable command in the shell.

    Uses the shell builtin 'type', which exits 0 for builtins, aliases
    and executables on PATH; output is discarded.
    """
    probe = "type " + cmd
    status = subprocess.call(probe, shell=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return status == 0
def parse_citations(citations_text):
    """Split a raw citations blob into (type, content) tuples.

    Entries are separated by the literal marker **ENTRY**; each entry is
    expected to begin with 'doi' or 'bibtex', and anything that does not
    start with 'doi' is treated as bibtex.
    """
    parsed = []
    for entry in citations_text.split("**ENTRY**"):
        if not entry.strip():
            continue
        if entry.startswith("doi"):
            parsed.append(("doi", entry[len("doi"):].strip()))
        else:
            parsed.append(("bibtex", entry[len("bibtex"):].strip()))
    return parsed
class ScriptRunner:
"""class is a wrapper for an arbitrary script
"""
def __init__(self,opts=None,treatbashSpecial=True):
"""
cleanup inputs, setup some outputs
"""
# Detect available external tools for pdf thumbnailing later on.
self.useGM = cmd_exists('gm')
self.useIM = cmd_exists('convert')
self.useGS = cmd_exists('gs')
self.temp_warned = False # we want only one warning if $TMP not set
self.treatbashSpecial = treatbashSpecial
if opts.output_dir: # simplify for the tool tarball
os.chdir(opts.output_dir)
self.thumbformat = 'png'
self.opts = opts
# Tool name is sanitised to a safe identifier for filenames/xml ids.
self.toolname = re.sub('[^a-zA-Z0-9_]+', '', opts.tool_name) # a sanitizer now does this but..
self.toolid = self.toolname
self.myname = sys.argv[0] # get our name because we write ourselves out as a tool later
self.pyfile = self.myname # crude but efficient - the cruft won't hurt much
self.xmlfile = '%s.xml' % self.toolname
# Read the user's script and normalise line endings.
s = open(self.opts.script_path,'r').readlines()
s = [x.rstrip() for x in s] # remove pesky dos line endings if needed
self.script = '\n'.join(s)
# Write the script to a temp file used as the interpreter's input.
fhandle,self.sfile = tempfile.mkstemp(prefix=self.toolname,suffix=".%s" % (opts.interpreter))
tscript = open(self.sfile,'w') # use self.sfile as script source for Popen
tscript.write(self.script)
tscript.close()
# Escaped copies of the script for embedding in xml help/configfile.
self.indentedScript = '\n'.join([' %s' % html_escape(x) for x in s]) # for restructured text in help
self.escapedScript = '\n'.join([html_escape(x) for x in s])
self.elog = os.path.join(self.opts.output_dir,"%s_error.log" % self.toolname)
if opts.output_dir: # may not want these complexities
self.tlog = os.path.join(self.opts.output_dir,"%s_runner.log" % self.toolname)
art = '%s.%s' % (self.toolname,opts.interpreter)
artpath = os.path.join(self.opts.output_dir,art) # need full path
artifact = open(artpath,'w') # use self.sfile as script source for Popen
artifact.write(self.script)
artifact.close()
# Build the command line for running the user's script.
self.cl = []
self.html = []
a = self.cl.append
a(opts.interpreter)
# Bash/sh get the script path directly; other interpreters read stdin.
if self.treatbashSpecial and opts.interpreter in ['bash','sh']:
a(self.sfile)
else:
a('-') # stdin
a(opts.input_tab)
a(opts.output_tab)
# Extra positional arguments specific to this (SCDE) wrapper variant.
a(opts.notuse_flex)
a(opts.dropout_base)
a(opts.failed_base)
a(opts.count_fpkm)
self.outputFormat = self.opts.output_format
self.inputFormats = self.opts.input_formats
# Canonical filenames for the generated tool's test-data fixtures.
self.test1Input = '%s_test1_input.xls' % self.toolname
self.test1Output = '%s_test1_output.xls' % self.toolname
self.test1HTML = '%s_test1_output.html' % self.toolname
def makeXML(self):
"""
Create a Galaxy xml tool wrapper for the new script as a string to write out
fixme - use templating or something less fugly than this example of what we produce
<tool id="reverse" name="reverse" version="0.01">
<description>a tabular file</description>
<command interpreter="python">
reverse.py --script_path "$runMe" --interpreter "python"
--tool_name "reverse" --input_tab "$input1" --output_tab "$tab_file"
</command>
<inputs>
<param name="input1" type="data" format="tabular" label="Select a suitable input file from your history"/><param name="job_name" type="text" label="Supply a name for the outputs to remind you what they contain" value="reverse"/>
</inputs>
<outputs>
<data format="tabular" name="tab_file" label="${job_name}"/>
</outputs>
<help>
**What it Does**
Reverse the columns in a tabular file
</help>
<configfiles>
<configfile name="runMe">
# reverse order of columns in a tabular file
import sys
inp = sys.argv[1]
outp = sys.argv[2]
i = open(inp,'r')
o = open(outp,'w')
for row in i:
rs = row.rstrip().split('\t')
rs.reverse()
o.write('\t'.join(rs))
o.write('\n')
i.close()
o.close()
</configfile>
</configfiles>
</tool>
"""
# Outer tool xml skeleton, filled via %(key)s string interpolation.
newXML="""<tool id="%(toolid)s" name="%(toolname)s" version="%(tool_version)s">
%(tooldesc)s
%(requirements)s
<command interpreter="python">
%(command)s
</command>
<inputs>
%(inputs)s
</inputs>
<outputs>
%(outputs)s
</outputs>
<configfiles>
<configfile name="runMe">
%(script)s
</configfile>
</configfiles>
%(tooltests)s
<help>
%(help)s
</help>
<citations>
%(citations)s
<citation type="doi">10.1093/bioinformatics/bts573</citation>
</citations>
</tool>""" # needs a dict with toolname, toolid, interpreter, scriptname, command, inputs as a multi line string ready to write, outputs ditto, help ditto
newCommand="""
%(toolname)s.py --script_path "$runMe" --interpreter "%(interpreter)s"
--tool_name "%(toolname)s" %(command_inputs)s %(command_outputs)s """
# may NOT be an input or htmlout - appended later
# Three <tests> skeletons, chosen by which outputs the tool produces.
tooltestsTabOnly = """
<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="%(inputFormats)s"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="tab_file" file="%(test1Output)s" ftype="%(outputFormat)s"/>
</test>
</tests>
"""
tooltestsHTMLOnly = """
<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="%(inputFormats)s"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="html_file" file="%(test1HTML)s" ftype="html" lines_diff="5"/>
</test>
</tests>
"""
tooltestsBoth = """<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="%(inputFormats)s"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="tab_file" file="%(test1Output)s" ftype="%(outputFormat)s" />
<output name="html_file" file="%(test1HTML)s" ftype="html" lines_diff="10"/>
</test>
</tests>
"""
# Assemble the interpolation dict for the skeletons above.
xdict = {}
xdict['outputFormat'] = self.outputFormat
xdict['inputFormats'] = self.inputFormats
xdict['requirements'] = ''
if self.opts.make_HTML:
if self.opts.include_dependencies == "yes":
xdict['requirements'] = protorequirements
xdict['tool_version'] = self.opts.tool_version
xdict['test1Input'] = self.test1Input
xdict['test1HTML'] = self.test1HTML
xdict['test1Output'] = self.test1Output
# Pick the tests skeleton matching the tool's output combination.
if self.opts.make_HTML and self.opts.output_tab <> 'None':
xdict['tooltests'] = tooltestsBoth % xdict
elif self.opts.make_HTML:
xdict['tooltests'] = tooltestsHTMLOnly % xdict
else:
xdict['tooltests'] = tooltestsTabOnly % xdict
xdict['script'] = self.escapedScript
# configfile is least painful way to embed script to avoid external dependencies
# but requires escaping of <, > and $ to avoid Mako parsing
if self.opts.help_text:
helptext = open(self.opts.help_text,'r').readlines()
helptext = [html_escape(x) for x in helptext] # must html escape here too - thanks to Marius van den Beek
xdict['help'] = ''.join([x for x in helptext])
else:
xdict['help'] = 'Please ask the tool author (%s) for help as none was supplied at tool generation\n' % (self.opts.user_email)
if self.opts.citations:
citationstext = open(self.opts.citations,'r').read()
citation_tuples = parse_citations(citationstext)
citations_xml = ""
for citation_type, citation_content in citation_tuples:
citation_xml = """<citation type="%s">%s</citation>""" % (citation_type, html_escape(citation_content))
citations_xml += citation_xml
xdict['citations'] = citations_xml
else:
xdict['citations'] = ""
# Append the script source and attribution blurb to the help text.
coda = ['**Script**','Pressing execute will run the following code over your input file and generate some outputs in your history::']
coda.append('\n')
coda.append(self.indentedScript)
coda.append('\n**Attribution**\nThis Galaxy tool was created by %s at %s\nusing the Galaxy Tool Factory.\n' % (self.opts.user_email,timenow()))
coda.append('See %s for details of that project' % (toolFactoryURL))
coda.append('Please cite: Creating re-usable tools from scripts: The Galaxy Tool Factory. Ross Lazarus; Antony Kaspi; Mark Ziemann; The Galaxy Team. ')
coda.append('Bioinformatics 2012; doi: 10.1093/bioinformatics/bts573\n')
xdict['help'] = '%s\n%s' % (xdict['help'],'\n'.join(coda))
if self.opts.tool_desc:
xdict['tooldesc'] = '<description>%s</description>' % self.opts.tool_desc
else:
xdict['tooldesc'] = ''
# Build the <inputs>/<outputs> sections and matching command fragments.
xdict['command_outputs'] = ''
xdict['outputs'] = ''
if self.opts.input_tab <> 'None':
xdict['command_inputs'] = '--input_tab "$input1" ' # the space may matter a lot if we append something
xdict['inputs'] = '<param name="input1" type="data" format="%s" label="Select a suitable input file from your history"/> \n' % self.inputFormats
else:
xdict['command_inputs'] = '' # assume no input - eg a random data generator
xdict['inputs'] = ''
xdict['inputs'] += '<param name="job_name" type="text" label="Supply a name for the outputs to remind you what they contain" value="%s"/> \n' % self.toolname
xdict['toolname'] = self.toolname
xdict['toolid'] = self.toolid
xdict['interpreter'] = self.opts.interpreter
xdict['scriptname'] = self.sfile
if self.opts.make_HTML:
xdict['command_outputs'] += ' --output_dir "$html_file.files_path" --output_html "$html_file" --make_HTML "yes"'
xdict['outputs'] += ' <data format="html" name="html_file" label="${job_name}.html"/>\n'
else:
xdict['command_outputs'] += ' --output_dir "./"'
if self.opts.output_tab <> 'None':
xdict['command_outputs'] += ' --output_tab "$tab_file"'
xdict['outputs'] += ' <data format="%s" name="tab_file" label="${job_name}"/>\n' % self.outputFormat
xdict['command'] = newCommand % xdict
# Interpolate the full tool xml and write it next to the script.
xmls = newXML % xdict
xf = open(self.xmlfile,'w')
xf.write(xmls)
xf.write('\n')
xf.close()
# ready for the tarball
# ready for the tarball
def makeTooltar(self):
"""
a tool is a gz tarball with eg
/toolname/tool.xml /toolname/tool.py /toolname/test-data/test1_in.foo ...
"""
# Run the script first; only package the tool if the run succeeded.
retval = self.run()
if retval:
print >> sys.stderr,'## Run failed. Cannot build yet. Please fix and retry'
sys.exit(1)
tdir = self.toolname
os.mkdir(tdir)
self.makeXML()
# Optional tool_dependencies.xml for html-producing tools.
if self.opts.make_HTML:
if self.opts.help_text:
hlp = open(self.opts.help_text,'r').read()
else:
hlp = 'Please ask the tool author for help as none was supplied at tool generation\n'
if self.opts.include_dependencies:
tooldepcontent = toolhtmldepskel % hlp
depf = open(os.path.join(tdir,'tool_dependencies.xml'),'w')
depf.write(tooldepcontent)
depf.write('\n')
depf.close()
# Copy this run's inputs/outputs into test-data as tool test fixtures.
if self.opts.input_tab <> 'None': # no reproducible test otherwise? TODO: maybe..
testdir = os.path.join(tdir,'test-data')
os.mkdir(testdir) # make tests directory
shutil.copyfile(self.opts.input_tab,os.path.join(testdir,self.test1Input))
if self.opts.output_tab <> 'None':
shutil.copyfile(self.opts.output_tab,os.path.join(testdir,self.test1Output))
if self.opts.make_HTML:
shutil.copyfile(self.opts.output_html,os.path.join(testdir,self.test1HTML))
if self.opts.output_dir:
shutil.copyfile(self.tlog,os.path.join(testdir,'test1_out.log'))
# Write an annotated copy of this very script as the tool's wrapper.
outpif = '%s.py' % self.toolname # new name
outpiname = os.path.join(tdir,outpif) # path for the tool tarball
pyin = os.path.basename(self.pyfile) # our name - we rewrite ourselves (TM)
notes = ['# %s - a self annotated version of %s generated by running %s\n' % (outpiname,pyin,pyin),]
notes.append('# to make a new Galaxy tool called %s\n' % self.toolname)
notes.append('# User %s at %s\n' % (self.opts.user_email,timenow()))
pi = open(self.pyfile,'r').readlines() # our code becomes new tool wrapper (!) - first Galaxy worm
notes += pi
outpi = open(outpiname,'w')
outpi.write(''.join(notes))
outpi.write('\n')
outpi.close()
# Include the raw script and the generated xml in the package.
stname = os.path.join(tdir,self.sfile)
if not os.path.exists(stname):
shutil.copyfile(self.sfile, stname)
xtname = os.path.join(tdir,self.xmlfile)
if not os.path.exists(xtname):
shutil.copyfile(self.xmlfile,xtname)
# Tar up the tool directory, deliver it, and clean up.
tarpath = "%s.gz" % self.toolname
tar = tarfile.open(tarpath, "w:gz")
tar.add(tdir,arcname=self.toolname)
tar.close()
shutil.copyfile(tarpath,self.opts.new_tool)
shutil.rmtree(tdir)
## TODO: replace with optional direct upload to local toolshed?
return retval
def compressPDF(self,inpdf=None,thumbformat='png'):
"""need absolute path to pdf
note that GS gets confoozled if no $TMP or $TEMP
so we set it
"""
# Compress a pdf in place with ghostscript, then render a thumbnail
# with graphicsmagick (or imagemagick).  Returns 0 on full success.
assert os.path.isfile(inpdf), "## Input %s supplied to %s compressPDF not found" % (inpdf,self.myName)
hlog = os.path.join(self.opts.output_dir,"compress_%s.txt" % os.path.basename(inpdf))
sto = open(hlog,'a')
# Ghostscript needs a usable temp dir; inject one if $TMP/$TEMP unset.
our_env = os.environ.copy()
our_tmp = our_env.get('TMP',None)
if not our_tmp:
our_tmp = our_env.get('TEMP',None)
if not (our_tmp and os.path.exists(our_tmp)):
newtmp = os.path.join(self.opts.output_dir,'tmp')
try:
os.mkdir(newtmp)
except:
sto.write('## WARNING - cannot make %s - it may exist or permissions need fixing\n' % newtmp)
our_env['TEMP'] = newtmp
if not self.temp_warned:
sto.write('## WARNING - no $TMP or $TEMP!!! Please fix - using %s temporarily\n' % newtmp)
self.temp_warned = True
# Rewrite the pdf through gs at printer quality to shrink it.
outpdf = '%s_compressed' % inpdf
cl = ["gs", "-sDEVICE=pdfwrite", "-dNOPAUSE", "-dUseCIEColor", "-dBATCH","-dPDFSETTINGS=/printer", "-sOutputFile=%s" % outpdf,inpdf]
x = subprocess.Popen(cl,stdout=sto,stderr=sto,cwd=self.opts.output_dir,env=our_env)
retval1 = x.wait()
sto.close()
# On success, replace the original with the compressed copy.
if retval1 == 0:
os.unlink(inpdf)
shutil.move(outpdf,inpdf)
os.unlink(hlog)
# Render the first page as a thumbnail image.
hlog = os.path.join(self.opts.output_dir,"thumbnail_%s.txt" % os.path.basename(inpdf))
sto = open(hlog,'w')
outpng = '%s.%s' % (os.path.splitext(inpdf)[0],thumbformat)
if self.useGM:
cl2 = ['gm', 'convert', inpdf, outpng]
else: # assume imagemagick
cl2 = ['convert', inpdf + '[0]', outpng]
x = subprocess.Popen(cl2,stdout=sto,stderr=sto,cwd=self.opts.output_dir,env=our_env)
retval2 = x.wait()
sto.close()
if retval2 == 0:
os.unlink(hlog)
# Nonzero if either compression or thumbnailing failed.
retval = retval1 or retval2
return retval
def getfSize(self, fpath, outpath):
    """Return a human readable size string for the file outpath/fpath.

    Yields '' when the joined path is not an existing regular file,
    otherwise a string such as '0 B', '512 B', '3.2 KB' or '1.5 MB'.
    """
    target = os.path.join(outpath, fpath)
    if not os.path.isfile(target):
        return ''
    nbytes = float(os.path.getsize(target))
    if nbytes > 2**20:
        return '%1.1f MB' % (nbytes / 2**20)
    if nbytes > 2**10:
        return '%1.1f KB' % (nbytes / 2**10)
    if nbytes > 0:
        return '%d B' % (int(nbytes))
    # Existing but empty file.
    return '0 B'
def makeHtml(self):
""" Create an HTML file content to list all the artifacts found in the output_dir
"""
galhtmlprefix = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Galaxy %s tool output - see http://g2.trac.bx.psu.edu/" />
<title></title>
<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
</head>
<body>
<div class="toolFormBody">
"""
galhtmlattr = """<hr/><div class="infomessage">This tool (%s) was generated by the <a href="https://bitbucket.org/fubar/galaxytoolfactory/overview">Galaxy Tool Factory</a></div><br/>"""
galhtmlpostfix = """</div></body></html>\n"""
flist = os.listdir(self.opts.output_dir)
flist = [x for x in flist if x <> 'Rplots.pdf']
flist.sort()
html = []
html.append(galhtmlprefix % progname)
html.append('<div class="infomessage">Galaxy Tool "%s" run at %s</div><br/>' % (self.toolname,timenow()))
fhtml = []
if len(flist) > 0:
logfiles = [x for x in flist if x.lower().endswith('.log')] # log file names determine sections
logfiles.sort()
logfiles = [x for x in logfiles if os.path.abspath(x) <> os.path.abspath(self.tlog)]
logfiles.append(os.path.abspath(self.tlog)) # make it the last one
pdflist = []
npdf = len([x for x in flist if os.path.splitext(x)[-1].lower() == '.pdf' or os.path.splitext(x)[-1].lower() == '.png'])
for rownum,fname in enumerate(flist):
dname,e = os.path.splitext(fname)
sfsize = self.getfSize(fname,self.opts.output_dir)
if e.lower() == '.pdf' or e.lower() == '.png' : # compress and make a thumbnail
thumb = '%s.%s' % (dname,self.thumbformat)
pdff = os.path.join(self.opts.output_dir,fname)
retval = self.compressPDF(inpdf=pdff,thumbformat=self.thumbformat)
if retval == 0:
pdflist.append((fname,thumb))
else:
pdflist.append((fname,fname))
if (rownum+1) % 2 == 0:
fhtml.append('<tr class="odd_row"><td><a href="%s">%s</a></td><td>%s</td></tr>' % (fname,fname,sfsize))
else:
fhtml.append('<tr><td><a href="%s">%s</a></td><td>%s</td></tr>' % (fname,fname,sfsize))
for logfname in logfiles: # expect at least tlog - if more
if os.path.abspath(logfname) == os.path.abspath(self.tlog): # handled later
sectionname = 'All tool run'
if (len(logfiles) > 1):
sectionname = 'Other'
ourpdfs = pdflist
else:
realname = os.path.basename(logfname)
sectionname = os.path.splitext(realname)[0].split('_')[0] # break in case _ added to log
ourpdfs = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] == sectionname]
pdflist = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] <> sectionname] # remove
nacross = 1
npdf = len(ourpdfs)
#if npdf > 0:
# nacross = math.sqrt(npdf) ## int(round(math.log(npdf,2)))
# if int(nacross)**2 != npdf:
# nacross += 1
# nacross = int(nacross)
# width = min(400,int(1200/nacross))
# html.append('<div class="toolFormTitle">%s images and outputs</div>' % sectionname)
# html.append('(Click on a thumbnail image to download the corresponding original PDF image)<br/>')
# ntogo = nacross # counter for table row padding with empty cells
# html.append('<div><table class="simple" cellpadding="2" cellspacing="2">\n<tr>')
# for i,paths in enumerate(ourpdfs):
# fname,thumb = paths
# s= """<td><a href="%s"><img src="%s" title="Click to download a PDF of %s" hspace="5" width="%d"
# alt="Image called %s"/></a></td>\n""" % (fname,thumb,fname,width,fname)
# if ((i+1) % nacross == 0):
# s += '</tr>\n'
# ntogo = 0
# if i < (npdf - 1): # more to come
# s += '<tr>'
# ntogo = nacross
# else:
# ntogo -= 1
# html.append(s)
# if html[-1].strip().endswith('</tr>'):
# html.append('</table></div>\n')
# else:
# if ntogo > 0: # pad
# html.append('<td> </td>'*ntogo)
# html.append('</tr></table></div>\n')
#logt = open(logfname,'r').readlines()
#logtext = [x for x in logt if x.strip() > '']
#html.append('<div class="toolFormTitle">%s log output</div>' % sectionname)
#if len(logtext) > 1:
# html.append('\n<pre>\n')
# html += logtext
# html.append('\n</pre>\n')
#else:
# html.append('%s is empty<br/>' % logfname)
if len(fhtml) > 0:
fhtml.insert(0,'<div><table class="colored" cellpadding="3" cellspacing="3"><tr><th>Output File Name (click to view)</th><th>Size</th></tr>\n')
fhtml.append('</table></div><br/>')
html.append('<div class="toolFormTitle">All output files available for downloading</div>\n')
html += fhtml # add all non-pdf files to the end of the display
else:
html.append('<div class="warningmessagelarge">### Error - %s returned no files - please confirm that parameters are sane</div>' % self.opts.interpreter)
html.append(galhtmlpostfix)
htmlf = file(self.opts.output_html,'w')
htmlf.write('\n'.join(html))
htmlf.write('\n')
htmlf.close()
self.html = html
def run(self):
"""
scripts must be small enough not to fill the pipe!
"""
if self.treatbashSpecial and self.opts.interpreter in ['bash','sh']:
retval = self.runBash()
else:
if self.opts.output_dir:
ste = open(self.elog,'w')
sto = open(self.tlog,'w')
sto.write('## Toolfactory generated command line = %s\n' % ' '.join(self.cl))
sto.flush()
p = subprocess.Popen(self.cl,shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE,stdin=subprocess.PIPE,cwd=self.opts.output_dir)
#p = subprocess.Popen(self.cl,shell=False,stdout=sto,stderr=ste,stdin=subprocess.PIPE,cwd=self.opts.output_dir)
else:
p = subprocess.Popen(self.cl,shell=False,stdin=subprocess.PIPE)
p.stdin.write(self.script)
#p.stdin.close()
stdout_data, stderr_data = p.communicate()
p.stdin.close()
#retval = p.wait()
retval = p.returncode
if self.opts.output_dir:
sto.close()
ste.close()
err = stderr_data
#err = open(self.elog,'r').readlines()
print >> sys.stdout, stdout_data
if retval <> 0 and err: # problem
print >> sys.stderr,err
if self.opts.make_HTML:
self.makeHtml()
return retval
def runBash(self):
"""
cannot use - for bash so use self.sfile
"""
if self.opts.output_dir:
s = '## Toolfactory generated command line = %s\n' % ' '.join(self.cl)
sto = open(self.tlog,'w')
sto.write(s)
sto.flush()
p = subprocess.Popen(self.cl,shell=False,stdout=sto,stderr=sto,cwd=self.opts.output_dir)
else:
p = subprocess.Popen(self.cl,shell=False)
retval = p.wait()
if self.opts.output_dir:
sto.close()
if self.opts.make_HTML:
self.makeHtml()
return retval
def main():
    # Entry point for the generated Galaxy tool wrapper.
    # u documents the expected tool.xml invocation; it is assigned but never
    # used at runtime.
    u = """
This is a Galaxy wrapper. It expects to be called by a special purpose tool.xml as:
<command interpreter="python">rgBaseScriptWrapper.py --script_path "$scriptPath" --tool_name "foo" --interpreter "Rscript"
</command>
"""
    op = optparse.OptionParser()
    # shorthand: every option below is declared via a(...)
    a = op.add_option
    a('--script_path',default=None)
    a('--tool_name',default=None)
    a('--interpreter',default=None)
    a('--output_dir',default='./')
    a('--output_html',default=None)
    a('--input_tab',default="None")
    a('--notuse_flex',default="TRUE")
    a('--dropout_base',default="4")
    a('--failed_base',default="3")
    a('--input_formats',default="tabular,text")
    a('--output_tab',default="None")
    a('--output_format',default="tabular")
    a('--user_email',default='Unknown')
    a('--bad_user',default=None)
    a('--make_Tool',default=None)
    a('--make_HTML',default=None)
    a('--help_text',default=None)
    a('--citations',default=None)
    a('--tool_desc',default=None)
    a('--new_tool',default=None)
    a('--tool_version',default=None)
    a('--include_dependencies',default=None)
    a('--count_fpkm',default=None)
    opts, args = op.parse_args()
    # NOTE(review): assert-based validation is stripped under `python -O`;
    # presumably acceptable for this wrapper -- confirm.
    assert not opts.bad_user,'UNAUTHORISED: %s is NOT authorized to use this tool until Galaxy admin adds %s to admin_users in universe_wsgi.ini' % (opts.bad_user,opts.bad_user)
    assert opts.tool_name,'## Tool Factory expects a tool name - eg --tool_name=DESeq'
    assert opts.interpreter,'## Tool Factory wrapper expects an interpreter - eg --interpreter=Rscript'
    assert os.path.isfile(opts.script_path),'## Tool Factory wrapper expects a script path - eg --script_path=foo.R'
    if opts.output_dir:
        # best-effort create; an already-existing directory is fine
        try:
            os.makedirs(opts.output_dir)
        except:
            pass
    r = ScriptRunner(opts)
    if opts.make_Tool:
        # package the script up as an installable Galaxy tool tarball
        retcode = r.makeTooltar()
    else:
        retcode = r.run()
    # remove the temporary script file regardless of outcome
    os.unlink(r.sfile)
    if retcode:
        sys.exit(retcode) # indicate failure to job runner


if __name__ == "__main__":
    main()
|
myoshimura080822/tools_of_rnaseq_on_docker_galaxy
|
SCDE_Wrapper/SCDEwrapper.py
|
Python
|
mit
| 32,783
|
[
"Galaxy"
] |
a1be52be0b6b2b74b0f229d9cf623a37b268eeffa2e02f808750182839baefe3
|
def countup(n):
    """Print n, n+1, ... up to 9, then "Blast off".

    For n >= 10 only "Blast off" is printed.  The parenthesized
    single-argument print form behaves identically under Python 2
    (where the old statement form was Python-2-only syntax) and Python 3.
    """
    if n >= 10:
        print("Blast off")
    else:
        print(n)
        # recurse until the threshold is reached
        countup(n + 1)


def main():
    # demonstrate counting from several starting points
    countup(0)
    countup(1)
    countup(-20)
    return


main()
|
suay1936/suay1936-cmis-cs2
|
countup.py
|
Python
|
cc0-1.0
| 190
|
[
"BLAST"
] |
714d1891b8e1795cf4727da8c8b812c39f4bef78282ff870784d6ddd8f17b76c
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Re-export the g-tensor property classes at package level.
from pyscf.prop.gtensor import uhf
# UHF g-tensor is always available.
UHF = uhf.GTensor
try:
    from pyscf.prop.gtensor import uks
    UKS = uks.GTensor
except ImportError:
    # The DFT (UKS) g-tensor module is optional; when it cannot be imported,
    # the package simply does not define UKS.
    pass
|
gkc1000/pyscf
|
pyscf/prop/gtensor/__init__.py
|
Python
|
apache-2.0
| 783
|
[
"PySCF"
] |
7534bee9ccd8043e7264e61e71921378f31e154d7c3cec6b3cc8c30b3f6942f3
|
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import subprocess
import sys
from abc import ABCMeta, abstractmethod
from ansible.cli.arguments import option_helpers as opt_help
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types, PY3
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.release import __version__
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
from ansible.utils.unsafe_proxy import to_unsafe_text
from ansible.vars.manager import VariableManager
# argcomplete (shell tab completion) is optional; record availability so
# parse() can enable it only when the package is installed.
try:
    import argcomplete
    HAS_ARGCOMPLETE = True
except ImportError:
    HAS_ARGCOMPLETE = False

# Module-level display instance shared by all CLI subclasses for console output.
display = Display()
class CLI(with_metaclass(ABCMeta, object)):
    ''' code behind bin/ansible* programs '''

    PAGER = 'less'

    # -F (quit-if-one-screen) -R (allow raw ansi control chars)
    # -S (chop long lines) -X (disable termcap init and de-init)
    LESS_OPTS = 'FRSX'
    SKIP_INVENTORY_DEFAULTS = False

    def __init__(self, args, callback=None):
        """
        Base init method for all command line programs
        """

        if not args:
            raise ValueError('A non-empty list for args is required')

        self.args = args
        self.parser = None
        self.callback = callback

        if C.DEVEL_WARNING and __version__.endswith('dev0'):
            display.warning(
                'You are running the development version of Ansible. You should only run Ansible from "devel" if '
                'you are modifying the Ansible engine, or trying out features under development. This is a rapidly '
                'changing source of code and can become unstable at any point.'
            )

    @abstractmethod
    def run(self):
        """Run the ansible command

        Subclasses must implement this method.  It does the actual work of
        running an Ansible command.
        """
        self.parse()

        display.vv(to_text(opt_help.version(self.parser.prog)))

        if C.CONFIG_FILE:
            display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
        else:
            display.v(u"No config file found; using defaults")

        # warn about deprecated config options
        for deprecated in C.config.DEPRECATED:
            name = deprecated[0]
            why = deprecated[1]['why']
            if 'alternatives' in deprecated[1]:
                alt = ', use %s instead' % deprecated[1]['alternatives']
            else:
                alt = ''
            ver = deprecated[1].get('version')
            date = deprecated[1].get('date')
            collection_name = deprecated[1].get('collection_name')
            display.deprecated("%s option, %s%s" % (name, why, alt),
                               version=ver, date=date, collection_name=collection_name)

    @staticmethod
    def split_vault_id(vault_id):
        # return (before_@, after_@)
        # if no @, return whole string as after_
        if '@' not in vault_id:
            return (None, vault_id)

        parts = vault_id.split('@', 1)
        ret = tuple(parts)
        return ret

    @staticmethod
    def build_vault_ids(vault_ids, vault_password_files=None,
                        ask_vault_pass=None, create_new_password=None,
                        auto_prompt=True):

        vault_password_files = vault_password_files or []
        vault_ids = vault_ids or []

        # convert vault_password_files into vault_ids slugs
        for password_file in vault_password_files:
            id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)

            # note this makes --vault-id higher precedence than --vault-password-file
            # if we want to intertwingle them in order probably need a cli callback to populate vault_ids
            # used by --vault-id and --vault-password-file
            vault_ids.append(id_slug)

        # if an action needs an encrypt password (create_new_password=True) and we dont
        # have other secrets setup, then automatically add a password prompt as well.
        # prompts cant/shouldnt work without a tty, so dont add prompt secrets
        if ask_vault_pass or (not vault_ids and auto_prompt):

            id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
            vault_ids.append(id_slug)

        return vault_ids

    # TODO: remove the now unused args
    @staticmethod
    def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
                            ask_vault_pass=None, create_new_password=False,
                            auto_prompt=True):
        # list of tuples
        vault_secrets = []

        # Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
        # we need to show different prompts. This is for compat with older Towers that expect a
        # certain vault password prompt format, so 'promp_ask_vault_pass' vault_id gets the old format.
        prompt_formats = {}

        # If there are configured default vault identities, they are considered 'first'
        # so we prepend them to vault_ids (from cli) here

        vault_password_files = vault_password_files or []
        if C.DEFAULT_VAULT_PASSWORD_FILE:
            vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)

        if create_new_password:
            prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
                                        'Confirm new vault password (%(vault_id)s): ']
            # 2.3 format prompts for --ask-vault-pass
            prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
                                                       'Confirm New Vault password: ']
        else:
            prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
            # The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
            prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']

        vault_ids = CLI.build_vault_ids(vault_ids,
                                        vault_password_files,
                                        ask_vault_pass,
                                        create_new_password,
                                        auto_prompt=auto_prompt)

        for vault_id_slug in vault_ids:
            vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
            if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:

                # --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
                # confusing since it will use the old format without the vault id in the prompt
                built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY

                # choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
                # always gets the old format for Tower compatibility.
                # ie, we used --ask-vault-pass, so we need to use the old vault password prompt
                # format since Tower needs to match on that format.
                prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
                                                          vault_id=built_vault_id)

                # a empty or invalid password from the prompt will warn and continue to the next
                # without erroring globally
                try:
                    prompted_vault_secret.load()
                except AnsibleError as exc:
                    display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
                    raise

                vault_secrets.append((built_vault_id, prompted_vault_secret))

                # update loader with new secrets incrementally, so we can load a vault password
                # that is encrypted with a vault secret provided earlier
                loader.set_vault_secrets(vault_secrets)
                continue

            # assuming anything else is a password file
            display.vvvvv('Reading vault password file: %s' % vault_id_value)
            # read vault_pass from a file
            file_vault_secret = get_file_vault_secret(filename=vault_id_value,
                                                      vault_id=vault_id_name,
                                                      loader=loader)

            # an invalid password file will error globally
            try:
                file_vault_secret.load()
            except AnsibleError as exc:
                display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, to_text(exc)))
                raise

            if vault_id_name:
                vault_secrets.append((vault_id_name, file_vault_secret))
            else:
                vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))

            # update loader with as-yet-known vault secrets
            loader.set_vault_secrets(vault_secrets)

        return vault_secrets

    @staticmethod
    def _get_secret(prompt):
        # prompt on the tty without echo; mark the result unsafe so templating
        # never evaluates it
        secret = getpass.getpass(prompt=prompt)
        if secret:
            secret = to_unsafe_text(secret)
        return secret

    @staticmethod
    def ask_passwords():
        ''' prompt for connection and become passwords if needed '''

        op = context.CLIARGS
        sshpass = None
        becomepass = None
        become_prompt = ''

        become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op['become_method'].upper()

        try:
            become_prompt = "%s password: " % become_prompt_method
            if op['ask_pass']:
                sshpass = CLI._get_secret("SSH password: ")
                become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
            elif op['connection_password_file']:
                sshpass = CLI.get_password_from_file(op['connection_password_file'])

            if op['become_ask_pass']:
                becomepass = CLI._get_secret(become_prompt)
                if op['ask_pass'] and becomepass == '':
                    becomepass = sshpass
            elif op['become_password_file']:
                becomepass = CLI.get_password_from_file(op['become_password_file'])

        except EOFError:
            pass

        return (sshpass, becomepass)

    def validate_conflicts(self, op, runas_opts=False, fork_opts=False):
        ''' check for conflicting options '''

        if fork_opts:
            if op.forks < 1:
                self.parser.error("The number of processes (--forks) must be >= 1")

        return op

    @abstractmethod
    def init_parser(self, usage="", desc=None, epilog=None):
        """
        Create an options parser for most ansible scripts

        Subclasses need to implement this method.  They will usually call the base class's
        init_parser to create a basic version and then add their own options on top of that.

        An implementation will look something like this::

            def init_parser(self):
                super(MyCLI, self).init_parser(usage="My Ansible CLI", inventory_opts=True)
                ansible.arguments.option_helpers.add_runas_options(self.parser)
                self.parser.add_option('--my-option', dest='my_option', action='store')
        """
        self.parser = opt_help.create_base_parser(os.path.basename(self.args[0]), usage=usage, desc=desc, epilog=epilog, )

    @abstractmethod
    def post_process_args(self, options):
        """Process the command line args

        Subclasses need to implement this method.  This method validates and transforms the command
        line arguments.  It can be used to check whether conflicting values were given, whether filenames
        exist, etc.

        An implementation will look something like this::

            def post_process_args(self, options):
                options = super(MyCLI, self).post_process_args(options)
                if options.addition and options.subtraction:
                    raise AnsibleOptionsError('Only one of --addition and --subtraction can be specified')
                if isinstance(options.listofhosts, string_types):
                    options.listofhosts = string_types.split(',')
                return options
        """

        # process tags
        if hasattr(options, 'tags') and not options.tags:
            # optparse defaults does not do what's expected
            # More specifically, we want `--tags` to be additive. So we cannot
            # simply change C.TAGS_RUN's default to ["all"] because then passing
            # --tags foo would cause us to have ['all', 'foo']
            options.tags = ['all']
        if hasattr(options, 'tags') and options.tags:
            tags = set()
            for tag_set in options.tags:
                for tag in tag_set.split(u','):
                    tags.add(tag.strip())
            options.tags = list(tags)

        # process skip_tags
        if hasattr(options, 'skip_tags') and options.skip_tags:
            skip_tags = set()
            for tag_set in options.skip_tags:
                for tag in tag_set.split(u','):
                    skip_tags.add(tag.strip())
            options.skip_tags = list(skip_tags)

        # process inventory options except for CLIs that require their own processing
        if hasattr(options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:

            if options.inventory:

                # should always be list
                if isinstance(options.inventory, string_types):
                    options.inventory = [options.inventory]

                # Ensure full paths when needed
                options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in options.inventory]
            else:
                options.inventory = C.DEFAULT_HOST_LIST

        # Dup args set on the root parser and sub parsers results in the root parser ignoring the args. e.g. doing
        # 'ansible-galaxy -vvv init' has no verbosity set but 'ansible-galaxy init -vvv' sets a level of 3. To preserve
        # back compat with pre-argparse changes we manually scan and set verbosity based on the argv values.
        if self.parser.prog in ['ansible-galaxy', 'ansible-vault'] and not options.verbosity:
            verbosity_arg = next(iter([arg for arg in self.args if arg.startswith('-v')]), None)
            if verbosity_arg:
                display.deprecated("Setting verbosity before the arg sub command is deprecated, set the verbosity "
                                   "after the sub command", "2.13", collection_name='ansible.builtin')
                options.verbosity = verbosity_arg.count('v')

        return options

    def parse(self):
        """Parse the command line args

        This method parses the command line arguments.  It uses the parser
        stored in the self.parser attribute and saves the args and options in
        context.CLIARGS.

        Subclasses need to implement two helper methods, init_parser() and post_process_args() which
        are called from this function before and after parsing the arguments.
        """
        self.init_parser()

        if HAS_ARGCOMPLETE:
            argcomplete.autocomplete(self.parser)

        try:
            options = self.parser.parse_args(self.args[1:])
        except SystemExit as e:
            if(e.code != 0):
                self.parser.exit(status=2, message=" \n%s" % self.parser.format_help())
            raise
        options = self.post_process_args(options)
        context._init_global_context(options)

    @staticmethod
    def version_info(gitinfo=False):
        ''' return full ansible version info '''
        if gitinfo:
            # expensive call, use with care
            ansible_version_string = opt_help.version()
        else:
            ansible_version_string = __version__
        ansible_version = ansible_version_string.split()[0]
        ansible_versions = ansible_version.split('.')
        for counter in range(len(ansible_versions)):
            if ansible_versions[counter] == "":
                ansible_versions[counter] = 0
            try:
                ansible_versions[counter] = int(ansible_versions[counter])
            except Exception:
                pass
        if len(ansible_versions) < 3:
            for counter in range(len(ansible_versions), 3):
                ansible_versions.append(0)
        return {'string': ansible_version_string.strip(),
                'full': ansible_version,
                'major': ansible_versions[0],
                'minor': ansible_versions[1],
                'revision': ansible_versions[2]}

    @staticmethod
    def pager(text):
        ''' find reasonable way to display text '''
        # this is a much simpler form of what is in pydoc.py
        if not sys.stdout.isatty():
            display.display(text, screen_only=True)
        elif 'PAGER' in os.environ:
            if sys.platform == 'win32':
                display.display(text, screen_only=True)
            else:
                CLI.pager_pipe(text, os.environ['PAGER'])
        else:
            p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            p.communicate()
            if p.returncode == 0:
                CLI.pager_pipe(text, 'less')
            else:
                display.display(text, screen_only=True)

    @staticmethod
    def pager_pipe(text, cmd):
        ''' pipe text through a pager '''
        if 'LESS' not in os.environ:
            os.environ['LESS'] = CLI.LESS_OPTS
        try:
            cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
            cmd.communicate(input=to_bytes(text))
        except IOError:
            pass
        except KeyboardInterrupt:
            pass

    @staticmethod
    def _play_prereqs():
        options = context.CLIARGS

        # all needs loader
        loader = DataLoader()

        basedir = options.get('basedir', False)
        if basedir:
            loader.set_basedir(basedir)
            add_all_plugin_dirs(basedir)
            AnsibleCollectionConfig.playbook_paths = basedir
            default_collection = _get_collection_name_from_path(basedir)
            if default_collection:
                display.warning(u'running with default collection {0}'.format(default_collection))
                AnsibleCollectionConfig.default_collection = default_collection

        vault_ids = list(options['vault_ids'])
        default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
        vault_ids = default_vault_ids + vault_ids

        vault_secrets = CLI.setup_vault_secrets(loader,
                                                vault_ids=vault_ids,
                                                vault_password_files=list(options['vault_password_files']),
                                                ask_vault_pass=options['ask_vault_pass'],
                                                auto_prompt=False)
        loader.set_vault_secrets(vault_secrets)

        # create the inventory, and filter it based on the subset specified (if any)
        inventory = InventoryManager(loader=loader, sources=options['inventory'])

        # create the variable manager, which will be shared throughout
        # the code, ensuring a consistent view of global variables
        variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False))

        return loader, inventory, variable_manager

    @staticmethod
    def get_host_list(inventory, subset, pattern='all'):

        no_hosts = False
        if len(inventory.list_hosts()) == 0:
            # Empty inventory
            if C.LOCALHOST_WARNING and pattern not in C.LOCALHOST:
                display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
            no_hosts = True

        inventory.subset(subset)

        hosts = inventory.list_hosts(pattern)
        if not hosts and no_hosts is False:
            raise AnsibleError("Specified hosts and/or --limit does not match any hosts")

        return hosts

    @staticmethod
    def get_password_from_file(pwd_file):

        b_pwd_file = to_bytes(pwd_file)
        secret = None
        if b_pwd_file == b'-':
            if PY3:
                # ensure its read as bytes
                secret = sys.stdin.buffer.read()
            else:
                secret = sys.stdin.read()

        elif not os.path.exists(b_pwd_file):
            raise AnsibleError("The password file %s was not found" % pwd_file)

        # os.path has no is_executable(); the previous os.path.is_executable()
        # call raised AttributeError whenever the file had an execute bit set.
        # os.access with X_OK performs the intended executability check.
        elif os.access(b_pwd_file, os.X_OK):
            display.vvvv(u'The password file %s is a script.' % to_text(pwd_file))
            cmd = [b_pwd_file]
            try:
                p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            except OSError as e:
                raise AnsibleError("Problem occured when trying to run the password script %s (%s)."
                                   " If this is not a script, remove the executable bit from the file." % (pwd_file, e))

            stdout, stderr = p.communicate()
            if p.returncode != 0:
                raise AnsibleError("The password script %s returned an error (rc=%s): %s" % (pwd_file, p.returncode, stderr))
            secret = stdout

        else:
            try:
                f = open(b_pwd_file, "rb")
                secret = f.read().strip()
                f.close()
            except (OSError, IOError) as e:
                raise AnsibleError("Could not read password file %s: %s" % (pwd_file, e))

        secret = secret.strip(b'\r\n')

        if not secret:
            raise AnsibleError('Empty password was provided from file (%s)' % pwd_file)

        return to_unsafe_text(secret)
|
thnee/ansible
|
lib/ansible/cli/__init__.py
|
Python
|
gpl-3.0
| 22,791
|
[
"Galaxy"
] |
2fa3ecb8e206e93c507195709b5fa3966dc90a43bda7d6d71c40b5cfb2ace3e1
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/')
from data_variable_hshv2 import Fmat_original_hshv
from data_variable_hslv2 import Fmat_original_hslv
from data_variable_lshv2 import Fmat_original_lshv
from data_variable_lslv2 import Fmat_original_lslv
# Scaling function
def scaling(mat):
    """Standardize the force and motion slabs of a stacked feature matrix.

    Rows 0:81 (force) and rows 162:243 (motion) are each shifted by their
    own mean and divided by their own population std (ddof=0).  The middle
    slab (rows 81:162) is passed through unchanged, matching the original
    experiment configuration.  Returns the three slabs re-stacked.
    """
    slab_top = mat[0:81, 0:]
    slab_mid = mat[81:162, 0:]
    slab_bot = mat[162:243, 0:]

    # z-score the first and last slabs over all of their entries
    slab_top = (slab_top - np.mean(slab_top)) / np.std(slab_top)
    slab_bot = (slab_bot - np.mean(slab_bot)) / np.std(slab_bot)

    # vstack is the canonical spelling of the row_stack alias
    return np.vstack([slab_top, slab_mid, slab_bot])
# Returns mu,sigma for 20 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_cov(fvec1,fvec2):
    # Split each feature matrix into 20 equal row-chunks (one per HMM hidden
    # state), flatten each chunk, and record per-state means plus a 2x2
    # covariance block used later as Gaussian emission parameters.
    index = 0
    m,n = np.shape(fvec1)
    #print m,n
    mu_1 = np.zeros((20,1))
    mu_2 = np.zeros((20,1))
    cov = np.zeros((20,2,2))
    # NOTE(review): in Python 2 this is integer (floor) division; under
    # Python 3 it would yield a float and break the slicing below -- this
    # module appears to be Python-2-only (print statements), confirm.
    DIVS = m/20
    while (index < 20):
        m_init = index*DIVS
        temp_fvec1 = fvec1[(m_init):(m_init+DIVS),0:]
        temp_fvec2 = fvec2[(m_init):(m_init+DIVS),0:]
        temp_fvec1 = np.reshape(temp_fvec1,DIVS*n)
        temp_fvec2 = np.reshape(temp_fvec2,DIVS*n)
        mu_1[index] = np.mean(temp_fvec1)
        mu_2[index] = np.mean(temp_fvec2)
        # NOTE(review): concatenating the two flattened chunks into one 1-D
        # array makes np.cov return a scalar (pooled variance) that is
        # broadcast into every entry of the 2x2 block, giving a singular
        # covariance.  A true 2x2 cross-covariance would need the pair
        # stacked as two rows instead -- confirm intent before changing,
        # as it alters the trained HMM parameters.
        cov[index,:,:] = np.cov(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
        # debug dump for the first state only
        if index == 0:
            print 'mean = ', mu_2[index]
            print 'mean = ', scp.mean(fvec2[(m_init):(m_init+DIVS),0:])
            print np.shape(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
            print cov[index,:,:]
            print scp.std(fvec2[(m_init):(m_init+DIVS),0:])
            print scp.std(temp_fvec2)
        index = index+1
    return mu_1,mu_2,cov
if __name__ == '__main__':
# Scaling wrt all data
Fmat_rf_hshv = scaling(Fmat_original_hshv[:,0:15])
Fmat_rm_hshv = Fmat_original_hshv[:,15:15]
Fmat_sf_hshv = scaling(Fmat_original_hshv[:,15:26])
Fmat_sm_hshv = scaling(Fmat_original_hshv[:,26:33])
Fmat_hshv = np.matrix(np.column_stack((Fmat_rf_hshv,Fmat_rm_hshv,Fmat_sf_hshv,Fmat_sm_hshv)))
Fmat_rf_hslv = scaling(Fmat_original_hslv[:,0:15])
Fmat_rm_hslv = scaling(Fmat_original_hslv[:,15:30])
Fmat_sf_hslv = scaling(Fmat_original_hslv[:,30:45])
Fmat_sm_hslv = scaling(Fmat_original_hslv[:,45:56])
Fmat_hslv = np.matrix(np.column_stack((Fmat_rf_hslv,Fmat_rm_hslv,Fmat_sf_hslv,Fmat_sm_hslv)))
Fmat_rf_lshv = scaling(Fmat_original_lshv[:,0:15])
Fmat_rm_lshv = scaling(Fmat_original_lshv[:,15:16])
Fmat_sf_lshv = scaling(Fmat_original_lshv[:,16:23])
Fmat_sm_lshv = scaling(Fmat_original_lshv[:,23:32])
Fmat_lshv = np.matrix(np.column_stack((Fmat_rf_lshv,Fmat_rm_lshv,Fmat_sf_lshv,Fmat_sm_lshv)))
Fmat_rf_lslv = scaling(Fmat_original_lslv[:,0:15])
Fmat_rm_lslv = scaling(Fmat_original_lslv[:,15:28])
Fmat_sf_lslv = scaling(Fmat_original_lslv[:,28:37])
Fmat_sm_lslv = scaling(Fmat_original_lslv[:,37:45])
Fmat_lslv = np.matrix(np.column_stack((Fmat_rf_lslv,Fmat_rm_lslv,Fmat_sf_lslv,Fmat_sm_lslv)))
Fmat = np.matrix(np.column_stack((Fmat_hshv,Fmat_hslv,Fmat_lshv,Fmat_lslv)))
# HMM - Implementation:
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# pi - initial probabilities per state
pi = [0.05] * 20
# Confusion Matrix
cmat = np.zeros((4,4))
#############################################################################################################################################
# HSHV as testing set and Rest as training set
# Checking the Data-Matrix
mu_rf_force_hshv,mu_rf_motion_hshv,cov_rf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:81,0:15], Fmat_lshv[0:81,0:15], Fmat_lslv[0:81,0:15])))), (np.matrix(np.column_stack((Fmat_hslv[162:243,0:15], Fmat_lshv[162:243,0:15], Fmat_lslv[162:243,0:15])))))
# --- Fold (continued): HSHV held out as the test set; HSLV/LSHV/LSLV train. ---
# feature_to_mu_cov estimates per-state Gaussian emission parameters (mean
# force, mean motion, 2x2 covariance) for one class from the stacked force
# rows (trials 0:81) and motion rows (trials 162:243) of the training
# conditions.  Column ranges differ per condition because each condition
# contains a different number of trials per class (RF/RM/SF/SM).
mu_rm_force_hshv,mu_rm_motion_hshv,cov_rm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:81,15:30], Fmat_lshv[0:81,15:16], Fmat_lslv[0:81,15:28])))), (np.matrix(np.column_stack((Fmat_hslv[162:243,15:30], Fmat_lshv[162:243,15:16], Fmat_lslv[162:243,15:28])))))
mu_sf_force_hshv,mu_sf_motion_hshv,cov_sf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:81,30:45], Fmat_lshv[0:81,16:23], Fmat_lslv[0:81,28:37])))), (np.matrix(np.column_stack((Fmat_hslv[162:243,30:45], Fmat_lshv[162:243,16:23], Fmat_lslv[162:243,28:37])))))
mu_sm_force_hshv,mu_sm_motion_hshv,cov_sm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:81,45:56], Fmat_lshv[0:81,23:32], Fmat_lslv[0:81,37:45])))), (np.matrix(np.column_stack((Fmat_hslv[162:243,45:56], Fmat_lshv[162:243,23:32], Fmat_lslv[162:243,37:45])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
# One entry per hidden state (20 states); each entry is
# [[mu_force, mu_motion], [c00, c01, c10, c11]] (covariance flattened
# row-major), the format ghmm expects for a 2-D Gaussian emission.
B_rf_hshv = [0.0]*20
B_rm_hshv = [0.0]*20
B_sf_hshv = [0.0]*20
B_sm_hshv = [0.0]*20
for num_states in range(20):
    B_rf_hshv[num_states] = [[mu_rf_force_hshv[num_states][0],mu_rf_motion_hshv[num_states][0]],[cov_rf_hshv[num_states][0][0],cov_rf_hshv[num_states][0][1],cov_rf_hshv[num_states][1][0],cov_rf_hshv[num_states][1][1]]]
    B_rm_hshv[num_states] = [[mu_rm_force_hshv[num_states][0],mu_rm_motion_hshv[num_states][0]],[cov_rm_hshv[num_states][0][0],cov_rm_hshv[num_states][0][1],cov_rm_hshv[num_states][1][0],cov_rm_hshv[num_states][1][1]]]
    B_sf_hshv[num_states] = [[mu_sf_force_hshv[num_states][0],mu_sf_motion_hshv[num_states][0]],[cov_sf_hshv[num_states][0][0],cov_sf_hshv[num_states][0][1],cov_sf_hshv[num_states][1][0],cov_sf_hshv[num_states][1][1]]]
    B_sm_hshv[num_states] = [[mu_sm_force_hshv[num_states][0],mu_sm_motion_hshv[num_states][0]],[cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]]]
    # Debug dump of the SM covariance entries per state.
    print cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]
    print "----"
#print B_sm_hshv
#print mu_sm_motion_hshv
# generate RF, RM, SF, SM models from parameters
# (A, pi and the ghmm domain F are defined earlier in this file.)
model_rf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hshv, pi) # Will be Trained
model_rm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hshv, pi) # Will be Trained
model_sf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hshv, pi) # Will be Trained
model_sm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hshv, pi) # Will be Trained
# For Training
total_seq_rf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:81,0:15], Fmat_lshv[0:81,0:15], Fmat_lslv[0:81,0:15])))
total_seq_rm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:81,15:30], Fmat_lshv[0:81,15:16], Fmat_lslv[0:81,15:28])))
total_seq_sf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:81,30:45], Fmat_lshv[0:81,16:23], Fmat_lslv[0:81,28:37])))
total_seq_sm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:81,45:56], Fmat_lshv[0:81,23:32], Fmat_lslv[0:81,37:45])))
total_seq_rf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[162:243,0:15], Fmat_lshv[162:243,0:15], Fmat_lslv[162:243,0:15])))
total_seq_rm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[162:243,15:30], Fmat_lshv[162:243,15:16], Fmat_lslv[162:243,15:28])))
total_seq_sf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[162:243,30:45], Fmat_lshv[162:243,16:23], Fmat_lslv[162:243,28:37])))
total_seq_sm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[162:243,45:56], Fmat_lshv[162:243,23:32], Fmat_lslv[162:243,37:45])))
# Interleave force and motion rows (force, motion, force, motion, ...)
# into a 162-row buffer: one (force, motion) pair per time step, as the
# 2-D emission sequences require.  Column counts match the per-class
# trial totals above (45/29/31/28).
total_seq_rf_hshv = np.zeros((162,45))
total_seq_rm_hshv = np.zeros((162,29))
total_seq_sf_hshv = np.zeros((162,31))
total_seq_sm_hshv = np.zeros((162,28))
i = 0
j = 0
while i < 162:
    total_seq_rf_hshv[i] = total_seq_rf_force_hshv[j]
    total_seq_rf_hshv[i+1] = total_seq_rf_motion_hshv[j]
    total_seq_rm_hshv[i] = total_seq_rm_force_hshv[j]
    total_seq_rm_hshv[i+1] = total_seq_rm_motion_hshv[j]
    total_seq_sf_hshv[i] = total_seq_sf_force_hshv[j]
    total_seq_sf_hshv[i+1] = total_seq_sf_motion_hshv[j]
    total_seq_sm_hshv[i] = total_seq_sm_force_hshv[j]
    total_seq_sm_hshv[i+1] = total_seq_sm_motion_hshv[j]
    j=j+1
    i=i+2
# Transpose so each list element is one trial's interleaved sequence.
train_seq_rf_hshv = (np.array(total_seq_rf_hshv).T).tolist()
train_seq_rm_hshv = (np.array(total_seq_rm_hshv).T).tolist()
train_seq_sf_hshv = (np.array(total_seq_sf_hshv).T).tolist()
train_seq_sm_hshv = (np.array(total_seq_sm_hshv).T).tolist()
#print train_seq_rf_hshv
final_ts_rf_hshv = ghmm.SequenceSet(F,train_seq_rf_hshv)
final_ts_rm_hshv = ghmm.SequenceSet(F,train_seq_rm_hshv)
final_ts_sf_hshv = ghmm.SequenceSet(F,train_seq_sf_hshv)
final_ts_sm_hshv = ghmm.SequenceSet(F,train_seq_sm_hshv)
# Refine each class model with Baum-Welch on its training sequences.
model_rf_hshv.baumWelch(final_ts_rf_hshv)
model_rm_hshv.baumWelch(final_ts_rm_hshv)
model_sf_hshv.baumWelch(final_ts_sf_hshv)
model_sm_hshv.baumWelch(final_ts_sm_hshv)
# For Testing
# Same force/motion interleaving for the held-out HSHV trials (33 columns).
total_seq_obj_hshv = np.zeros((162,33))
total_seq_obj_force_hshv = Fmat_hshv[0:81,:]
total_seq_obj_motion_hshv = Fmat_hshv[162:243,:]
i = 0
j = 0
while i < 162:
    total_seq_obj_hshv[i] = total_seq_obj_force_hshv[j]
    total_seq_obj_hshv[i+1] = total_seq_obj_motion_hshv[j]
    j=j+1
    i=i+2
# One-hot prediction indicators per test trial (row vectors of length 33).
rf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
rm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
k = 0
# Classify each test trial by the model giving the highest Viterbi
# log-likelihood (viterbi returns (path, log-likelihood)).
while (k < np.size(total_seq_obj_hshv,1)):
    test_seq_obj_hshv = (np.array(total_seq_obj_hshv[:,k]).T).tolist()
    new_test_seq_obj_hshv = np.array(test_seq_obj_hshv)
    #print new_test_seq_obj_hshv
    ts_obj_hshv = new_test_seq_obj_hshv
    #print np.shape(ts_obj_hshv)
    final_ts_obj_hshv = ghmm.EmissionSequence(F,ts_obj_hshv.tolist())
    # Find Viterbi Path
    path_rf_obj_hshv = model_rf_hshv.viterbi(final_ts_obj_hshv)
    path_rm_obj_hshv = model_rm_hshv.viterbi(final_ts_obj_hshv)
    path_sf_obj_hshv = model_sf_hshv.viterbi(final_ts_obj_hshv)
    path_sm_obj_hshv = model_sm_hshv.viterbi(final_ts_obj_hshv)
    obj_hshv = max(path_rf_obj_hshv[1],path_rm_obj_hshv[1],path_sf_obj_hshv[1],path_sm_obj_hshv[1])
    if obj_hshv == path_rf_obj_hshv[1]:
        rf_hshv[0,k] = 1
    elif obj_hshv == path_rm_obj_hshv[1]:
        rm_hshv[0,k] = 1
    elif obj_hshv == path_sf_obj_hshv[1]:
        sf_hshv[0,k] = 1
    else:
        sm_hshv[0,k] = 1
    k = k+1
#print rf_hshv.T
# Accumulate into the confusion matrix: row = predicted class
# (RF, RM, SF, SM), column = true class.  HSHV true-class column blocks
# are RF 0:15, RM 15:15, SF 15:26, SM 26:33 -- the empty 15:15 slice
# means HSHV contributes no rigid-movable trials, consistent with the
# zero-width RM slices of Fmat_hshv used in the other folds below.
cmat[0][0] = cmat[0][0] + np.sum(rf_hshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hshv[0,15:15])
cmat[0][2] = cmat[0][2] + np.sum(rf_hshv[0,15:26])
cmat[0][3] = cmat[0][3] + np.sum(rf_hshv[0,26:33])
cmat[1][0] = cmat[1][0] + np.sum(rm_hshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hshv[0,15:15])
cmat[1][2] = cmat[1][2] + np.sum(rm_hshv[0,15:26])
cmat[1][3] = cmat[1][3] + np.sum(rm_hshv[0,26:33])
cmat[2][0] = cmat[2][0] + np.sum(sf_hshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hshv[0,15:15])
cmat[2][2] = cmat[2][2] + np.sum(sf_hshv[0,15:26])
cmat[2][3] = cmat[2][3] + np.sum(sf_hshv[0,26:33])
cmat[3][0] = cmat[3][0] + np.sum(sm_hshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hshv[0,15:15])
cmat[3][2] = cmat[3][2] + np.sum(sm_hshv[0,15:26])
cmat[3][3] = cmat[3][3] + np.sum(sm_hshv[0,26:33])
#print cmat
#############################################################################################################################################
# HSLV as testing set and Rest as training set
# Same pipeline as the HSHV fold above: estimate per-state Gaussian
# emissions per class from the other three conditions, build/train four
# HMMs, classify held-out HSLV trials by Viterbi log-likelihood.
# Note the empty Fmat_hshv[...,15:15] RM slice (HSHV has no RM trials).
mu_rf_force_hslv,mu_rf_motion_hslv,cov_rf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,0:15], Fmat_lshv[0:81,0:15], Fmat_lslv[0:81,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,0:15], Fmat_lshv[162:243,0:15], Fmat_lslv[162:243,0:15])))))
mu_rm_force_hslv,mu_rm_motion_hslv,cov_rm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,15:15], Fmat_lshv[0:81,15:16], Fmat_lslv[0:81,15:28])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,15:15], Fmat_lshv[162:243,15:16], Fmat_lslv[162:243,15:28])))))
mu_sf_force_hslv,mu_sf_motion_hslv,cov_sf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,15:26], Fmat_lshv[0:81,16:23], Fmat_lslv[0:81,28:37])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,15:26], Fmat_lshv[162:243,16:23], Fmat_lslv[162:243,28:37])))))
mu_sm_force_hslv,mu_sm_motion_hslv,cov_sm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,26:33], Fmat_lshv[0:81,23:32], Fmat_lslv[0:81,37:45])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,26:33], Fmat_lshv[162:243,23:32], Fmat_lslv[162:243,37:45])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hslv = [0.0]*20
B_rm_hslv = [0.0]*20
B_sf_hslv = [0.0]*20
B_sm_hslv = [0.0]*20
for num_states in range(20):
    B_rf_hslv[num_states] = [[mu_rf_force_hslv[num_states][0],mu_rf_motion_hslv[num_states][0]],[cov_rf_hslv[num_states][0][0],cov_rf_hslv[num_states][0][1],cov_rf_hslv[num_states][1][0],cov_rf_hslv[num_states][1][1]]]
    B_rm_hslv[num_states] = [[mu_rm_force_hslv[num_states][0],mu_rm_motion_hslv[num_states][0]],[cov_rm_hslv[num_states][0][0],cov_rm_hslv[num_states][0][1],cov_rm_hslv[num_states][1][0],cov_rm_hslv[num_states][1][1]]]
    B_sf_hslv[num_states] = [[mu_sf_force_hslv[num_states][0],mu_sf_motion_hslv[num_states][0]],[cov_sf_hslv[num_states][0][0],cov_sf_hslv[num_states][0][1],cov_sf_hslv[num_states][1][0],cov_sf_hslv[num_states][1][1]]]
    B_sm_hslv[num_states] = [[mu_sm_force_hslv[num_states][0],mu_sm_motion_hslv[num_states][0]],[cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]]]
    # Debug dump of the SM covariance entries per state.
    print cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]
    print "----"
#print B_sm_hslv
#print mu_sm_motion_hslv
# generate RF, RM, SF, SM models from parameters
model_rf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hslv, pi) # Will be Trained
model_rm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hslv, pi) # Will be Trained
model_sf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hslv, pi) # Will be Trained
model_sm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hslv, pi) # Will be Trained
# For Training
total_seq_rf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:81,0:15], Fmat_lshv[0:81,0:15], Fmat_lslv[0:81,0:15])))
total_seq_rm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:81,15:15], Fmat_lshv[0:81,15:16], Fmat_lslv[0:81,15:28])))
total_seq_sf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:81,15:26], Fmat_lshv[0:81,16:23], Fmat_lslv[0:81,28:37])))
total_seq_sm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:81,26:33], Fmat_lshv[0:81,23:32], Fmat_lslv[0:81,37:45])))
total_seq_rf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[162:243,0:15], Fmat_lshv[162:243,0:15], Fmat_lslv[162:243,0:15])))
total_seq_rm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[162:243,15:15], Fmat_lshv[162:243,15:16], Fmat_lslv[162:243,15:28])))
total_seq_sf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[162:243,15:26], Fmat_lshv[162:243,16:23], Fmat_lslv[162:243,28:37])))
total_seq_sm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[162:243,26:33], Fmat_lshv[162:243,23:32], Fmat_lslv[162:243,37:45])))
# Interleave force and motion rows per trial (buffer widths = per-class
# training trial counts: 45/14/27/24).
total_seq_rf_hslv = np.zeros((162,45))
total_seq_rm_hslv = np.zeros((162,14))
total_seq_sf_hslv = np.zeros((162,27))
total_seq_sm_hslv = np.zeros((162,24))
i = 0
j = 0
while i < 162:
    total_seq_rf_hslv[i] = total_seq_rf_force_hslv[j]
    total_seq_rf_hslv[i+1] = total_seq_rf_motion_hslv[j]
    total_seq_rm_hslv[i] = total_seq_rm_force_hslv[j]
    total_seq_rm_hslv[i+1] = total_seq_rm_motion_hslv[j]
    total_seq_sf_hslv[i] = total_seq_sf_force_hslv[j]
    total_seq_sf_hslv[i+1] = total_seq_sf_motion_hslv[j]
    total_seq_sm_hslv[i] = total_seq_sm_force_hslv[j]
    total_seq_sm_hslv[i+1] = total_seq_sm_motion_hslv[j]
    j=j+1
    i=i+2
train_seq_rf_hslv = (np.array(total_seq_rf_hslv).T).tolist()
train_seq_rm_hslv = (np.array(total_seq_rm_hslv).T).tolist()
train_seq_sf_hslv = (np.array(total_seq_sf_hslv).T).tolist()
train_seq_sm_hslv = (np.array(total_seq_sm_hslv).T).tolist()
#print train_seq_rf_hslv
final_ts_rf_hslv = ghmm.SequenceSet(F,train_seq_rf_hslv)
final_ts_rm_hslv = ghmm.SequenceSet(F,train_seq_rm_hslv)
final_ts_sf_hslv = ghmm.SequenceSet(F,train_seq_sf_hslv)
final_ts_sm_hslv = ghmm.SequenceSet(F,train_seq_sm_hslv)
# Baum-Welch refinement per class model.
model_rf_hslv.baumWelch(final_ts_rf_hslv)
model_rm_hslv.baumWelch(final_ts_rm_hslv)
model_sf_hslv.baumWelch(final_ts_sf_hslv)
model_sm_hslv.baumWelch(final_ts_sm_hslv)
# For Testing
# Held-out HSLV set: 56 trials.
total_seq_obj_hslv = np.zeros((162,56))
total_seq_obj_force_hslv = Fmat_hslv[0:81,:]
total_seq_obj_motion_hslv = Fmat_hslv[162:243,:]
i = 0
j = 0
while i < 162:
    total_seq_obj_hslv[i] = total_seq_obj_force_hslv[j]
    total_seq_obj_hslv[i+1] = total_seq_obj_motion_hslv[j]
    j=j+1
    i=i+2
rf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
rm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
k = 0
# Classify each test trial by the model with the highest Viterbi
# log-likelihood.
while (k < np.size(total_seq_obj_hslv,1)):
    test_seq_obj_hslv = (np.array(total_seq_obj_hslv[:,k]).T).tolist()
    new_test_seq_obj_hslv = np.array(test_seq_obj_hslv)
    #print new_test_seq_obj_hslv
    ts_obj_hslv = new_test_seq_obj_hslv
    #print np.shape(ts_obj_hslv)
    final_ts_obj_hslv = ghmm.EmissionSequence(F,ts_obj_hslv.tolist())
    # Find Viterbi Path
    path_rf_obj_hslv = model_rf_hslv.viterbi(final_ts_obj_hslv)
    path_rm_obj_hslv = model_rm_hslv.viterbi(final_ts_obj_hslv)
    path_sf_obj_hslv = model_sf_hslv.viterbi(final_ts_obj_hslv)
    path_sm_obj_hslv = model_sm_hslv.viterbi(final_ts_obj_hslv)
    obj_hslv = max(path_rf_obj_hslv[1],path_rm_obj_hslv[1],path_sf_obj_hslv[1],path_sm_obj_hslv[1])
    if obj_hslv == path_rf_obj_hslv[1]:
        rf_hslv[0,k] = 1
    elif obj_hslv == path_rm_obj_hslv[1]:
        rm_hslv[0,k] = 1
    elif obj_hslv == path_sf_obj_hslv[1]:
        sf_hslv[0,k] = 1
    else:
        sm_hslv[0,k] = 1
    k = k+1
#print rf_hshv.T
# HSLV true-class column blocks: RF 0:15, RM 15:30, SF 30:45, SM 45:56.
cmat[0][0] = cmat[0][0] + np.sum(rf_hslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hslv[0,45:56])
cmat[1][0] = cmat[1][0] + np.sum(rm_hslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hslv[0,45:56])
cmat[2][0] = cmat[2][0] + np.sum(sf_hslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hslv[0,45:56])
cmat[3][0] = cmat[3][0] + np.sum(sm_hslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hslv[0,45:56])
#print cmat
############################################################################################################################################
# LSHV as testing set and Rest as training set
# Same pipeline again; training pools HSHV (no RM trials: 15:15 slice is
# empty), HSLV and LSLV.
mu_rf_force_lshv,mu_rf_motion_lshv,cov_rf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,0:15], Fmat_hslv[0:81,0:15], Fmat_lslv[0:81,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,0:15], Fmat_hslv[162:243,0:15], Fmat_lslv[162:243,0:15])))))
mu_rm_force_lshv,mu_rm_motion_lshv,cov_rm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,15:15], Fmat_hslv[0:81,15:30], Fmat_lslv[0:81,15:28])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,15:15], Fmat_hslv[162:243,15:30], Fmat_lslv[162:243,15:28])))))
mu_sf_force_lshv,mu_sf_motion_lshv,cov_sf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,15:26], Fmat_hslv[0:81,30:45], Fmat_lslv[0:81,28:37])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,15:26], Fmat_hslv[162:243,30:45], Fmat_lslv[162:243,28:37])))))
mu_sm_force_lshv,mu_sm_motion_lshv,cov_sm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,26:33], Fmat_hslv[0:81,45:56], Fmat_lslv[0:81,37:45])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,26:33], Fmat_hslv[162:243,45:56], Fmat_lslv[162:243,37:45])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lshv = [0.0]*20
B_rm_lshv = [0.0]*20
B_sf_lshv = [0.0]*20
B_sm_lshv = [0.0]*20
for num_states in range(20):
    B_rf_lshv[num_states] = [[mu_rf_force_lshv[num_states][0],mu_rf_motion_lshv[num_states][0]],[cov_rf_lshv[num_states][0][0],cov_rf_lshv[num_states][0][1],cov_rf_lshv[num_states][1][0],cov_rf_lshv[num_states][1][1]]]
    B_rm_lshv[num_states] = [[mu_rm_force_lshv[num_states][0],mu_rm_motion_lshv[num_states][0]],[cov_rm_lshv[num_states][0][0],cov_rm_lshv[num_states][0][1],cov_rm_lshv[num_states][1][0],cov_rm_lshv[num_states][1][1]]]
    B_sf_lshv[num_states] = [[mu_sf_force_lshv[num_states][0],mu_sf_motion_lshv[num_states][0]],[cov_sf_lshv[num_states][0][0],cov_sf_lshv[num_states][0][1],cov_sf_lshv[num_states][1][0],cov_sf_lshv[num_states][1][1]]]
    B_sm_lshv[num_states] = [[mu_sm_force_lshv[num_states][0],mu_sm_motion_lshv[num_states][0]],[cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]]]
    # Debug dump of the SM covariance entries per state.
    print cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]
    print "----"
#print B_sm_lshv
#print mu_sm_motion_lshv
# generate RF, RM, SF, SM models from parameters
model_rf_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_lshv, pi) # Will be Trained
model_rm_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_lshv, pi) # Will be Trained
model_sf_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_lshv, pi) # Will be Trained
model_sm_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_lshv, pi) # Will be Trained
# For Training
total_seq_rf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:81,0:15], Fmat_hslv[0:81,0:15], Fmat_lslv[0:81,0:15])))
total_seq_rm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:81,15:15], Fmat_hslv[0:81,15:30], Fmat_lslv[0:81,15:28])))
total_seq_sf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:81,15:26], Fmat_hslv[0:81,30:45], Fmat_lslv[0:81,28:37])))
total_seq_sm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:81,26:33], Fmat_hslv[0:81,45:56], Fmat_lslv[0:81,37:45])))
total_seq_rf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[162:243,0:15], Fmat_hslv[162:243,0:15], Fmat_lslv[162:243,0:15])))
total_seq_rm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[162:243,15:15], Fmat_hslv[162:243,15:30], Fmat_lslv[162:243,15:28])))
total_seq_sf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[162:243,15:26], Fmat_hslv[162:243,30:45], Fmat_lslv[162:243,28:37])))
total_seq_sm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[162:243,26:33], Fmat_hslv[162:243,45:56], Fmat_lslv[162:243,37:45])))
# Interleave force and motion rows per trial (buffer widths = per-class
# training trial counts: 45/28/35/26).
total_seq_rf_lshv = np.zeros((162,45))
total_seq_rm_lshv = np.zeros((162,28))
total_seq_sf_lshv = np.zeros((162,35))
total_seq_sm_lshv = np.zeros((162,26))
i = 0
j = 0
while i < 162:
    total_seq_rf_lshv[i] = total_seq_rf_force_lshv[j]
    total_seq_rf_lshv[i+1] = total_seq_rf_motion_lshv[j]
    total_seq_rm_lshv[i] = total_seq_rm_force_lshv[j]
    total_seq_rm_lshv[i+1] = total_seq_rm_motion_lshv[j]
    total_seq_sf_lshv[i] = total_seq_sf_force_lshv[j]
    total_seq_sf_lshv[i+1] = total_seq_sf_motion_lshv[j]
    total_seq_sm_lshv[i] = total_seq_sm_force_lshv[j]
    total_seq_sm_lshv[i+1] = total_seq_sm_motion_lshv[j]
    j=j+1
    i=i+2
train_seq_rf_lshv = (np.array(total_seq_rf_lshv).T).tolist()
train_seq_rm_lshv = (np.array(total_seq_rm_lshv).T).tolist()
train_seq_sf_lshv = (np.array(total_seq_sf_lshv).T).tolist()
train_seq_sm_lshv = (np.array(total_seq_sm_lshv).T).tolist()
#print train_seq_rf_lshv
final_ts_rf_lshv = ghmm.SequenceSet(F,train_seq_rf_lshv)
final_ts_rm_lshv = ghmm.SequenceSet(F,train_seq_rm_lshv)
final_ts_sf_lshv = ghmm.SequenceSet(F,train_seq_sf_lshv)
final_ts_sm_lshv = ghmm.SequenceSet(F,train_seq_sm_lshv)
# Baum-Welch refinement per class model.
model_rf_lshv.baumWelch(final_ts_rf_lshv)
model_rm_lshv.baumWelch(final_ts_rm_lshv)
model_sf_lshv.baumWelch(final_ts_sf_lshv)
model_sm_lshv.baumWelch(final_ts_sm_lshv)
# For Testing
# Held-out LSHV set: 32 trials.
total_seq_obj_lshv = np.zeros((162,32))
total_seq_obj_force_lshv = Fmat_lshv[0:81,:]
total_seq_obj_motion_lshv = Fmat_lshv[162:243,:]
i = 0
j = 0
while i < 162:
    total_seq_obj_lshv[i] = total_seq_obj_force_lshv[j]
    total_seq_obj_lshv[i+1] = total_seq_obj_motion_lshv[j]
    j=j+1
    i=i+2
rf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
rm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
k = 0
# Classify each test trial by the model with the highest Viterbi
# log-likelihood.
while (k < np.size(total_seq_obj_lshv,1)):
    test_seq_obj_lshv = (np.array(total_seq_obj_lshv[:,k]).T).tolist()
    new_test_seq_obj_lshv = np.array(test_seq_obj_lshv)
    #print new_test_seq_obj_lshv
    ts_obj_lshv = new_test_seq_obj_lshv
    #print np.shape(ts_obj_lshv)
    final_ts_obj_lshv = ghmm.EmissionSequence(F,ts_obj_lshv.tolist())
    # Find Viterbi Path
    path_rf_obj_lshv = model_rf_lshv.viterbi(final_ts_obj_lshv)
    path_rm_obj_lshv = model_rm_lshv.viterbi(final_ts_obj_lshv)
    path_sf_obj_lshv = model_sf_lshv.viterbi(final_ts_obj_lshv)
    path_sm_obj_lshv = model_sm_lshv.viterbi(final_ts_obj_lshv)
    obj_lshv = max(path_rf_obj_lshv[1],path_rm_obj_lshv[1],path_sf_obj_lshv[1],path_sm_obj_lshv[1])
    if obj_lshv == path_rf_obj_lshv[1]:
        rf_lshv[0,k] = 1
    elif obj_lshv == path_rm_obj_lshv[1]:
        rm_lshv[0,k] = 1
    elif obj_lshv == path_sf_obj_lshv[1]:
        sf_lshv[0,k] = 1
    else:
        sm_lshv[0,k] = 1
    k = k+1
#print rf_lshv.T
# LSHV true-class column blocks: RF 0:15, RM 15:16, SF 16:23, SM 23:32.
cmat[0][0] = cmat[0][0] + np.sum(rf_lshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lshv[0,15:16])
cmat[0][2] = cmat[0][2] + np.sum(rf_lshv[0,16:23])
cmat[0][3] = cmat[0][3] + np.sum(rf_lshv[0,23:32])
cmat[1][0] = cmat[1][0] + np.sum(rm_lshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lshv[0,15:16])
cmat[1][2] = cmat[1][2] + np.sum(rm_lshv[0,16:23])
cmat[1][3] = cmat[1][3] + np.sum(rm_lshv[0,23:32])
cmat[2][0] = cmat[2][0] + np.sum(sf_lshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lshv[0,15:16])
cmat[2][2] = cmat[2][2] + np.sum(sf_lshv[0,16:23])
cmat[2][3] = cmat[2][3] + np.sum(sf_lshv[0,23:32])
cmat[3][0] = cmat[3][0] + np.sum(sm_lshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lshv[0,15:16])
cmat[3][2] = cmat[3][2] + np.sum(sm_lshv[0,16:23])
cmat[3][3] = cmat[3][3] + np.sum(sm_lshv[0,23:32])
#print cmat
#############################################################################################################################################
# LSLV as testing set and Rest as training set
# Final fold of the leave-one-condition-out cross-validation; training
# pools HSHV (no RM trials: 15:15 slice is empty), HSLV and LSHV.
mu_rf_force_lslv,mu_rf_motion_lslv,cov_rf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,0:15], Fmat_hslv[0:81,0:15], Fmat_lshv[0:81,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,0:15], Fmat_hslv[162:243,0:15], Fmat_lshv[162:243,0:15])))))
mu_rm_force_lslv,mu_rm_motion_lslv,cov_rm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,15:15], Fmat_hslv[0:81,15:30], Fmat_lshv[0:81,15:16])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,15:15], Fmat_hslv[162:243,15:30], Fmat_lshv[162:243,15:16])))))
mu_sf_force_lslv,mu_sf_motion_lslv,cov_sf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,15:26], Fmat_hslv[0:81,30:45], Fmat_lshv[0:81,16:23])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,15:26], Fmat_hslv[162:243,30:45], Fmat_lshv[162:243,16:23])))))
mu_sm_force_lslv,mu_sm_motion_lslv,cov_sm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,26:33], Fmat_hslv[0:81,45:56], Fmat_lshv[0:81,23:32])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,26:33], Fmat_hslv[162:243,45:56], Fmat_lshv[162:243,23:32])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lslv = [0.0]*20
B_rm_lslv = [0.0]*20
B_sf_lslv = [0.0]*20
B_sm_lslv = [0.0]*20
for num_states in range(20):
    B_rf_lslv[num_states] = [[mu_rf_force_lslv[num_states][0],mu_rf_motion_lslv[num_states][0]],[cov_rf_lslv[num_states][0][0],cov_rf_lslv[num_states][0][1],cov_rf_lslv[num_states][1][0],cov_rf_lslv[num_states][1][1]]]
    B_rm_lslv[num_states] = [[mu_rm_force_lslv[num_states][0],mu_rm_motion_lslv[num_states][0]],[cov_rm_lslv[num_states][0][0],cov_rm_lslv[num_states][0][1],cov_rm_lslv[num_states][1][0],cov_rm_lslv[num_states][1][1]]]
    B_sf_lslv[num_states] = [[mu_sf_force_lslv[num_states][0],mu_sf_motion_lslv[num_states][0]],[cov_sf_lslv[num_states][0][0],cov_sf_lslv[num_states][0][1],cov_sf_lslv[num_states][1][0],cov_sf_lslv[num_states][1][1]]]
    B_sm_lslv[num_states] = [[mu_sm_force_lslv[num_states][0],mu_sm_motion_lslv[num_states][0]],[cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]]]
    # Debug dump of the SM covariance entries per state.
    print cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]
    print "----"
#print B_sm_lslv
#print mu_sm_motion_lslv
# generate RF, RM, SF, SM models from parameters
model_rf_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_lslv, pi) # Will be Trained
model_rm_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_lslv, pi) # Will be Trained
model_sf_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_lslv, pi) # Will be Trained
model_sm_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_lslv, pi) # Will be Trained
# For Training
total_seq_rf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:81,0:15], Fmat_hslv[0:81,0:15], Fmat_lshv[0:81,0:15])))
total_seq_rm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:81,15:15], Fmat_hslv[0:81,15:30], Fmat_lshv[0:81,15:16])))
total_seq_sf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:81,15:26], Fmat_hslv[0:81,30:45], Fmat_lshv[0:81,16:23])))
total_seq_sm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:81,26:33], Fmat_hslv[0:81,45:56], Fmat_lshv[0:81,23:32])))
total_seq_rf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[162:243,0:15], Fmat_hslv[162:243,0:15], Fmat_lshv[162:243,0:15])))
total_seq_rm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[162:243,15:15], Fmat_hslv[162:243,15:30], Fmat_lshv[162:243,15:16])))
total_seq_sf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[162:243,15:26], Fmat_hslv[162:243,30:45], Fmat_lshv[162:243,16:23])))
total_seq_sm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[162:243,26:33], Fmat_hslv[162:243,45:56], Fmat_lshv[162:243,23:32])))
# Interleave force and motion rows per trial (buffer widths = per-class
# training trial counts: 45/16/33/27).
total_seq_rf_lslv = np.zeros((162,45))
total_seq_rm_lslv = np.zeros((162,16))
total_seq_sf_lslv = np.zeros((162,33))
total_seq_sm_lslv = np.zeros((162,27))
i = 0
j = 0
while i < 162:
    total_seq_rf_lslv[i] = total_seq_rf_force_lslv[j]
    total_seq_rf_lslv[i+1] = total_seq_rf_motion_lslv[j]
    total_seq_rm_lslv[i] = total_seq_rm_force_lslv[j]
    total_seq_rm_lslv[i+1] = total_seq_rm_motion_lslv[j]
    total_seq_sf_lslv[i] = total_seq_sf_force_lslv[j]
    total_seq_sf_lslv[i+1] = total_seq_sf_motion_lslv[j]
    total_seq_sm_lslv[i] = total_seq_sm_force_lslv[j]
    total_seq_sm_lslv[i+1] = total_seq_sm_motion_lslv[j]
    j=j+1
    i=i+2
train_seq_rf_lslv = (np.array(total_seq_rf_lslv).T).tolist()
train_seq_rm_lslv = (np.array(total_seq_rm_lslv).T).tolist()
train_seq_sf_lslv = (np.array(total_seq_sf_lslv).T).tolist()
train_seq_sm_lslv = (np.array(total_seq_sm_lslv).T).tolist()
#print train_seq_rf_lslv
final_ts_rf_lslv = ghmm.SequenceSet(F,train_seq_rf_lslv)
final_ts_rm_lslv = ghmm.SequenceSet(F,train_seq_rm_lslv)
final_ts_sf_lslv = ghmm.SequenceSet(F,train_seq_sf_lslv)
final_ts_sm_lslv = ghmm.SequenceSet(F,train_seq_sm_lslv)
# Baum-Welch refinement per class model.
model_rf_lslv.baumWelch(final_ts_rf_lslv)
model_rm_lslv.baumWelch(final_ts_rm_lslv)
model_sf_lslv.baumWelch(final_ts_sf_lslv)
model_sm_lslv.baumWelch(final_ts_sm_lslv)
# For Testing
# Held-out LSLV set: 45 trials.
total_seq_obj_lslv = np.zeros((162,45))
total_seq_obj_force_lslv = Fmat_lslv[0:81,:]
total_seq_obj_motion_lslv = Fmat_lslv[162:243,:]
i = 0
j = 0
while i < 162:
    total_seq_obj_lslv[i] = total_seq_obj_force_lslv[j]
    total_seq_obj_lslv[i+1] = total_seq_obj_motion_lslv[j]
    j=j+1
    i=i+2
rf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
rm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
k = 0
# Classify each test trial by the model with the highest Viterbi
# log-likelihood.
while (k < np.size(total_seq_obj_lslv,1)):
    test_seq_obj_lslv = (np.array(total_seq_obj_lslv[:,k]).T).tolist()
    new_test_seq_obj_lslv = np.array(test_seq_obj_lslv)
    #print new_test_seq_obj_lslv
    ts_obj_lslv = new_test_seq_obj_lslv
    #print np.shape(ts_obj_lslv)
    # Find Viterbi Path
    final_ts_obj_lslv = ghmm.EmissionSequence(F,ts_obj_lslv.tolist())
    path_rf_obj_lslv = model_rf_lslv.viterbi(final_ts_obj_lslv)
    path_rm_obj_lslv = model_rm_lslv.viterbi(final_ts_obj_lslv)
    path_sf_obj_lslv = model_sf_lslv.viterbi(final_ts_obj_lslv)
    path_sm_obj_lslv = model_sm_lslv.viterbi(final_ts_obj_lslv)
    obj_lslv = max(path_rf_obj_lslv[1],path_rm_obj_lslv[1],path_sf_obj_lslv[1],path_sm_obj_lslv[1])
    if obj_lslv == path_rf_obj_lslv[1]:
        rf_lslv[0,k] = 1
    elif obj_lslv == path_rm_obj_lslv[1]:
        rm_lslv[0,k] = 1
    elif obj_lslv == path_sf_obj_lslv[1]:
        sf_lslv[0,k] = 1
    else:
        sm_lslv[0,k] = 1
    k = k+1
#print rf_lslv.T
# LSLV true-class column blocks: RF 0:15, RM 15:28, SF 28:37, SM 37:45.
cmat[0][0] = cmat[0][0] + np.sum(rf_lslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lslv[0,15:28])
cmat[0][2] = cmat[0][2] + np.sum(rf_lslv[0,28:37])
cmat[0][3] = cmat[0][3] + np.sum(rf_lslv[0,37:45])
cmat[1][0] = cmat[1][0] + np.sum(rm_lslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lslv[0,15:28])
cmat[1][2] = cmat[1][2] + np.sum(rm_lslv[0,28:37])
cmat[1][3] = cmat[1][3] + np.sum(rm_lslv[0,37:45])
cmat[2][0] = cmat[2][0] + np.sum(sf_lslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lslv[0,15:28])
cmat[2][2] = cmat[2][2] + np.sum(sf_lslv[0,28:37])
cmat[2][3] = cmat[2][3] + np.sum(sf_lslv[0,37:45])
cmat[3][0] = cmat[3][0] + np.sum(sm_lslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lslv[0,15:28])
cmat[3][2] = cmat[3][2] + np.sum(sm_lslv[0,28:37])
cmat[3][3] = cmat[3][3] + np.sum(sm_lslv[0,37:45])
#print cmat
############################################################################################################################################
# Plot Confusion Matrix
# Render the accumulated 4x4 confusion matrix (rows = predictions,
# columns = targets) as a color map with the raw counts overlaid, then
# save it to disk and display it.  `pp` is the matplotlib.pyplot-like
# module imported earlier in this file.
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
# Tick positions are cell centers; y labels are reversed so row 0
# (Rigid-Fixed) appears at the top with origin='upper'.
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
# Overlay each count at the center of its cell (3.5-i flips rows to
# match the reversed y axis).
i = 0
while (i < 4):
    j = 0
    while (j < 4):
        pp.text(j+0.5,3.5-i,cmat[i][j])
        j = j+1
    i = i+1
pp.savefig('results_force_motion_20_states.png')
pp.show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/Variable_Stiffness_Variable_Velocity/HMM/with 0.8s/hmm_crossvalidation_force_motion_20_states_scaled_wrt_all_data.py
|
Python
|
mit
| 41,532
|
[
"Mayavi"
] |
52404e00a659ecbb96944e23e6fd9f1edd90b4f9993eb3b4ac3e92ce0da9ecf5
|
__author__="Marina Wahl"
"""
In the MVC paradigm, models are the database interface
"""
from datetime import datetime
import hashlib
# Werkzeug's security module implements secure password hashing
# through these two functions
from werkzeug.security import generate_password_hash, check_password_hash
# for hashing
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app, request
# for rich text
from markdown import markdown
import bleach
from flask.ext.login import UserMixin, AnonymousUserMixin
from . import db, login_manager
"""
PERMISSIONS Class:
Users are assigned a discrete role, but the roles are defined in terms of
permissions.
The default field is true for only one role.
The permissions field is an integer used as bit flags: each task has a
bit position, and for each role the tasks that are allowed for that role
have their bits set to 1.
"""
class Permission:
    """Bit-flag constants describing the tasks a role may perform.

    Each task owns one bit position, so a role's ``permissions``
    integer is the bitwise OR of the flags granted to it.
    """
    FOLLOW = 1 << 0             # 0x01
    COMMENT = 1 << 1            # 0x02
    WRITE_ARTICLES = 1 << 2     # 0x04
    MODERATE_COMMENTS = 1 << 3  # 0x08
    ADMINISTER = 1 << 7         # 0x80
"""
FOLLOW Class
"""
class Follow(db.Model):
    """Association row recording that one user follows another.

    Backs the self-referential many-to-many "follows" relation; the
    composite primary key guarantees at most one row per
    (follower, followed) pair.
    """
    __tablename__ = 'follows'
    # Composite primary key: the follower and the user being followed.
    follower_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)
    followed_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)
    # When the follow relationship was established.
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
"""
ROLE Class
"""
class Role(db.Model):
    """A named user role whose capabilities are a permission bitmask.

    Exactly one role is marked ``default`` and is assigned to newly
    registered users.
    """
    __tablename__ = 'roles'
    # SQLAlchemy requires every model to declare a primary key.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    # lazy='dynamic' returns a query object instead of executing
    # immediately, so callers can add filters.
    users = db.relationship('User', backref='role', lazy='dynamic')

    @staticmethod
    def insert_roles():
        """Create or update the built-in roles in the database.

        Idempotent: existing roles are updated in place, missing ones
        are created.  Usage::

            Role.insert_roles()
            Role.query.all()
        """
        # role name -> (permission bitmask, is the default role?)
        roles = {
            'User': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_ARTICLES, True),
            'Moderator': (Permission.FOLLOW |
                          Permission.COMMENT |
                          Permission.WRITE_ARTICLES |
                          Permission.MODERATE_COMMENTS, False),
            'Administrator': (0xff, False)
        }
        for role_name, (perms, is_default) in roles.items():
            role = Role.query.filter_by(name=role_name).first()
            if role is None:
                role = Role(name=role_name)
            role.permissions = perms
            role.default = is_default
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        # Readable string representation for debugging.
        return 'Role %r' % self.name
"""
USER Class
"""
class User(UserMixin, db.Model):
    """Registered account.

    UserMixin supplies the methods Flask-Login expects
    (is_authenticated, get_id, ...) on top of the SQLAlchemy model.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    # Only the Werkzeug password hash is stored, never the plain password.
    password_hash = db.Column(db.String(128))
    # True once the user has confirmed their email address.
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    # Cached MD5 of the email, used to build Gravatar URLs.
    avatar_hash = db.Column(db.String(32))
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    # A many-to-many relationship implemented as two one to-many relationship
    # Follow instances, where each one has the follower and followed back reference
    # properties set to the respective users. The lazy='joined' mode enables this all
    # to happen from a single database query. If lazy is set to the default value of
    # select , then the follower and followed users are loaded lazily when they are
    # first accessed and each attribute will require an individual query, which means
    # that obtaining the complete list of followed users would require 100 additional
    # database queries.
    # The cascade argument configures how actions performed on a parent object propagate
    # to related objects. An example of a cascade option is the rule that says that when an
    # object is added to the database session, any objects associated with it through
    # relationships should automatically be added to the session as well. The default
    # cascade options are appropriate for most situations, but there is one case in which
    # the default cascade options do not work well for this many-to-many relationship.
    followed = db.relationship('Follow',
                               foreign_keys=[Follow.follower_id],
                               backref=db.backref('follower', lazy='joined'),
                               lazy='dynamic',
                               cascade='all, delete-orphan')
    followers = db.relationship('Follow',
                                foreign_keys=[Follow.followed_id],
                                backref=db.backref('followed', lazy='joined'),
                                lazy='dynamic',
                                cascade='all, delete-orphan')
    # Comments in posts
    comments = db.relationship('Comment', backref='author', lazy='dynamic')
    # show own posts together with followed users' posts
    @staticmethod
    def add_self_follows():
        """Migration helper: make every existing user follow themselves."""
        for user in User.query.all():
            if not user.is_following(user):
                user.follow(user)
                db.session.add(user)
                db.session.commit()
    def __init__(self, **kwargs):
        """Assign the default role (administrator when the email matches
        ANTISOCIAL_ADMIN), cache the avatar hash and add a self-follow."""
        super(User, self).__init__(**kwargs)
        # define a default role for users
        if self.role is None:
            if self.email == current_app.config['ANTISOCIAL_ADMIN']:
                self.role = Role.query.filter_by(permissions=0xff).first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()
        # to get the gravatar address
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8')).hexdigest()
        self.followed.append(Follow(followed=self))
    # password hashing and unhashing
    @property
    def password(self):
        """Write-only: reading raises; assigning stores only the hash."""
        raise AttributeError('password is not a readable attribute')
    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Return True when `password` matches the stored hash.

        NOTE(review): both the stored hash and the candidate password are
        encoded to bytes before the check -- confirm this matches the
        installed Werkzeug version, whose helpers generally expect str.
        """
        #print(type(password))
        #print(type(self.password_hash))
        #print(type(self.password_hash.encode('utf-8')))
        return check_password_hash(self.password_hash.encode('utf-8'), password.encode('utf-8'))
    # for email authentication and token generations
    def generate_confirmation_token(self, expiration=3000):
        """Return a signed token (valid `expiration` seconds) for the
        account-confirmation email."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})
    def confirm(self, token):
        """Validate a confirmation token and mark the account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True
    def generate_reset_token(self, expiration=3000):
        """Return a signed token for the password-reset email."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})
    # resetting user password
    def reset_password(self, token, new_password):
        """Set `new_password` if `token` is a valid reset token for this
        user; return True on success."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('reset') != self.id:
            return False
        self.password = new_password
        db.session.add(self)
        return True
    # to change email
    def generate_email_change_token(self, new_email, expiration=3600):
        """Return a signed token carrying the pending new email address."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'change_email': self.id, 'new_email': new_email})
    def change_email(self, token):
        """Apply a pending email change when `token` is valid and the new
        address is not already taken; refreshes the avatar hash too."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        self.avatar_hash = hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        db.session.add(self)
        return True
    # permissions verification for a user
    def can(self, permissions):
        """True when the user's role grants every bit in `permissions`."""
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions
    def is_administrator(self):
        return self.can(Permission.ADMINISTER)
    # to set date/time in signup and posts (refresh last visit)
    def ping(self):
        """Refresh the last_seen timestamp to the current UTC time."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)
    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build a Gravatar URL for this user; uses HTTPS when the current
        request is secure."""
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        hash = self.avatar_hash or hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=hash, size=size, default=default, rating=rating)
    # readable str rep
    def __repr__(self):
        return 'User %r' % self.username
    # create fake data
    # User.generate_fake()
    @staticmethod
    def generate_fake(count=10):
        """Insert `count` random users (development helper only)."""
        from sqlalchemy.exc import IntegrityError
        from random import seed
        import forgery_py
        seed()
        for i in range(count):
            u = User(email=forgery_py.internet.email_address(),
                     username=forgery_py.internet.user_name(True),
                     password=forgery_py.lorem_ipsum.word(),
                     confirmed=True,
                     name=forgery_py.name.full_name(),
                     location=forgery_py.address.city(),
                     about_me=forgery_py.lorem_ipsum.sentence(),
                     member_since=forgery_py.date.date(True))
            db.session.add(u)
            try:
                db.session.commit()
            except IntegrityError:
                # Random usernames/emails may collide with existing rows.
                db.session.rollback()
    # the following/follower property
    # this method inserts a Follow instance in the association table
    # that links a follower with a followed user
    def follow(self, user):
        """Start following `user` (no-op when already following)."""
        if not self.is_following(user):
            f = Follow(followed=user)
            self.followed.append(f)
    def unfollow(self, user):
        """Stop following `user` (no-op when not following)."""
        f = self.followed.filter_by(followed_id=user.id).first()
        if f:
            self.followed.remove(f)
    def is_following(self, user):
        return self.followed.filter_by(
            followed_id=user.id).first() is not None
    def is_followed_by(self, user):
        return self.followers.filter_by(
            follower_id=user.id).first() is not None
    # Join union database to only show the relevant posts
    @property
    def followed_posts(self):
        """Query for posts authored by the users this user follows
        (including self, thanks to the self-follow added in __init__)."""
        return Post.query.join(Follow, Follow.followed_id == Post.author_id)\
            .filter(Follow.follower_id == self.id)
"""
POST Class:
A blog post is represented by a body, a timestamp, and a one-to-many
relationship from the User model.
"""
class Post(db.Model):
    """Blog post: body text, timestamp and a link back to its author."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    # db .Text gives no limitation on the length
    body = db.Column(db.Text)
    # Cached HTML rendering of `body`, maintained by on_changed_body below.
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    comments = db.relationship('Comment', backref='post', lazy='dynamic')
    # create fake posts
    @staticmethod
    def generate_fake(count=10):
        """Insert `count` posts with random text and random existing
        authors (development helper; requires users in the database)."""
        from random import seed, randint
        import forgery_py
        seed()
        user_count = User.query.count()
        for i in range(count):
            u = User.query.offset(randint(0, user_count - 1)).first()
            p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1, 3)),
                     timestamp=forgery_py.date.date(True),
                     author=u)
            db.session.add(p)
            db.session.commit()
    # rich text
    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """Render the markdown `value` to sanitised HTML in body_html."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
                        'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
                        'h1', 'h2', 'h3', 'p']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))
# rich text event listener: it will automatically be invoked whenever
# the body field on any instance of the class is set to a new value
# The conversion is done in three steps: first the markdown() function does
# an initial conversion to HTML. The result is passed to clean(), along with
# a list of approved HTML tags (removes any tag that are not in the whitelist).
# The final conversion is done with linkify(), that converts any URL written
# in plain text into a proper <a>
db.event.listen(Post.body, 'set', Post.on_changed_body)
"""
ANONYMOUS Class
"""
class AnonymousUser(AnonymousUserMixin):
    """Stand-in bound to current_user when nobody is logged in, so
    permission checks work without testing is_authenticated first."""
    def can(self, permissions):
        # Anonymous visitors hold no permissions at all.
        return False
    def is_administrator(self):
        return False
# Register with Flask-Login so current_user always has can()/is_administrator().
login_manager.anonymous_user = AnonymousUser
# User loader callback function, that loads a
# user, given the identifier (as a unicode string)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login user loader: `user_id` arrives as a unicode string, so
    convert it to int for the primary-key lookup."""
    return User.query.get(int(user_id))
"""
COMMENTS class
"""
class Comment(db.Model):
    """User comment attached to a Post."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    # Cached HTML rendering of `body`, maintained by on_changed_body below.
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    # Set by moderators to hide inappropriate comments.
    disabled = db.Column(db.Boolean)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """Render markdown `value` to sanitised HTML in body_html; comments
        allow a smaller tag whitelist than posts (inline tags only)."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em', 'i',
                        'strong']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))
# Re-render body_html whenever Comment.body is assigned.
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
|
bt3gl/The-Anti-Social-Network
|
app/models.py
|
Python
|
mit
| 15,070
|
[
"VisIt"
] |
aa605f10388a8334154a27d4ac5db3fa1c560cbe50fcf74f1809b6574bdbcad3
|
"""
=================
Confidence Levels
=================
When setting the sigma levels for ChainConsumer, we need to be careful
if we are talking about 1D or 2D Gaussians. For 1D Gaussians, 1 and 2 :math:`\sigma` correspond
to 68% and 95% confidence levels. However, for a 2D Gaussian, integrating over 1 and 2 :math:`\sigma`
levels gives 39% and 86% confidence levels.
By default ChainConsumer uses the 2D levels, such that the contours will line up and agree with the
marginalised distributions shown above them, however you can also choose to switch to using the 1D
Gaussian method, such that the contour encloses 68% and 95% confidence regions, by switching `sigma2d` to `False`
"""
import numpy as np
from numpy.random import multivariate_normal
from chainconsumer import ChainConsumer
# Draw a large 2D standard-normal sample so the contours are smooth.
np.random.seed(0)
data = multivariate_normal([0, 0], [[1, 0], [0, 1]], size=1000000)
# NOTE(review): the trailing comment below calls sigma2d=False "the default
# case" while the module docstring says the 2D levels are the default and
# that sigma2d=False selects the 1D (68%/95%) method -- the two statements
# disagree; confirm against the installed ChainConsumer version's docs.
c = ChainConsumer().add_chain(data, parameters=["$x$", "$y$"])
c.configure(flip=False, sigma2d=False, sigmas=[1, 2])  # The default case, so you don't need to specify sigma2d
fig = c.plotter.plot()
fig.set_size_inches(3 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
###############################################################################
# Demonstrating the 1D Gaussian confidence levels. Notice the change in contour size
# The contours shown below now show the 68% and 95% confidence regions.
c = ChainConsumer().add_chain(data, parameters=["$x$", "$y$"])
c.configure(flip=False, sigma2d=True, sigmas=[1, 2])
# BUG FIX: a stray "# -*- coding: utf-8 -*-" was fused onto the next line;
# a coding declaration is only honoured on the first two lines of a file.
fig = c.plotter.plot()
fig.set_size_inches(3 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
|
Samreay/ChainConsumer
|
examples/customisations/plot_confidence_levels.py
|
Python
|
mit
| 1,673
|
[
"Gaussian"
] |
034f11487b92734375843fd66e6cdcaebf70e09410f4056ec32e72ef1bdae9a8
|
#!/usr/bin/python
#
# Copyright (C) 2015 by Markus Meinert
# This file is distributed under the terms of the GNU General Public License.
# See the file COPYING for license details.
#
# http://elk.sourceforge.net/
#
# elk-optics.py v1.0
#
# This script comes with no warranty. Check the results
# carefully for production use.
#
# Description:
# Reads all EPSILON_ii.OUT files in the present directory
# and computes optical quantities. Diagonal components only.
#
# Input: EPSILON_ii.OUT
#
# Output: energy (eV), Re(eps), Im(eps),
# refractive index Re(n), Im(n),
# normal incidence reflectivity R,
# absorption coefficient alpha (m^-1),
# EELS -1/Im(eps)
#
# Output is written to optics_ii.out
#
import sys, os, math, cmath
# check which files of type EPSILON_ii.OUT exist and
# return a list of present components
def get_components():
    """Return the diagonal tensor components ('11', '22', '33') for which
    an EPSILON_ii.OUT file exists in the current working directory."""
    return [component for component in ('11', '22', '33')
            if os.path.isfile('EPSILON_%s.OUT' % component)]
# read the EPSILON_ii.OUT file
# return lists of energies and complex eps
def read_epsilon(filename):
    """Parse an EPSILON_ii.OUT file.

    The file holds two equal-length blocks separated by a blank line: the
    first lists (energy, Re(eps)) rows, the second (energy, Im(eps)) rows.

    Returns (energies, eps_cplx): energies from the first block and the
    complex permittivity assembled from both blocks.
    """
    with open(filename, 'r') as handle:
        rows = [line.split() for line in handle]
    # Drop blank separator lines and convert both columns to floats.
    pairs = [(float(row[0]), float(row[1])) for row in rows if row]
    half = len(pairs) // 2
    energies = [energy for energy, _ in pairs[:half]]
    eps_cplx = [complex(re_part, im_part)
                for (_, re_part), (_, im_part) in zip(pairs[:half], pairs[half:])]
    return energies, eps_cplx
# compute optical properties from energies and complex epsilon
def write_optical_properties(energies, eps_cplx, component):
    """Compute optical quantities from the complex permittivity and write
    them to optics_<component>.out.

    Output columns: energy (eV), Re(eps), Im(eps), Re(n), Im(n),
    normal-incidence reflectivity R, absorption coefficient alpha (m^-1),
    and EELS = -Im(1/eps).

    Parameters
    ----------
    energies : list of float
        Photon energies in Hartree (converted to eV on output).
    eps_cplx : list of complex
        Complex dielectric function at each energy.
    component : str
        Tensor component label ('11', '22' or '33') used in the file name.
    """
    # Complex refractive index N = n + i*k (k is the extinction coefficient).
    N = [cmath.sqrt(x1) for x1 in eps_cplx]
    # Reuse N for k instead of re-evaluating cmath.sqrt a second time.
    k = [x1.imag for x1 in N]
    # Normal incidence reflectivity from complex refractive index.
    R = [abs((1. - x1) / (1. + x1))**2 for x1 in N]
    # Absorption coefficient alpha = 2 * omega * k / c in SI units (m^-1).
    Ha_to_J = 27.21138602 * 1.6021766208E-19
    hbar = 6.626070040E-34 / (2 * math.pi)
    c = 2.99792458E8
    Ha_to_omegaSI = Ha_to_J / hbar
    alpha = [2 * (x1 * Ha_to_omegaSI) / c * x2 for x1, x2 in zip(energies, k)]
    # Format data and write to file optics_ii.out
    data = zip(energies, eps_cplx, N, R, alpha)
    output = '%14s %14s %14s %14s %14s %14s %14s %14s\n' % ('# energy (eV)', 'Re(eps)', 'Im(eps)', 'Re(n)', 'Im(n)', 'R', 'alpha (m^-1)', 'EELS')
    for line in data:
        output += '%14.6e %14.6e %14.6e %14.6e %14.6e %14.6e %14.6e %14.6e\n' % (line[0]*27.21138602, line[1].real, line[1].imag, line[2].real, line[2].imag, line[3], line[4], -(1/line[1]).imag)
    outfilename = 'optics_%s.out' % component
    # Context manager ensures the file is closed even if the write fails.
    with open(outfilename, 'w') as outfile:
        outfile.write(output)
# main loop over diagonal components of the epsilon tensor
# Main loop over diagonal components of the epsilon tensor.
print('===================')
print('| elk-optics v1.0 |')
print('===================')
print()
print('Looking for EPSILON_ii.OUT files...')
components = get_components()
if not components:
    sys.exit('No EPSILON_ii.OUT files found. Exit.\n')
else:
    print('Files found:')
    for c in components:
        # BUG FIX: the original "print('...') % c" (Python-2 remnant) printed
        # the raw format string and then raised TypeError (None % str) under
        # Python 3; the format operation now happens inside the call.
        print('  EPSILON_%s.OUT' % c)
    print()
for c in components:
    filename = 'EPSILON_%s.OUT' % c
    print('Working on %s ...' % filename)
    energies, eps_cplx = read_epsilon(filename)
    write_optical_properties(energies, eps_cplx, c)
    print('Optical properties written to optics_%s.out' % c)
# BUG FIX: a bare "print" in Python 3 just evaluates the function object and
# prints nothing; print() emits the intended blank line.
print()
|
rohkeaID/elk-w90-improved
|
utilities/elk-optics/elk-optics.py
|
Python
|
gpl-3.0
| 3,765
|
[
"Elk"
] |
23d29f2b7fcdaae8bf0b4d6d78ec2a80083c15a6cd03e4b57016dd096f29f4fb
|
#! /usr/bin/env python
# ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2014) James Kermode, King's College London
# Lars Pastewka, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
"""
Script to generate a crack slab, and apply initial strain ramp
James Kermode <james.kermode@kcl.ac.uk>
August 2014
"""
import numpy as np
import ase.io
import ase.units as units
from matscipy.fracture_mechanics.crack import thin_strip_displacement_y
from some_tools import relax_structure, shrink_cell
import sys
sys.path.insert(0, '.')
import params
# Indices of atoms whose strain-ramp displacement should be inverted
# (read from a CSV prepared elsewhere in the workflow).
swap = np.loadtxt('swap_topbottom_atoms.csv', dtype='int')
a = params.a.copy()
a.set_calculator(params.calc)
# NOTE(review): `directions` appears unused below -- possibly consumed via
# params elsewhere; confirm before removing.
directions = [params.crack_direction,
              params.cleavage_plane,
              params.crack_front]
# now, we build system aligned with requested crystallographic orientation
unit_slab = params.unit_slab
print('Unit slab with %d atoms per unit cell:' % len(unit_slab))
print(unit_slab.cell)
print('')
# NOTE(review): pos_0/pos_1 are hard-coded coordinates of the two atoms
# that define the vertical bond to centre on -- system-specific; confirm
# for new geometries (the commented fallback below uses atoms 0 and 1).
pos_0 = np.array([-21.5, -2.9, 31.])
pos_1 = np.array([-21.4, -1.38, 30.6])
r = unit_slab.get_positions()
# Pick the atom nearest to each reference position.
atom_0, atom_1 = [np.argsort([np.linalg.norm(r_i - pos) for r_i in r])[0]
                  for pos in [pos_0, pos_1]]
# atom_0, atom_1 = 0, 1
# center vertically half way along the vertical bond between atoms 0 and 1
unit_slab.positions[:, 1] += (unit_slab.positions[atom_1, 1] -
                              unit_slab.positions[atom_0, 1]) / 2.0
# map positions back into unit cell
unit_slab.set_scaled_positions(unit_slab.get_scaled_positions())
if hasattr(params, 'surface'):
    surface = params.surface
else:
    # Make a surface unit cell by replicating and adding some vacuum along y
    surface = unit_slab * [1, params.surf_ny, 1]
    surface.center(params.vacuum, axis=1)
# ********** Surface energy ************
# Calculate surface energy per unit area
surface.set_calculator(params.calc)
if hasattr(params, 'relax_bulk') and params.relax_bulk:
    print('Minimising surface unit cell...')
    surface = relax_structure(surface)
E_surf = surface.get_potential_energy()
E_per_atom_bulk = a.get_potential_energy() / len(a)
# volume is not get_volume() because it's not a solid 3D system!
volume = np.prod([np.ptp(vect) for vect in a.get_positions().T])
area = volume / np.ptp(a.get_positions()[:,1])
# Two surfaces are created by cleavage, hence the factor of 2 in the area.
gamma = ((E_surf - E_per_atom_bulk * len(surface)) /
         (2.0 * area))
print('Surface energy of %s surface %.4f J/m^2\n' %
      (params.cleavage_plane, gamma / (units.J / units.m ** 2)))
# ***** Setup crack slab supercell *****
# NOTE(review): this aliases unit_slab -- later in-place edits mutate both.
crack_slab = unit_slab
# open up the cell along x and y by introducing some vacuum
shrink_cell(crack_slab)
crack_slab.center(params.vacuum, axis=0)
crack_slab.center(params.vacuum, axis=1)
# centre the slab on the origin
crack_slab.positions[:, 0] -= crack_slab.positions[:, 0].mean()
crack_slab.positions[:, 1] -= crack_slab.positions[:, 1].mean()
crack_slab.positions[:, 1] += 0.5
top = crack_slab.positions[:, 1].max()
bottom = crack_slab.positions[:, 1].min()
left = crack_slab.positions[:, 0].min()
right = crack_slab.positions[:, 0].max()
orig_width = right - left
orig_height = top - bottom
print(('Made slab with %d atoms, original width and height: %.1f x %.1f A^2' %
       (len(crack_slab), orig_width, orig_height)))
# ****** Apply initial strain ramp *****
strain = params.initial_strain
# Thin-strip y-displacement field: constant strain left of the seed, then
# a linear ramp over strain_ramp_length.
displacement = thin_strip_displacement_y(
    crack_slab.positions[:, 0],
    crack_slab.positions[:, 1],
    strain,
    left + params.crack_seed_length,
    left + params.crack_seed_length +
    params.strain_ramp_length)
# Invert the displacement of the atoms listed in the swap file.
displacement[swap] = -displacement[swap]
crack_slab.positions[:, 1] += displacement
# cleanup_crack_tip(crack_slab)
# fix atoms in the top and bottom rows
if 'groups' in crack_slab.arrays:
    fixed_mask = crack_slab.get_array('groups') == 0
else:
    fixed_mask = ((abs(crack_slab.positions[:, 1] - top) < 1.0) |
                  (abs(crack_slab.positions[:, 1] - bottom) < 1.0))
print('Fixed atoms: %d\n' % fixed_mask.sum())
# Save all calculated materials properties inside the Atoms object
crack_slab.info['nneightol'] = 1.3 # nearest neighbour tolerance
crack_slab.info['SurfaceEnergy'] = gamma
crack_slab.info['OrigWidth'] = orig_width
crack_slab.info['OrigHeight'] = orig_height
crack_slab.info['CrackDirection'] = params.crack_direction
crack_slab.info['CleavagePlane'] = params.cleavage_plane
crack_slab.info['CrackFront'] = params.crack_front
crack_slab.info['cell_origin'] = -np.diag(crack_slab.cell)/2.0
crack_slab.set_array('fixed_mask', fixed_mask)
ase.io.write('slab.xyz', crack_slab, format='extxyz')
print('Applied initial load: strain=%.4f' % strain)
# ***** Relaxation of crack slab *****
# optionally, relax the slab, keeping top and bottom rows fixed
if params.relax_slab:
    ase.io.write('crack.xyz', crack_slab, format='extxyz')
    crack_slab = relax_structure(crack_slab)
crack_slab.info['strain'] = strain
crack_slab.info['is_cracked'] = False
# ******** Save output file **********
# Save results in extended XYZ format, including extra properties and info
print('Writing crack slab to file "crack.xyz"')
ase.io.write('crack.xyz', crack_slab, format='extxyz')
# Plain copy for visualisation, tagged with original atom indices.
crack_vis = ase.Atoms(crack_slab.get_atomic_numbers(), crack_slab.get_positions(), cell=crack_slab.get_cell())
crack_vis.set_array('index', np.arange(len(crack_vis)))
ase.io.write('crack_vis.xyz', crack_vis, format='extxyz')
|
marcocaccin/crack2Dglass
|
make_crack.py
|
Python
|
gpl-2.0
| 6,220
|
[
"ASE",
"Matscipy"
] |
e4bd37f8de20c84ebb17f6afb229c801a7f1f6490916b24b4b55b7ac61dde619
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Generally, the CASSCF solver does NOT return the natural orbitals.
1. Attribute .natorb controls whether the active space orbitals are transformed
to natural orbitals in the results.
2. When .natorb is set, the natural orbitals may NOT be sorted by the active
space occupancy. Within each irreps
'''
import numpy
from pyscf import gto, scf, mcscf
# spin = n_alpha - n_beta, so spin=2 gives the triplet ground state of O2.
mol = gto.M(
    atom = 'O 0 0 0; O 0 0 1.2',
    basis = 'ccpvdz',
    spin = 2)
myhf = scf.RHF(mol).run()
# 6 orbitals, 8 electrons
mycas = mcscf.CASSCF(myhf, 6, 8)
mycas.kernel() # Here mycas.mo_coeff are not natural orbitals
mycas.natorb = True
mycas.kernel() # Here mycas.mo_coeff are natural orbitals
#
# The natural orbitals in active space are NOT sorted by the occupancy.
#
# Same system, but with point-group symmetry enabled.
mol = gto.M(
    atom = 'O 0 0 0; O 0 0 1.2',
    basis = 'ccpvdz',
    symmetry = True,
    spin = 2)
myhf = scf.RHF(mol).run()
mycas = mcscf.CASSCF(myhf, 6, 8)
mycas.natorb = True
# Here mycas.mo_coeff are natural orbitals because .natorb is on.
# Note: the active space orbitals have the same symmetry as the input HF
# canonical orbitals. They are not fully sorted wrt the occupancies.
# The mcscf active orbitals are sorted only within each irrep.
mycas.kernel()
|
gkc1000/pyscf
|
examples/mcscf/03-natural_orbital.py
|
Python
|
apache-2.0
| 1,286
|
[
"PySCF"
] |
07862f819ed8e23d8289d84b2397c27016e13a7ea1ca21501756362d1ff8651b
|
#!/usr/bin/env python
"""tree_cable.py: A depth 10 binary tree like cable with following properties.
Depth Number Length (microns) Diameter (microns)
==========================================================================
0 1 32.0 16.0
1 2 25.4 10.08
2 4 20.16 6.35
3 8 16.0 4.0
4 16 12.7 2.52
5 32 10.08 1.587
6 64 8.0 1.0
7 128 6.35 0.63
8 256 5.04 0.397
9 512 4.0 0.25
The membrane properties are :
RA = 1.0 ohms meter = 100 ohms cm
RM = 4.0 ohms meter^2 = 40000 ohms cm^2
CM = 0.01 Farads/meter^2 = 1.0 uf/cm^2
EM = -0.065 Volts = -65 mV
Last modified: Sat Jan 18, 2014 05:01PM
"""
from __future__ import print_function
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import moose
import moose.utils as utils
import compartment as comp
import pylab
import numpy as np
import moose.backend.graphviz as graphviz
def nextValuePowerOf2Law( d1, power=2.0/3.0 ):
    """Return the value for the next (deeper) tree level.

    The parent value is mapped through x -> (x**(1/power) / 2)**power,
    i.e. the quantity x**(1/power) halves at every branching level.
    """
    halved_root = d1 ** (1.0 / power) / 2.0
    return halved_root ** power
def testPowerLaw():
    """Smoke-test the branching law: print the next-level diameter and
    length derived from the level-0 compartment (32 um x 16 um)."""
    lengths = [ 32.0 ]
    diameters = [ 16.0 ]
    print(nextValuePowerOf2Law( diameters[-1] ))
    print(nextValuePowerOf2Law( lengths[-1], 1.0/3.0 ))
class BinaryCable( ):
    """Symmetric binary-tree cable of passive MOOSE compartments.

    Level n holds 2**n compartments; length and diameter per level follow
    the branching law in nextValuePowerOf2Law.
    """
    def __init__(self, depth):
        """Set up per-level geometry lists and the /cable and /data
        containers in the MOOSE element tree."""
        self.depth = depth
        # Total compartments in a full binary tree of this depth.
        self.size = pow(2, self.depth) - 1
        # Level-0 (root) compartment geometry, in metres.
        self.compLenAtLevelZero = 32e-6
        self.compDiamAtLevelZero = 16.0e-6
        self.compLengthList = [ self.compLenAtLevelZero ]
        self.compDiamList = [ self.compDiamAtLevelZero ]
        self.cablePath = '/cable'
        moose.Neutral(self.cablePath)
        self.tablePath = '/data'
        moose.Neutral(self.tablePath)
        self.stimTables = []
    def buildParameterLists(self):
        ''' Build list of parameters in moose '''
        # Extend the per-level lists until there is one entry per tree
        # level, each derived from the previous via the power law.
        while len(self.compDiamList) < self.depth:
            self.compDiamList.append(
                    nextValuePowerOf2Law(self.compDiamList[-1] )
                    )
        while len(self.compLengthList) < self.depth:
            self.compLengthList.append(
                    nextValuePowerOf2Law(self.compLengthList[-1], 1.0/3.0)
                    )
    def buildCable(self, args):
        ''' Build binary cable '''
        self.args = args
        self.buildParameterLists()
        # Cable is a list of lists: self.cable[level][index].
        self.cable = list()
        for n, (l, d) in enumerate(zip(self.compLengthList, self.compDiamList)):
            utils.dump("STEP"
                    , "Binary tree level {}: length {}, diameter {}".format(
                        n, l, d
                        )
                    )
            # Level n of a binary tree has 2**n compartments.
            noOfCompartments = pow(2, n)
            compartmentList = []
            for i in range(noOfCompartments):
                compPath = '{}/comp_{}_{}'.format(self.cablePath, n, i)
                m = comp.MooseCompartment( compPath, l, d, args )
                compartmentList.append( m.mc_ )
            self.cable.append( compartmentList )
        self.connectCable()
    def connectCable(self):
        ''' Connect the binary tree cable '''
        # Parent i at one level feeds children 2i and 2i+1 at the next.
        for i, parentList in enumerate(self.cable[:-1]):
            childrenList = self.cable[i+1]
            for ii, p in enumerate(parentList):
                leftChild = childrenList[ 2*ii + 0 ]
                rightChild = childrenList[ 2*ii + 1 ]
                moose.connect( p, 'raxial', leftChild, 'axial')
                moose.connect( p, 'raxial', rightChild, 'axial' )
    def setupDUT( self ):
        ''' Setup cable for recording '''
        # Create a pulse input
        moose.Neutral( self.tablePath )
        stim = moose.PulseGen( '{}/input'.format( self.tablePath) )
        stim.level[0] = self.args['inj']
        stim.width[0] = self.args['run_time']
        # Inject the current from stim to first compartment.
        moose.connect( stim, 'output', self.cable[0][0], 'injectMsg' )
        # Fill the data from stim into table.
        inputTable = moose.Table( '{}/inputTable'.format( self.tablePath ) )
        self.stimTables.append( inputTable )
        moose.connect( inputTable, 'requestOut', stim, 'getOutputValue' )
    def recordAt( self, depth, index ):
        ''' Parameter index is python list-like index. Index -1 is the last
        elements in the list
        '''
        # NOTE(review): the table path encodes only `index`, so probing the
        # same index at two different depths would collide -- confirm.
        utils.dump( "RECORD"
                , "Adding probe at index {} and depth {}".format(index, depth)
                )
        c = self.cable[depth][index]
        t = moose.Table( '{}/output_at_{}'.format( self.tablePath, index ))
        # Record the membrane potential Vm of the chosen compartment.
        moose.connect( t, 'requestOut', c, 'getVm' )
        return t
    def setupSolver(self, path = '/hsolve'):
        """Setting up HSolver """
        # Hines solver over the whole cable, ticked on clock 1.
        hsolve = moose.HSolve( path )
        hsolve.dt = self.simDt
        moose.setClock(1, self.simDt)
        moose.useClock(1, hsolve.path, 'process')
        hsolve.target = self.cablePath
    def simulate(self, simTime, simDt, plotDt=None):
        '''Simulate the cable
        '''
        self.simDt = simDt
        self.setupDUT( )
        # Setup clocks
        moose.setClock( 0, self.simDt )
        # Use clocks
        moose.useClock( 0, '/##', 'process' )
        moose.useClock( 0, '/##', 'init' )
        utils.dump("STEP"
                , [ "Simulating cable for {} sec".format(simTime)
                    , " simDt: %s" % self.simDt
                    ]
                )
        utils.verify( )
        moose.reinit( )
        self.setupSolver( )
        moose.start( simTime )
def main( args ):
    """Build a binary-tree cable of the requested depth, probe the root
    and the last compartment of the deepest level, run the simulation and
    dump the element graph to a Graphviz .dot file.

    `args` is the dict produced by vars(parser.parse_args()) below.
    """
    # d is depth of cable.
    d = args['tree_depth']
    assert d > 0, "Cable depth can not be nagative"
    binCable = BinaryCable( depth = d )
    binCable.buildCable( args )
    table0 = binCable.recordAt( depth = 0, index = 0 )
    table1 = binCable.recordAt( depth = d-1, index = -1)
    print("[STIM] Simulating a cable with depth {}".format(d))
    binCable.simulate( simTime = args['run_time'], simDt = args['dt'] )
    #utils.plotTables( [ table0, table1 ]
    #        , file = args['output']
    #        , xscale = args['dt']
    #        )
    graphviz.writeGraphviz(__file__+".dot") #, compartment_shape='point')
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
            description = 'Rallpacks1: A cable with passive compartments'
            )
    # BUG FIX: every numeric option now declares type=, so values supplied
    # on the command line are parsed to numbers. The original omitted
    # type=, so any user-supplied value arrived as a string and broke the
    # arithmetic in BinaryCable/main; the numeric defaults were unaffected.
    parser.add_argument( '--tau'
            , default = 0.04
            , type = float
            , help = 'Time constant of membrane'
            )
    parser.add_argument( '--run_time'
            , default = 0.25
            , type = float
            , help = 'Simulation run time'
            )
    parser.add_argument( '--dt'
            , default = 5e-5
            , type = float
            , help = 'Step time during simulation'
            )
    parser.add_argument( '--Em'
            , default = -65e-3
            , type = float
            , help = 'Resting potential of membrane'
            )
    parser.add_argument( '--RA'
            , default = 1.0
            , type = float
            , help = 'Axial resistivity'
            )
    parser.add_argument( '--RM'
            , default = 4.0
            , type = float
            , help = 'Membrane resistivity.'
            )
    parser.add_argument( '--lambda'
            , default = 1e-3
            , type = float
            , help = 'Lambda, what else?'
            )
    parser.add_argument( '--x'
            , default = 1e-3
            , type = float
            , help = 'You should record membrane potential somewhere, right?'
            )
    parser.add_argument( '--length'
            , default = 1e-3
            , type = float
            , help = 'Length of the cable'
            )
    parser.add_argument( '--diameter'
            , default = 1e-6
            , type = float
            , help = 'Diameter of cable'
            )
    parser.add_argument( '--inj'
            , default = 1e-10
            , type = float
            , help = 'Current injected at one end of the cable'
            )
    parser.add_argument( '--tree_depth'
            , default = 10
            , type = int
            , help = 'Depth of binary tree.'
            )
    parser.add_argument( '--output'
            , default = None
            , help = 'Store simulation results to this file'
            )
    args = parser.parse_args()
    main( vars(args) )
|
subhacom/moose-core
|
tests/python/Rallpacks/rallpacks_tree_cable.py
|
Python
|
gpl-3.0
| 8,348
|
[
"MOOSE"
] |
cf56b67b05833940da1456d4d1a7f45f23f948531e97d69ff668fa1463f3bb81
|
# Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
# and Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.
**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. This module will be removed in version 0.17.
It has been moved to a separate repository:
https://github.com/hmmlearn/hmmlearn
"""
import string
import numpy as np
from .utils import check_random_state, deprecated
from .utils.extmath import logsumexp
from .base import BaseEstimator
from .mixture import (
GMM, log_multivariate_normal_density, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc
# Public API of this (deprecated) module.
__all__ = ['GMMHMM',
           'GaussianHMM',
           'MultinomialHMM',
           'decoder_algorithms',
           'normalize']
# Floor used in place of log(0) so log-probabilities stay finite.
ZEROLOGPROB = -1e200
# Machine epsilon; normalize() adds it to inputs to avoid division by zero.
EPS = np.finfo(float).eps
NEGINF = -np.inf
# Decoders accepted via the `algorithm` parameter: Viterbi or posterior (MAP).
decoder_algorithms = ("viterbi", "map")
@deprecated("WARNING: The HMM module and its functions will be removed in 0.17 "
            "as it no longer falls within the project's scope and API. "
            "It has been moved to a separate repository: "
            "https://github.com/hmmlearn/hmmlearn")
def normalize(A, axis=None):
    """ Normalize the input array so that it sums to 1.
    WARNING: The HMM module and its functions will be removed in 0.17
    as it no longer falls within the project's scope and API.
    Parameters
    ----------
    A: array, shape (n_samples, n_features)
        Non-normalized input data
    axis: int
        dimension along which normalization is performed
    Returns
    -------
    normalized_A: array, shape (n_samples, n_features)
        A with values normalized (summing to 1) along the prescribed axis
    WARNING: Modifies inplace the array
    """
    # In-place: shift every entry by machine epsilon so no slice sums to 0
    # (this is the documented in-place modification of A).
    A += EPS
    Asum = A.sum(axis)
    # NOTE(review): `if axis` is a truthiness test, so axis=0 (like
    # axis=None) skips this branch. For axis=0 the plain division below
    # still lines up because NumPy broadcasting aligns trailing axes; the
    # reshape of Asum is only needed for axis >= 1.
    if axis and A.ndim > 1:
        # Make sure we don't divide by zero.
        Asum[Asum == 0] = 1
        shape = list(A.shape)
        shape[axis] = 1
        Asum.shape = shape
    return A / Asum
@deprecated("WARNING: The HMM module and its function will be removed in 0.17"
            "as it no longer falls within the project's scope and API. "
            "It has been moved to a separate repository: "
            "https://github.com/hmmlearn/hmmlearn")
class _BaseHMM(BaseEstimator):
    """Hidden Markov Model base class.

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    See the instance documentation for details specific to a
    particular object.

    .. warning::

       The HMM module and its functions will be removed in 0.17
       as it no longer falls within the project's scope and API.

    Attributes
    ----------
    n_components : int
        Number of states in the model.

    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.

    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.

    transmat_prior : array, shape (`n_components`, `n_components`)
        Matrix of prior transition probabilities between states.

    startprob_prior : array, shape ('n_components`,)
        Initial state occupation prior distribution.

    algorithm : string, one of the decoder_algorithms
        decoder algorithm

    random_state: RandomState or an int seed (0 by default)
        A random number generator instance

    n_iter : int, optional
        Number of iterations to perform.

    thresh : float, optional
        Convergence threshold.

    params : string, optional
        Controls which parameters are updated in the training
        process.  Can contain any combination of 's' for startprob,
        't' for transmat, and other characters for subclass-specific
        emmission parameters.  Defaults to all parameters.

    init_params : string, optional
        Controls which parameters are initialized prior to
        training.  Can contain any combination of 's' for
        startprob, 't' for transmat, and other characters for
        subclass-specific emmission parameters.  Defaults to all
        parameters.

    See Also
    --------
    GMM : Gaussian mixture model
    """

    # This class implements the public interface to all HMMs that
    # derive from it, including all of the machinery for the
    # forward-backward and Viterbi algorithms.  Subclasses need only
    # implement _generate_sample_from_state(), _compute_log_likelihood(),
    # _init(), _initialize_sufficient_statistics(),
    # _accumulate_sufficient_statistics(), and _do_mstep(), all of
    # which depend on the specific emission distribution.
    #
    # Subclasses will probably also want to implement properties for
    # the emission distribution parameters to expose them publicly.

    def __init__(self, n_components=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, thresh=1e-2, params=string.ascii_letters,
                 init_params=string.ascii_letters):
        self.n_components = n_components
        self.n_iter = n_iter
        self.thresh = thresh
        self.params = params
        self.init_params = init_params
        # These two assignments go through the startprob_/transmat_
        # property setters below, which validate and store the logs.
        self.startprob_ = startprob
        self.startprob_prior = startprob_prior
        self.transmat_ = transmat
        self.transmat_prior = transmat_prior
        self._algorithm = algorithm
        self.random_state = random_state

    def eval(self, X):
        # Backwards-compatible alias for score_samples().
        return self.score_samples(X)

    def score_samples(self, obs):
        """Compute the log probability under the model and compute posteriors.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points.  Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        logprob : float
            Log likelihood of the sequence ``obs``.

        posteriors : array_like, shape (n, n_components)
            Posterior probabilities of each state for each
            observation

        See Also
        --------
        score : Compute the log probability under the model
        decode : Find most likely state sequence corresponding to a `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        logprob, fwdlattice = self._do_forward_pass(framelogprob)
        bwdlattice = self._do_backward_pass(framelogprob)
        gamma = fwdlattice + bwdlattice
        # gamma is guaranteed to be correctly normalized by logprob at
        # all frames, unless we do approximate inference using pruning.
        # So, we will normalize each frame explicitly in case we
        # pruned too aggressively.
        posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
        posteriors += np.finfo(np.float32).eps
        posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
        return logprob, posteriors

    def score(self, obs):
        """Compute the log probability under the model.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : float
            Log likelihood of the ``obs``.

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors
        decode : Find most likely state sequence corresponding to a `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        # Only the forward pass is needed for the likelihood.
        logprob, _ = self._do_forward_pass(framelogprob)
        return logprob

    def _decode_viterbi(self, obs):
        """Find most likely state sequence corresponding to ``obs``.

        Uses the Viterbi algorithm.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points.  Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        viterbi_logprob : float
            Log probability of the maximum likelihood path through the HMM.

        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation.

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
        return viterbi_logprob, state_sequence

    def _decode_map(self, obs):
        """Find most likely state sequence corresponding to `obs`.

        Uses the maximum a posteriori estimation.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points.  Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        map_logprob : float
            Log probability of the maximum likelihood path through the HMM

        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model.
        """
        _, posteriors = self.score_samples(obs)
        # Per-frame argmax of the state posteriors (MAP decoding).
        state_sequence = np.argmax(posteriors, axis=1)
        map_logprob = np.max(posteriors, axis=1).sum()
        return map_logprob, state_sequence

    def decode(self, obs, algorithm="viterbi"):
        """Find most likely state sequence corresponding to ``obs``.

        Uses the selected algorithm for decoding.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points.  Each row
            corresponds to a single point in the sequence.

        algorithm : string, one of the `decoder_algorithms`
            decoder algorithm to be used

        Returns
        -------
        logprob : float
            Log probability of the maximum likelihood path through the HMM

        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model.
        """
        # NOTE(review): ``self._algorithm`` is validated at construction, so
        # the first branch is normally taken and the ``algorithm`` argument
        # passed here is silently ignored — confirm whether callers rely on
        # this long-standing upstream quirk before changing it.
        if self._algorithm in decoder_algorithms:
            algorithm = self._algorithm
        elif algorithm in decoder_algorithms:
            algorithm = algorithm
        decoder = {"viterbi": self._decode_viterbi,
                   "map": self._decode_map}
        logprob, state_sequence = decoder[algorithm](obs)
        return logprob, state_sequence

    def predict(self, obs, algorithm="viterbi"):
        """Find most likely state sequence corresponding to `obs`.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points.  Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation
        """
        _, state_sequence = self.decode(obs, algorithm)
        return state_sequence

    def predict_proba(self, obs):
        """Compute the posterior probability for each state in the model

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points.  Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        T : array-like, shape (n, n_components)
            Returns the probability of the sample for each state in the model.
        """
        _, posteriors = self.score_samples(obs)
        return posteriors

    def sample(self, n=1, random_state=None):
        """Generate random samples from the model.

        Parameters
        ----------
        n : int
            Number of samples to generate.

        random_state: RandomState or an int seed (0 by default)
            A random number generator instance. If None is given, the
            object's random_state is used

        Returns
        -------
        (obs, hidden_states)
        obs : array_like, length `n` List of samples

        hidden_states : array_like, length `n` List of hidden states
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)

        # Sample states by inverse-CDF lookup on the cumulative
        # start/transition distributions.
        startprob_pdf = self.startprob_
        startprob_cdf = np.cumsum(startprob_pdf)
        transmat_pdf = self.transmat_
        transmat_cdf = np.cumsum(transmat_pdf, 1)

        # Initial state.
        rand = random_state.rand()
        currstate = (startprob_cdf > rand).argmax()
        hidden_states = [currstate]
        obs = [self._generate_sample_from_state(
            currstate, random_state=random_state)]

        for _ in range(n - 1):
            rand = random_state.rand()
            currstate = (transmat_cdf[currstate] > rand).argmax()
            hidden_states.append(currstate)
            obs.append(self._generate_sample_from_state(
                currstate, random_state=random_state))

        return np.array(obs), np.array(hidden_states, dtype=int)

    def fit(self, obs):
        """Estimate model parameters.

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.

        Parameters
        ----------
        obs : list
            List of array-like observation sequences, each of which
            has shape (n_i, n_features), where n_i is the length of
            the i_th observation.

        Notes
        -----
        In general, `logprob` should be non-decreasing unless
        aggressive pruning is used.  Decreasing `logprob` is generally
        a sign of overfitting (e.g. a covariance parameter getting too
        small).  You can fix this by getting more training data,
        or strengthening the appropriate subclass-specific regularization
        parameter.
        """
        # Fall back to Viterbi decoding for unrecognized algorithm names.
        if self.algorithm not in decoder_algorithms:
            self._algorithm = "viterbi"

        self._init(obs, self.init_params)

        logprob = []
        for i in range(self.n_iter):
            # Expectation step
            stats = self._initialize_sufficient_statistics()
            curr_logprob = 0
            for seq in obs:
                framelogprob = self._compute_log_likelihood(seq)
                lpr, fwdlattice = self._do_forward_pass(framelogprob)
                bwdlattice = self._do_backward_pass(framelogprob)
                gamma = fwdlattice + bwdlattice
                posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
                curr_logprob += lpr
                self._accumulate_sufficient_statistics(
                    stats, seq, framelogprob, posteriors, fwdlattice,
                    bwdlattice, self.params)
            logprob.append(curr_logprob)

            # Check for convergence.
            # NOTE: on convergence we break BEFORE the M-step, so the last
            # accumulated statistics are discarded.
            if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
                break

            # Maximization step
            self._do_mstep(stats, self.params)

        return self

    def _get_algorithm(self):
        "decoder algorithm"
        return self._algorithm

    def _set_algorithm(self, algorithm):
        if algorithm not in decoder_algorithms:
            raise ValueError("algorithm must be one of the decoder_algorithms")
        self._algorithm = algorithm

    algorithm = property(_get_algorithm, _set_algorithm)

    def _get_startprob(self):
        """Mixing startprob for each state."""
        # Stored internally in log space.
        return np.exp(self._log_startprob)

    def _set_startprob(self, startprob):
        if startprob is None:
            # Default: uniform start distribution.
            startprob = np.tile(1.0 / self.n_components, self.n_components)
        else:
            startprob = np.asarray(startprob, dtype=np.float)

        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(startprob):
            normalize(startprob)

        if len(startprob) != self.n_components:
            raise ValueError('startprob must have length n_components')
        if not np.allclose(np.sum(startprob), 1.0):
            raise ValueError('startprob must sum to 1.0')

        self._log_startprob = np.log(np.asarray(startprob).copy())

    startprob_ = property(_get_startprob, _set_startprob)

    def _get_transmat(self):
        """Matrix of transition probabilities."""
        return np.exp(self._log_transmat)

    def _set_transmat(self, transmat):
        if transmat is None:
            # Default: uniform transition matrix.
            transmat = np.tile(1.0 / self.n_components,
                               (self.n_components, self.n_components))

        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(transmat):
            normalize(transmat, axis=1)

        if (np.asarray(transmat).shape
                != (self.n_components, self.n_components)):
            raise ValueError('transmat must have shape '
                             '(n_components, n_components)')
        if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
            raise ValueError('Rows of transmat must sum to 1.0')

        self._log_transmat = np.log(np.asarray(transmat).copy())
        # Map log(0) = nan entries to -inf so downstream math is well-defined.
        underflow_idx = np.isnan(self._log_transmat)
        self._log_transmat[underflow_idx] = NEGINF

    transmat_ = property(_get_transmat, _set_transmat)

    def _do_viterbi_pass(self, framelogprob):
        # Delegates to the compiled Viterbi implementation in _hmmc.
        n_observations, n_components = framelogprob.shape
        state_sequence, logprob = _hmmc._viterbi(
            n_observations, n_components, self._log_startprob,
            self._log_transmat, framelogprob)
        return logprob, state_sequence

    def _do_forward_pass(self, framelogprob):
        # Fills the forward lattice in place via the C extension, then clamps
        # tiny log-probabilities to -inf.
        n_observations, n_components = framelogprob.shape
        fwdlattice = np.zeros((n_observations, n_components))
        _hmmc._forward(n_observations, n_components, self._log_startprob,
                       self._log_transmat, framelogprob, fwdlattice)
        fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
        return logsumexp(fwdlattice[-1]), fwdlattice

    def _do_backward_pass(self, framelogprob):
        # Backward counterpart of _do_forward_pass.
        n_observations, n_components = framelogprob.shape
        bwdlattice = np.zeros((n_observations, n_components))
        _hmmc._backward(n_observations, n_components, self._log_startprob,
                        self._log_transmat, framelogprob, bwdlattice)
        bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
        return bwdlattice

    def _compute_log_likelihood(self, obs):
        # Abstract: overridden by subclasses for each emission distribution.
        pass

    def _generate_sample_from_state(self, state, random_state=None):
        # Abstract: overridden by subclasses for each emission distribution.
        pass

    def _init(self, obs, params):
        # Reset requested parameters to uniform distributions before EM.
        if 's' in params:
            self.startprob_.fill(1.0 / self.n_components)
        if 't' in params:
            self.transmat_.fill(1.0 / self.n_components)

    # Methods used by self.fit()

    def _initialize_sufficient_statistics(self):
        stats = {'nobs': 0,
                 'start': np.zeros(self.n_components),
                 'trans': np.zeros((self.n_components, self.n_components))}
        return stats

    def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        stats['nobs'] += 1
        if 's' in params:
            stats['start'] += posteriors[0]
        if 't' in params:
            n_observations, n_components = framelogprob.shape
            # when the sample is of length 1, it contains no transitions
            # so there is no reason to update our trans. matrix estimate
            if n_observations > 1:
                lneta = np.zeros((n_observations - 1, n_components, n_components))
                lnP = logsumexp(fwdlattice[-1])
                _hmmc._compute_lneta(n_observations, n_components, fwdlattice,
                                     self._log_transmat, bwdlattice, framelogprob,
                                     lnP, lneta)
                # Clip at 700 before exp() to avoid overflow to inf.
                stats['trans'] += np.exp(np.minimum(logsumexp(lneta, 0), 700))

    def _do_mstep(self, stats, params):
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        # NOTE: lazily replaces None priors with 1.0 (i.e. a flat prior),
        # mutating the estimator's hyperparameters on first use.
        if self.startprob_prior is None:
            self.startprob_prior = 1.0
        if self.transmat_prior is None:
            self.transmat_prior = 1.0

        if 's' in params:
            self.startprob_ = normalize(
                np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
        if 't' in params:
            transmat_ = normalize(
                np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
                axis=1)
            self.transmat_ = transmat_
class GaussianHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian emissions

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    .. warning::

       The HMM module and its functions will be removed in 0.17
       as it no longer falls within the project's scope and API.

    Parameters
    ----------
    n_components : int
        Number of states.

    ``_covariance_type`` : string
        String describing the type of covariance parameters to
        use.  Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.

    Attributes
    ----------
    ``_covariance_type`` : string
        String describing the type of covariance parameters used by
        the model.  Must be one of 'spherical', 'tied', 'diag', 'full'.

    n_features : int
        Dimensionality of the Gaussian emissions.

    n_components : int
        Number of states in the model.

    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.

    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.

    means : array, shape (`n_components`, `n_features`)
        Mean parameters for each state.

    covars : array
        Covariance parameters for each state.  The shape depends on
        ``_covariance_type``::

            (`n_components`,)                   if 'spherical',
            (`n_features`, `n_features`)              if 'tied',
            (`n_components`, `n_features`)           if 'diag',
            (`n_components`, `n_features`, `n_features`)  if 'full'

    random_state: RandomState or an int seed (0 by default)
        A random number generator instance

    n_iter : int, optional
        Number of iterations to perform.

    thresh : float, optional
        Convergence threshold.

    params : string, optional
        Controls which parameters are updated in the training
        process.  Can contain any combination of 's' for startprob,
        't' for transmat, 'm' for means, and 'c' for covars.
        Defaults to all parameters.

    init_params : string, optional
        Controls which parameters are initialized prior to
        training.  Can contain any combination of 's' for
        startprob, 't' for transmat, 'm' for means, and 'c' for
        covars.  Defaults to all parameters.

    Examples
    --------
    >>> from sklearn.hmm import GaussianHMM
    >>> GaussianHMM(n_components=2)
    ...                             #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    GaussianHMM(algorithm='viterbi',...

    See Also
    --------
    GMM : Gaussian mixture model
    """

    def __init__(self, n_components=1, covariance_type='diag', startprob=None,
                 transmat=None, startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", means_prior=None, means_weight=0,
                 covars_prior=1e-2, covars_weight=1,
                 random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters):
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior, algorithm=algorithm,
                          random_state=random_state, n_iter=n_iter,
                          thresh=thresh, params=params,
                          init_params=init_params)

        self._covariance_type = covariance_type
        if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('bad covariance_type')

        # MAP hyperparameters used in _do_mstep.
        self.means_prior = means_prior
        self.means_weight = means_weight

        self.covars_prior = covars_prior
        self.covars_weight = covars_weight

    @property
    def covariance_type(self):
        """Covariance type of the model.

        Must be one of 'spherical', 'tied', 'diag', 'full'.
        """
        return self._covariance_type

    def _get_means(self):
        """Mean parameters for each state."""
        return self._means_

    def _set_means(self, means):
        means = np.asarray(means)
        if (hasattr(self, 'n_features')
                and means.shape != (self.n_components, self.n_features)):
            raise ValueError('means must have shape '
                             '(n_components, n_features)')
        self._means_ = means.copy()
        # n_features is inferred from the means the first time they are set.
        self.n_features = self._means_.shape[1]

    means_ = property(_get_means, _set_means)

    def _get_covars(self):
        """Return covars as a full matrix."""
        # Expand the compact storage of each covariance type to a list of
        # full (n_features, n_features) matrices.
        if self._covariance_type == 'full':
            return self._covars_
        elif self._covariance_type == 'diag':
            return [np.diag(cov) for cov in self._covars_]
        elif self._covariance_type == 'tied':
            return [self._covars_] * self.n_components
        elif self._covariance_type == 'spherical':
            return [np.eye(self.n_features) * f for f in self._covars_]

    def _set_covars(self, covars):
        covars = np.asarray(covars)
        _validate_covars(covars, self._covariance_type, self.n_components)
        self._covars_ = covars.copy()

    covars_ = property(_get_covars, _set_covars)

    def _compute_log_likelihood(self, obs):
        # Per-frame Gaussian log-density for every state.
        return log_multivariate_normal_density(
            obs, self._means_, self._covars_, self._covariance_type)

    def _generate_sample_from_state(self, state, random_state=None):
        if self._covariance_type == 'tied':
            cv = self._covars_
        else:
            cv = self._covars_[state]
        return sample_gaussian(self._means_[state], cv, self._covariance_type,
                               random_state=random_state)

    def _init(self, obs, params='stmc'):
        super(GaussianHMM, self)._init(obs, params=params)

        if (hasattr(self, 'n_features')
                and self.n_features != obs[0].shape[1]):
            raise ValueError('Unexpected number of dimensions, got %s but '
                             'expected %s' % (obs[0].shape[1],
                                              self.n_features))

        self.n_features = obs[0].shape[1]

        # NOTE(review): only the FIRST sequence in `obs` is used to seed the
        # means (k-means) and covariances — confirm this is acceptable for
        # callers with heterogeneous sequences.
        if 'm' in params:
            self._means_ = cluster.KMeans(
                n_clusters=self.n_components).fit(obs[0]).cluster_centers_
        if 'c' in params:
            cv = np.cov(obs[0].T)
            if not cv.shape:
                cv.shape = (1, 1)
            self._covars_ = distribute_covar_matrix_to_match_covariance_type(
                cv, self._covariance_type, self.n_components)
            self._covars_[self._covars_==0] = 1e-5

    def _initialize_sufficient_statistics(self):
        stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
        stats['post'] = np.zeros(self.n_components)
        stats['obs'] = np.zeros((self.n_components, self.n_features))
        stats['obs**2'] = np.zeros((self.n_components, self.n_features))
        stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
                                       self.n_features))
        return stats

    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(GaussianHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)

        if 'm' in params or 'c' in params:
            stats['post'] += posteriors.sum(axis=0)
            stats['obs'] += np.dot(posteriors.T, obs)

        if 'c' in params:
            if self._covariance_type in ('spherical', 'diag'):
                stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
            elif self._covariance_type in ('tied', 'full'):
                # Accumulate weighted outer products per component.
                for t, o in enumerate(obs):
                    obsobsT = np.outer(o, o)
                    for c in range(self.n_components):
                        stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT

    def _do_mstep(self, stats, params):
        super(GaussianHMM, self)._do_mstep(stats, params)

        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        denom = stats['post'][:, np.newaxis]
        if 'm' in params:
            prior = self.means_prior
            weight = self.means_weight
            if prior is None:
                weight = 0
                prior = 0
            self._means_ = (weight * prior + stats['obs']) / (weight + denom)

        if 'c' in params:
            covars_prior = self.covars_prior
            covars_weight = self.covars_weight
            if covars_prior is None:
                covars_weight = 0
                covars_prior = 0

            means_prior = self.means_prior
            means_weight = self.means_weight
            if means_prior is None:
                means_weight = 0
                means_prior = 0
            meandiff = self._means_ - means_prior

            if self._covariance_type in ('spherical', 'diag'):
                cv_num = (means_weight * (meandiff) ** 2
                          + stats['obs**2']
                          - 2 * self._means_ * stats['obs']
                          + self._means_ ** 2 * denom)
                cv_den = max(covars_weight - 1, 0) + denom
                self._covars_ = (covars_prior + cv_num) / np.maximum(cv_den, 1e-5)
                if self._covariance_type == 'spherical':
                    # Spherical = average the diagonal variances per state.
                    self._covars_ = np.tile(
                        self._covars_.mean(1)[:, np.newaxis],
                        (1, self._covars_.shape[1]))
            elif self._covariance_type in ('tied', 'full'):
                cvnum = np.empty((self.n_components, self.n_features,
                                  self.n_features))
                for c in range(self.n_components):
                    obsmean = np.outer(stats['obs'][c], self._means_[c])

                    cvnum[c] = (means_weight * np.outer(meandiff[c],
                                                        meandiff[c])
                                + stats['obs*obs.T'][c]
                                - obsmean - obsmean.T
                                + np.outer(self._means_[c], self._means_[c])
                                * stats['post'][c])
                cvweight = max(covars_weight - self.n_features, 0)
                if self._covariance_type == 'tied':
                    self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
                                     (cvweight + stats['post'].sum()))
                elif self._covariance_type == 'full':
                    self._covars_ = ((covars_prior + cvnum) /
                                     (cvweight + stats['post'][:, None, None]))

    def fit(self, obs):
        """Estimate model parameters.

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.

        Parameters
        ----------
        obs : list
            List of array-like observation sequences, each of which
            has shape (n_i, n_features), where n_i is the length of
            the i_th observation.

        Notes
        -----
        In general, `logprob` should be non-decreasing unless
        aggressive pruning is used.  Decreasing `logprob` is generally
        a sign of overfitting (e.g. the covariance parameter on one or
        more components becomminging too small).  You can fix this by getting
        more training data, or increasing covars_prior.
        """
        return super(GaussianHMM, self).fit(obs)
class MultinomialHMM(_BaseHMM):
    """Hidden Markov Model with multinomial (discrete) emissions

    .. warning::

       The HMM module and its functions will be removed in 0.17
       as it no longer falls within the project's scope and API.

    Attributes
    ----------
    n_components : int
        Number of states in the model.

    n_symbols : int
        Number of possible symbols emitted by the model (in the observations).

    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.

    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.

    emissionprob : array, shape ('n_components`, 'n_symbols`)
        Probability of emitting a given symbol when in each state.

    random_state: RandomState or an int seed (0 by default)
        A random number generator instance

    n_iter : int, optional
        Number of iterations to perform.

    thresh : float, optional
        Convergence threshold.

    params : string, optional
        Controls which parameters are updated in the training
        process.  Can contain any combination of 's' for startprob,
        't' for transmat, 'e' for emmissionprob.
        Defaults to all parameters.

    init_params : string, optional
        Controls which parameters are initialized prior to
        training.  Can contain any combination of 's' for
        startprob, 't' for transmat, 'e' for emmissionprob.
        Defaults to all parameters.

    Examples
    --------
    >>> from sklearn.hmm import MultinomialHMM
    >>> MultinomialHMM(n_components=2)
    ...                             #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    MultinomialHMM(algorithm='viterbi',...

    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """

    def __init__(self, n_components=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, thresh=1e-2, params=string.ascii_letters,
                 init_params=string.ascii_letters):
        """Create a hidden Markov model with multinomial emissions.

        Parameters
        ----------
        n_components : int
            Number of states.
        """
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params)

    def _get_emissionprob(self):
        """Emission probability distribution for each state."""
        # Stored internally in log space.
        return np.exp(self._log_emissionprob)

    def _set_emissionprob(self, emissionprob):
        emissionprob = np.asarray(emissionprob)
        if hasattr(self, 'n_symbols') and \
                emissionprob.shape != (self.n_components, self.n_symbols):
            raise ValueError('emissionprob must have shape '
                             '(n_components, n_symbols)')

        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(emissionprob):
            normalize(emissionprob)

        self._log_emissionprob = np.log(emissionprob)
        # Map log(0) = nan entries to -inf so downstream math is well-defined.
        underflow_idx = np.isnan(self._log_emissionprob)
        self._log_emissionprob[underflow_idx] = NEGINF
        self.n_symbols = self._log_emissionprob.shape[1]

    emissionprob_ = property(_get_emissionprob, _set_emissionprob)

    def _compute_log_likelihood(self, obs):
        # Fancy-index the per-state log emission probability of each symbol.
        return self._log_emissionprob[:, obs].T

    def _generate_sample_from_state(self, state, random_state=None):
        # Inverse-CDF sampling over the state's emission distribution.
        cdf = np.cumsum(self.emissionprob_[state, :])
        random_state = check_random_state(random_state)
        rand = random_state.rand()
        symbol = (cdf > rand).argmax()
        return symbol

    def _init(self, obs, params='ste'):
        super(MultinomialHMM, self)._init(obs, params=params)
        self.random_state = check_random_state(self.random_state)

        if 'e' in params:
            if not hasattr(self, 'n_symbols'):
                # Infer the alphabet size from the union of observed symbols.
                symbols = set()
                for o in obs:
                    symbols = symbols.union(set(o))
                self.n_symbols = len(symbols)
            emissionprob = normalize(self.random_state.rand(self.n_components,
                                                            self.n_symbols), 1)
            self.emissionprob_ = emissionprob

    def _initialize_sufficient_statistics(self):
        stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
        stats['obs'] = np.zeros((self.n_components, self.n_symbols))
        return stats

    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(MultinomialHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        if 'e' in params:
            for t, symbol in enumerate(obs):
                stats['obs'][:, symbol] += posteriors[t]

    def _do_mstep(self, stats, params):
        super(MultinomialHMM, self)._do_mstep(stats, params)
        if 'e' in params:
            self.emissionprob_ = (stats['obs']
                                  / stats['obs'].sum(1)[:, np.newaxis])

    def _check_input_symbols(self, obs):
        """check if input can be used for Multinomial.fit input must be both
        positive integer array and every element must be continuous.
        e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
        """

        symbols = np.asarray(obs).flatten()

        if symbols.dtype.kind != 'i':
            # input symbols must be integer
            return False

        if len(symbols) == 1:
            # input too short
            return False

        if np.any(symbols < 0):
            # input contains negative intiger
            return False

        symbols.sort()
        # NOTE(review): this only checks gaps between consecutive sorted
        # symbols; a sequence that does not start at 0 (e.g. [2, 3, 4]) still
        # passes — confirm whether that is intended.
        if np.any(np.diff(symbols) > 1):
            # input is discontinous
            return False

        return True

    def fit(self, obs, **kwargs):
        """Estimate model parameters.

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.

        Parameters
        ----------
        obs : list
            List of array-like observation sequences, each of which
            has shape (n_i, n_features), where n_i is the length of
            the i_th observation.
        """
        err_msg = ("Input must be both positive integer array and "
                   "every element must be continuous, but %s was given.")

        if not self._check_input_symbols(obs):
            raise ValueError(err_msg % obs)

        return _BaseHMM.fit(self, obs, **kwargs)
class GMMHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian mixture emissions
    .. warning::
        The HMM module and its functions will be removed in 0.17
        as it no longer falls within the project's scope and API.
    Attributes
    ----------
    init_params : string, optional
        Controls which parameters are initialized prior to training. Can
        contain any combination of 's' for startprob, 't' for transmat, 'm'
        for means, 'c' for covars, and 'w' for GMM mixing weights.
        Defaults to all parameters.
    params : string, optional
        Controls which parameters are updated in the training process. Can
        contain any combination of 's' for startprob, 't' for transmat, 'm' for
        means, and 'c' for covars, and 'w' for GMM mixing weights.
        Defaults to all parameters.
    n_components : int
        Number of states in the model.
    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.
    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.
    gmms : array of GMM objects, length `n_components`
        GMM emission distributions for each state.
    random_state : RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    Examples
    --------
    >>> from sklearn.hmm import GMMHMM
    >>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    GMMHMM(algorithm='viterbi', covariance_type='diag',...
    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """

    def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", gmms=None, covariance_type='diag',
                 covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters):
        """Create a hidden Markov model with GMM emissions.
        Parameters
        ----------
        n_components : int
            Number of states.
        """
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params)
        # XXX: Hotfit for n_mix that is incompatible with the scikit's
        # BaseEstimator API
        self.n_mix = n_mix
        self._covariance_type = covariance_type
        self.covars_prior = covars_prior
        self.gmms = gmms
        # Build one GMM emission model per hidden state when none supplied.
        if gmms is None:
            gmms = []
            for x in range(self.n_components):
                if covariance_type is None:
                    g = GMM(n_mix)
                else:
                    g = GMM(n_mix, covariance_type=covariance_type)
                gmms.append(g)
        self.gmms_ = gmms

    # Read-only properties.
    @property
    def covariance_type(self):
        """Covariance type of the model.
        Must be one of 'spherical', 'tied', 'diag', 'full'.
        """
        return self._covariance_type

    def _compute_log_likelihood(self, obs):
        # One column per state: log-likelihood of obs under that state's GMM.
        return np.array([g.score(obs) for g in self.gmms_]).T

    def _generate_sample_from_state(self, state, random_state=None):
        # Draw a single observation from the emission GMM of `state`.
        return self.gmms_[state].sample(1, random_state=random_state).flatten()

    def _init(self, obs, params='stwmc'):
        super(GMMHMM, self)._init(obs, params=params)
        allobs = np.concatenate(obs, 0)
        # Warm-start each state GMM on the pooled observations; n_iter=0
        # means initialization only, no EM refinement at this point.
        for g in self.gmms_:
            g.set_params(init_params=params, n_iter=0)
            g.fit(allobs)

    def _initialize_sufficient_statistics(self):
        # Per-state accumulators for mixture weights, means and covariances.
        stats = super(GMMHMM, self)._initialize_sufficient_statistics()
        stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
        stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
        stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
        return stats

    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(GMMHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        for state, g in enumerate(self.gmms_):
            _, lgmm_posteriors = g.score_samples(obs)
            # Weight the mixture responsibilities by the state posterior;
            # eps guards against log(0).
            lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
                                      + np.finfo(np.float).eps)
            gmm_posteriors = np.exp(lgmm_posteriors)
            tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
            n_features = g.means_.shape[1]
            tmp_gmm._set_covars(
                distribute_covar_matrix_to_match_covariance_type(
                    np.eye(n_features), g.covariance_type,
                    g.n_components))
            norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
            if np.any(np.isnan(tmp_gmm.covars_)):
                raise ValueError
            stats['norm'][state] += norm
            if 'm' in params:
                stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
            if 'c' in params:
                if tmp_gmm.covariance_type == 'tied':
                    stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
                else:
                    # Reshape `norm` so it broadcasts against covars_
                    # whatever its rank ('spherical'/'diag'/'full').
                    cvnorm = np.copy(norm)
                    shape = np.ones(tmp_gmm.covars_.ndim)
                    shape[0] = np.shape(tmp_gmm.covars_)[0]
                    cvnorm.shape = shape
                    stats['covars'][state] += tmp_gmm.covars_ * cvnorm

    def _do_mstep(self, stats, params):
        super(GMMHMM, self)._do_mstep(stats, params)
        # All that is left to do is to apply covars_prior to the
        # parameters updated in _accumulate_sufficient_statistics.
        for state, g in enumerate(self.gmms_):
            n_features = g.means_.shape[1]
            norm = stats['norm'][state]
            if 'w' in params:
                g.weights_ = normalize(norm)
            if 'm' in params:
                g.means_ = stats['means'][state] / norm[:, np.newaxis]
            if 'c' in params:
                if g.covariance_type == 'tied':
                    g.covars_ = ((stats['covars'][state]
                                  + self.covars_prior * np.eye(n_features))
                                 / norm.sum())
                else:
                    cvnorm = np.copy(norm)
                    shape = np.ones(g.covars_.ndim)
                    shape[0] = np.shape(g.covars_)[0]
                    cvnorm.shape = shape
                    if (g.covariance_type in ['spherical', 'diag']):
                        g.covars_ = (stats['covars'][state] +
                                     self.covars_prior) / cvnorm
                    elif g.covariance_type == 'full':
                        eye = np.eye(n_features)
                        g.covars_ = ((stats['covars'][state]
                                      + self.covars_prior * eye[np.newaxis])
                                     / cvnorm)
|
loli/sklearn-ensembletrees
|
sklearn/hmm.py
|
Python
|
bsd-3-clause
| 48,577
|
[
"Gaussian"
] |
d207545da86c2e93cd98b11902f700454d4f6c74a793287adeaa437530f94773
|
"""Identify program versions used for analysis, reporting in structured table.
Catalogs the full list of programs used in analysis, enabling reproduction of
results and tracking of provenance in output files.
"""
import os
import contextlib
import subprocess
import sys
import yaml
import toolz as tz
from bcbio import utils
from bcbio.pipeline import config_utils, version
from bcbio.pipeline import datadict as dd
from bcbio.log import logger
# Command-line tools whose version is read by invoking the tool itself.
# `stdout_flag`/`paren_flag` tell _get_cl_version where the version string
# sits in the command output; `has_cl_version: False` marks tools that
# provide no version command.
_cl_progs = [{"cmd": "bamtofastq", "name": "biobambam",
              "args": "--version", "stdout_flag": "This is biobambam version"},
             {"cmd": "bamtools", "args": "--version", "stdout_flag": "bamtools"},
             {"cmd": "bcftools", "stdout_flag": "Version:"},
             {"cmd": "bedtools", "args": "--version", "stdout_flag": "bedtools"},
             {"cmd": "bowtie2", "args": "--version", "stdout_flag": "bowtie2-align version"},
             {"cmd": "bwa", "stdout_flag": "Version:"},
             {"cmd": "chanjo"},
             {"cmd": "cutadapt", "args": "--version"},
             {"cmd": "fastqc", "args": "--version", "stdout_flag": "FastQC"},
             {"cmd": "freebayes", "stdout_flag": "version:"},
             {"cmd": "gemini", "args": "--version", "stdout_flag": "gemini "},
             {"cmd": "novosort", "paren_flag": "novosort"},
             {"cmd": "novoalign", "stdout_flag": "Novoalign"},
             {"cmd": "samtools", "stdout_flag": "Version:"},
             {"cmd": "qualimap", "args": "-h", "stdout_flag": "QualiMap"},
             {"cmd": "vcflib", "has_cl_version": False},
             {"cmd": "featurecounts", "args": "-v", "stdout_flag": "featureCounts"}]
# Tools tracked only through the install manifest; no version command is run.
_manifest_progs = ["BubbleTree", "cufflinks-binary", "cnvkit", "gatk-framework", "grabix", "htseq",
                   "lumpy-sv", "manta", "metasv", "phylowgs", "platypus-variant", "rna-star",
                   "rtg-tools","sambamba-binary", "samblaster", "scalpel", "vardict",
                   "vardict-java", "vep", "vt", "wham"]
def _broad_versioner(type):
def get_version(config):
from bcbio import broad
try:
runner = broad.runner_from_config(config)
except ValueError:
return ""
if type == "gatk":
return runner.get_gatk_version()
elif type == "picard":
return runner.get_picard_version("ViewSam")
elif type == "mutect":
try:
runner = broad.runner_from_config(config, "mutect")
except ValueError:
return ""
return runner.get_mutect_version()
else:
raise NotImplementedError(type)
return get_version
def jar_versioner(program_name, jar_name):
    """Build a version lookup that parses the version out of a jar filename.
    """
    def get_version(config):
        try:
            install_dir = config_utils.get_program(program_name, config, "dir")
        except ValueError:
            # not configured
            return ""
        jar_path = config_utils.get_jar(jar_name, install_dir)
        version = os.path.basename(jar_path)
        # Strip everything that is not the version portion of the filename.
        for chunk in (jar_name, ".jar", "-standalone"):
            version = version.replace(chunk, "")
        if version.startswith(("-", ".")):
            version = version[1:]
        if not version:
            logger.warn("Unable to determine version for program '{}' from jar file {}".format(
                program_name, jar_path))
        return version
    return get_version
def java_versioner(pname, jar_name, **kwargs):
    """Build a version lookup that runs `java -jar <jar>` and parses output.
    Extra keyword arguments (e.g. stdout_flag) are forwarded to
    _get_cl_version to control how the output is parsed.
    """
    def get_version(config):
        try:
            install_dir = config_utils.get_program(pname, config, "dir")
        except ValueError:
            return ""
        jar = config_utils.get_jar(jar_name, install_dir)
        kwargs["cmd"] = "java"
        kwargs["args"] = "-Xms128m -Xmx256m -jar %s" % jar
        return _get_cl_version(kwargs, config)
    return get_version
# Programs needing custom version lookups (Broad runners or jar-name parsing)
# rather than a plain command-line invocation.
_alt_progs = [{"name": "bcbio_variation",
               "version_fn": jar_versioner("bcbio_variation", "bcbio.variation")},
              {"name": "gatk", "version_fn": _broad_versioner("gatk")},
              {"name": "mutect",
               "version_fn": _broad_versioner("mutect")},
              {"name": "picard", "version_fn": _broad_versioner("picard")},
              {"name": "rnaseqc",
               "version_fn": jar_versioner("rnaseqc", "RNA-SeQC")},
              {"name": "snpeff",
               "version_fn": java_versioner("snpeff", "snpEff", stdout_flag="snpEff version SnpEff")},
              {"name": "varscan",
               "version_fn": jar_versioner("varscan", "VarScan")},
              {"name": "oncofuse",
               "version_fn": jar_versioner("Oncofuse", "Oncofuse")},
              {"name": "alientrimmer",
               "version_fn": jar_versioner("AlienTrimmer", "AlienTrimmer")}
              ]
def _parse_from_stdoutflag(stdout, x):
for line in stdout:
if line.find(x) >= 0:
parts = [p for p in line[line.find(x) + len(x):].split() if p.strip()]
return parts[0].strip()
return ""
def _parse_from_parenflag(stdout, x):
for line in stdout:
if line.find(x) >= 0:
return line.split("(")[-1].split(")")[0]
return ""
def _get_cl_version(p, config):
    """Retrieve version of a single commandline program.
    `p` is a program spec dict: `cmd`, optional `args`, plus one of
    `stdout_flag`/`paren_flag` describing how to locate the version in the
    output; `has_cl_version: False` short-circuits to "".
    """
    if not p.get("has_cl_version", True):
        return ""
    try:
        prog = config_utils.get_program(p["cmd"], config)
    except config_utils.CmdNotFound:
        # Fall back to a tool installed next to the running python executable.
        localpy_cmd = os.path.join(os.path.dirname(sys.executable), p["cmd"])
        if os.path.exists(localpy_cmd):
            prog = localpy_cmd
        else:
            return ""
    args = p.get("args", "")
    cmd = "{prog} {args}"
    # Version commands frequently write to stderr and/or exit non-zero, so
    # merge stderr into stdout and parse whatever comes back.
    subp = subprocess.Popen(cmd.format(**locals()), stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            shell=True)
    with contextlib.closing(subp.stdout) as stdout:
        if p.get("stdout_flag"):
            v = _parse_from_stdoutflag(stdout, p["stdout_flag"])
        elif p.get("paren_flag"):
            v = _parse_from_parenflag(stdout, p["paren_flag"])
        else:
            # No parse hint: take the last non-empty line of output.
            lines = [l.strip() for l in stdout.read().split("\n") if l.strip()]
            v = lines[-1]
    if v.endswith("."):
        v = v[:-1]
    return v
def _get_brew_versions():
    """Retrieve versions of tools installed via brew.
    Returns a {tool_name: version} dict; empty when brew is not available.
    """
    from bcbio import install
    tooldir = install.get_defaults().get("tooldir")
    brew_cmd = os.path.join(tooldir, "bin", "brew") if tooldir else "brew"
    try:
        vout = subprocess.check_output([brew_cmd, "list", "--versions"])
    except OSError:  # brew not installed/used
        vout = ""
    versions = {}
    for line in vout.split("\n"):
        fields = line.rstrip().split()
        if fields:
            # First field is the package name, last is its newest version.
            versions[fields[0]] = fields[-1]
    return versions
def _get_versions(config=None):
    """Retrieve details on all programs available on the system.
    Prefers the pre-built install manifest; when no manifest is present,
    falls back to querying brew and each tool individually (which requires
    `config`). Returns a list of {"program", "version"} dicts sorted by
    program name, always starting with bcbio-nextgen itself.
    """
    out = [{"program": "bcbio-nextgen",
            "version": ("%s-%s" % (version.__version__, version.__git_revision__)
                        if version.__git_revision__ else version.__version__)}]
    manifest_dir = _get_manifest_dir(config)
    manifest_vs = _get_versions_manifest(manifest_dir)
    if manifest_vs:
        out += manifest_vs
    else:
        assert config is not None, "Need configuration to retrieve from non-manifest installs"
        brew_vs = _get_brew_versions()
        # brew-installed versions take precedence over running the tool.
        for p in _cl_progs:
            out.append({"program": p["cmd"],
                        "version": (brew_vs[p["cmd"]] if p["cmd"] in brew_vs else
                                    _get_cl_version(p, config))})
        for p in _alt_progs:
            out.append({"program": p["name"],
                        "version": (brew_vs[p["name"]] if p["name"] in brew_vs else
                                    p["version_fn"](config))})
    out.sort(key=lambda x: x["program"])
    return out
def _get_manifest_dir(data=None):
    """
    get manifest directory from the data dictionary, falling back on alternatives
    it prefers, in order:
    1. locating it from the bcbio_system.yaml file
    2. locating it from the galaxy directory
    3. location it from the python executable.
    it can accept either the data or config dictionary
    """
    manifest_dir = None
    if data:
        # `data` can be a full sample dict (config nested) or a bare config.
        bcbio_system = tz.get_in(["config", "bcbio_system"], data, None)
        bcbio_system = bcbio_system if bcbio_system else data.get("bcbio_system", None)
        if bcbio_system:
            sibling_dir = os.path.normpath(os.path.dirname(bcbio_system))
        else:
            sibling_dir = dd.get_galaxy_dir(data)
        if sibling_dir:
            # manifest lives one directory above bcbio_system.yaml / galaxy.
            manifest_dir = os.path.normpath(os.path.join(sibling_dir, os.pardir,
                                                         "manifest"))
    if not manifest_dir or not os.path.exists(manifest_dir):
        # Final fallback: manifest next to the installed python.
        manifest_dir = os.path.join(config_utils.get_base_installdir(), "manifest")
    return manifest_dir
def _get_versions_manifest(manifest_dir):
    """Retrieve versions from a pre-existing manifest of installed software.
    Returns a list of {"program", "version"} dicts covering every known
    program (version "" for programs missing from the manifest), sorted by
    program name. Returns an empty list when `manifest_dir` does not exist;
    previously this fell through and returned None, which crashed callers
    that iterate the result (e.g. get_version_manifest).
    """
    all_pkgs = _manifest_progs + [p.get("name", p["cmd"]) for p in _cl_progs] + [p["name"] for p in _alt_progs]
    if not os.path.exists(manifest_dir):
        return []
    out = []
    for plist in ["toolplus", "brew", "python", "r", "debian", "custom"]:
        pkg_file = os.path.join(manifest_dir, "%s-packages.yaml" % plist)
        if os.path.exists(pkg_file):
            with open(pkg_file) as in_handle:
                pkg_info = yaml.safe_load(in_handle)
            # Record each known package found in this manifest section and
            # drop it from the remaining search list.
            added = []
            for pkg in all_pkgs:
                if pkg in pkg_info:
                    added.append(pkg)
                    out.append({"program": pkg, "version": pkg_info[pkg]["version"]})
            for x in added:
                all_pkgs.remove(x)
    out.sort(key=lambda x: x["program"])
    # Anything never found gets an empty version so callers see a full list.
    for pkg in all_pkgs:
        out.append({"program": pkg, "version": ""})
    return out
def _get_program_file(dirs):
if dirs.get("work"):
base_dir = utils.safe_makedir(os.path.join(dirs["work"], "provenance"))
return os.path.join(base_dir, "programs.txt")
def write_versions(dirs, config=None, is_wrapper=False):
    """Write CSV file with versions used in analysis pipeline.
    Prints to stdout when no work directory is available; with
    is_wrapper=True the file must already exist (created inside a VM).
    Returns the path of the programs file (None when printing to stdout).
    """
    out_file = _get_program_file(dirs)
    if is_wrapper:
        assert utils.file_exists(out_file), "Failed to create program versions from VM"
    elif out_file is None:
        # No work directory: emit program,version pairs to stdout instead.
        for p in _get_versions(config):
            print("{program},{version}".format(**p))
    else:
        with open(out_file, "w") as out_handle:
            for p in _get_versions(config):
                out_handle.write("{program},{version}\n".format(**p))
    return out_file
def get_version_manifest(name, data=None, required=False):
    """Retrieve a version from the currently installed manifest.
    Returns "" when the program has no recorded version, unless `required`
    is set, in which case a ValueError is raised.
    """
    manifest_dir = _get_manifest_dir(data)
    for entry in _get_versions_manifest(manifest_dir):
        if entry["program"] == name:
            version = entry.get("version", "")
            if version:
                return version
    if required:
        raise ValueError("Did not find %s in install manifest. Could not check version." % name)
    return ""
def add_subparser(subparsers):
    """Add command line option for exporting version information.
    """
    version_parser = subparsers.add_parser(
        "version",
        help="Export versions of used software to stdout or a file ")
    version_parser.add_argument(
        "--workdir", default=None,
        help="Directory export programs to in workdir/provenance/programs.txt")
def get_version(name, dirs=None, config=None):
    """Retrieve the current version of the given program from cached names.
    Reads the provenance programs file (located via `dirs`, or from
    config["resources"]["program_versions"]) and raises KeyError when no
    non-empty version is recorded for `name`.
    """
    if dirs:
        programs_file = _get_program_file(dirs)
    else:
        programs_file = config["resources"]["program_versions"]
    with open(programs_file) as in_handle:
        for line in in_handle:
            prog, version = line.rstrip().split(",")
            if prog == name and version:
                return version
    raise KeyError("Version information not found for %s in %s" % (name, programs_file))
|
guillermo-carrasco/bcbio-nextgen
|
bcbio/provenance/programs.py
|
Python
|
mit
| 12,250
|
[
"BWA",
"Galaxy",
"HTSeq"
] |
0ea083d7c70201699288c541bc2b54556909710373383046f524afd216a26348
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
Salesforce To BigQuery
Move query results into a BigQuery table.
- Specify 1-Salesforce credentials.
- Specify the query youd like to execute.
- Specify a 2-SCHEMA for that query (optional).
1-Salesforce: https://developer.salesforce.com/
2-SCHEMA: https://cloud.google.com/bigquery/docs/schemas#creating_a_json_schema_file
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
# User-editable recipe parameters; each key is substituted into the matching
# {'field': ...} placeholder in RECIPE when the DAG is generated.
INPUTS = {
  'domain':'login.salesforce.com', # Retrieve from a Salesforce Domain.
  'client':'', # Retrieve from a Salesforce App.
  'secret':'', # Retrieve from a Salesforce App.
  'username':'', # Your Salesforce user email.
  'password':'', # Your Salesforce login password.
  'query':'', # The query to run in Salesforce.
  'auth_read':'user', # Credentials used for reading data.
  'dataset':'', # Existing BigQuery dataset.
  'table':'', # Table to create from this report.
  'schema':'[]', # Schema provided in JSON list format or empty list.
}

# Declarative StarThinker recipe: run a Salesforce query and load the results
# into a BigQuery table.
RECIPE = {
  'tasks':[
    {
      'salesforce':{
        'auth':{'field':{'name':'auth_read','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},
        'domain':{'field':{'name':'domain','kind':'string','default':'login.salesforce.com','description':'Retrieve from a Salesforce Domain.'}},
        'client':{'field':{'name':'client','kind':'string','default':'','description':'Retrieve from a Salesforce App.'}},
        'secret':{'field':{'name':'secret','kind':'string','default':'','description':'Retrieve from a Salesforce App.'}},
        'username':{'field':{'name':'username','kind':'email','default':'','description':'Your Salesforce user email.'}},
        'password':{'field':{'name':'password','kind':'password','default':'','description':'Your Salesforce login password.'}},
        'query':{'field':{'name':'query','kind':'string','default':'','description':'The query to run in Salesforce.'}},
        'out':{
          'bigquery':{
            'dataset':{'field':{'name':'dataset','kind':'string','order':3,'default':'','description':'Existing BigQuery dataset.'}},
            'table':{'field':{'name':'table','kind':'string','order':4,'default':'','description':'Table to create from this report.'}},
            'schema':{'field':{'name':'schema','kind':'json','order':5,'default':'[]','description':'Schema provided in JSON list format or empty list.'}}
          }
        }
      }
    }
  ]
}

# Build the Airflow DAG from the recipe; running this module directly prints
# the equivalent StarThinker command line instead of registering the DAG.
dag_maker = DAG_Factory('salesforce_to_bigquery', RECIPE, INPUTS)
dag = dag_maker.generate()

if __name__ == "__main__":
  dag_maker.print_commandline()
|
google/starthinker
|
dags/salesforce_to_bigquery_dag.py
|
Python
|
apache-2.0
| 5,630
|
[
"VisIt"
] |
1a5d5bf6a2330020a7d7985169ab3d67c8c9ceafdf4cb98ff7f9a9c021e13528
|
"""
Mind: Specify path for your bader executable in main with the needed options
"""
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.analysis.energy_models import EnergyModel
from pymatgen.analysis.ewald import EwaldSummation
from pymatgen.io.vaspio import Chgcar, Potcar
from mpinterfaces import *
class ColoumbEnergy(EnergyModel):
    """Energy model based on pairwise Coulomb sums over all site pairs.
    NOTE(review): Python 2 code; the original "Coloumb" spelling is kept so
    external callers are not broken.
    """
    def get_energy(self, structure):
        """
        use the atomic numbers of the structures's elements to compute
        the coloumbic energy of the structure
        """
        energy = 0
        a = -1  # unused; kept from the original implementation
        b = -1  # unused
        for i, sitei in enumerate(
                structure):  # enumerate(structure.as_dict()['sites']):
            for j, sitej in enumerate(
                    structure):  # enumerate(structure.as_dict()['sites']):
                if i != j:
                    dij = structure.get_distance(i, j)
                    # Z: atomic number of the first species entry on the site
                    Zi = sitei.species_and_occu.items()[0][0].Z
                    Zj = sitej.species_and_occu.items()[0][0].Z
                    # 0.5 compensates for visiting each (i, j) pair twice
                    energy += 0.5 * Zi * Zj / dij
        return energy
    def get_bader_coulomb_energy(self, structure):
        """
        use bader charges on atoms to compute the coloumbic energy
        """
        energy = 0
        force = np.zeros((len(structure), 3))
        a = -1  # unused
        b = -1  # unused
        for i, sitei in enumerate(structure.as_dict()['sites']):
            for j, sitej in enumerate(structure.as_dict()['sites']):
                if i != j:
                    dij = structure.get_distance(i, j)
                    d_vec = structure.frac_coords[i] - structure.frac_coords[j]
                    # Oxidation states stand in for point charges here.
                    Zi = sitei['species'][0]['oxidation_state']
                    Zj = sitej['species'][0]['oxidation_state']
                    energy += 0.5 * Zi * Zj / dij
                    # Accumulate Coulomb force components on atom i
                    # (in fractional-coordinate axes).
                    force[i][0] += Zi * Zj / (dij ** 2) * (d_vec[0] / dij)
                    force[i][1] += Zi * Zj / (dij ** 2) * (d_vec[1] / dij)
                    force[i][2] += Zi * Zj / (dij ** 2) * (d_vec[2] / dij)
                    print force[i]
        # to work on definition of forces
        print np.sum(force[:, 0]), np.sum(force[:, 1]), np.sum(
            force[:, 2])  # total force on cell in x, y, z ?
        return energy
    def get_ewald_sum(self, structure):
        # Full Ewald summation with default cutoffs for comparison.
        e = EwaldSummation(structure, real_space_cut=None,
                           recip_space_cut=None,
                           eta=None,
                           acc_factor=8.0)
        return e.total_energy
    def as_dict(self):
        # Serialization hook following the pymatgen MSONable convention.
        return {"version": __version__,
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "init_args": {"name": "coloumb"}}
class Bader_Analysis(object):
    """Parse Bader charge-partitioning output (ACF.dat) for a VASP run.
    Reads CHGCAR and POTCAR alongside the Bader ACF.dat table and exposes
    per-atom charges and charge transfer. The commented block in __init__
    regenerates the Charge_sum reference file and runs bader from scratch.
    """
    def __init__(self, acf_path='./ACF.dat', chgcar_filename="./CHGCAR",
                 aecar0="./AECCAR0", aecar2="./AECCAR2",
                 potcar_filename="./POTCAR",
                 bader_path="./bader CHGCAR -ref Charge_sum"):
        print "Reading CHGCAR"
        self.chgcar = Chgcar.from_file(chgcar_filename)
        ##uncomment if you have to run from scratch##
        # self.contcar = Structure.from_file(contcar_filename)
        # print "Reading AECCAR0"
        # Vol_obj_1 = Chgcar.from_file(aecar0)
        # print "Reading AECCAR2"
        # Vol_obj_2 = Chgcar.from_file(aecar2)
        # print "Summing"
        # Vol_obj_sum = Vol_obj_1.linear_add(Vol_obj_2)
        # print "Writing Combined Sum"
        # Vol_obj_sum.write_file("./Charge_sum")
        # self.exe = bader_path
        # os.system(self.exe)
        self.potcar = Potcar.from_file(
            potcar_filename) if potcar_filename is not None else None
        self.natoms = self.chgcar.poscar.natoms
        chgcarpath = os.path.abspath(chgcar_filename)  # NOTE(review): unused
        data = []
        with open(acf_path) as f:
            print "Reading ACF"
            raw = f.readlines()
            headers = [s.lower() for s in raw.pop(0).split()]
            raw.pop(0)  # separator line under the header row
            # Per-atom rows run until the trailing dashed separator.
            while True:
                l = raw.pop(0).strip()
                if l.startswith("-"):
                    break
                vals = map(float, l.split()[1:])
                data.append(dict(zip(headers[1:], vals)))
            # Remaining lines are summary "KEY : value" entries.
            for l in raw:
                toks = l.strip().split(":")
                if toks[0] == "VACUUM CHARGE":
                    self.vacuum_charge = float(toks[1])
                elif toks[0] == "VACUUM VOLUME":
                    self.vacuum_volume = float(toks[1])
                elif toks[0] == "NUMBER OF ELECTRONS":
                    self.nelectrons = float(toks[1])
        self.data = data
    def get_charge_transfer(self, atom_index):
        """
        Returns the charge transferred for a particular atom. Requires POTCAR
        to be supplied.
        Args:
            atom_index:
                Index of atom.
        Returns:
            Charge transfer associated with atom from the Bader analysis.
            Given by final charge on atom - nelectrons in POTCAR for
            associated atom.
        """
        if self.potcar is None:
            raise ValueError("POTCAR must be supplied in order to calculate "
                             "charge transfer!")
        # Map each atom index onto its POTCAR entry via per-element counts.
        potcar_indices = []
        for i, v in enumerate(self.natoms):
            potcar_indices += [i] * v
        nelect = self.potcar[potcar_indices[atom_index]].nelectrons
        return self.data[atom_index][
            "charge"] - nelect  # nelect - self.data[atom_index]["charge"]
    def get_charge(self, atom_index_list=None):
        # Debug helper: print the Bader charge for each requested atom index.
        if atom_index_list:
            for i in atom_index_list:
                print "Charge at atom index ", i, " is ", self.data[i][
                    "charge"]
    def get_oxidation_state_decorated_structure(self):
        # Attach the per-site Bader charge transfer as oxidation states.
        structure = self.chgcar.structure
        charges = [self.get_charge_transfer(i) for i in range(len(structure))]
        structure.add_oxidation_state_by_site(charges)
        return structure
class Scan_Interface(Structure):
    """
    moves the ligand
    Scans rigid translations of the atoms above `interface_barrier_height`
    over a grid of shifts in `search_box`, scoring each trial with the
    configured energy model.
    """
    def __init__(self, structure=None, search_box=[0.1, 0.1, -0.1],
                 search_step=0.01, energy_model='bader_coulomb',
                 interface_barrier_height=0.62):
        # NOTE(review): mutable default for search_box is shared across
        # instances; safe only while it is never mutated in place.
        self.structure = structure
        self.search_box = search_box  # cuboid search box
        self.search_step = search_step  # step size of search
        self.energy_model = energy_model  # energy model
        self.interface_barrier_height = interface_barrier_height  # dividing height for atoms to be shifted
    def ligand_shifts(self):
        """Enumerate candidate translation vectors inside the search box."""
        vec = []
        box_a = self.search_box[0]
        box_b = self.search_box[1]
        box_c = self.search_box[2]
        step = self.search_step
        limit_a = box_a / step
        limit_b = box_b / step
        limit_c = box_c / step
        print limit_a, limit_b, limit_c
        for i in range(0, int(limit_a)):
            for j in range(0, int(limit_b)):
                # box_c is negative, hence the -1 factor on its limit.
                for k in range(0, int(-1 * limit_c)):
                    vec.append([i * step, j * step, k * step])
        return vec
    def get_struct_energy(self, structure):
        """Score a trial structure with the configured energy model."""
        ce = ColoumbEnergy()
        strt = structure
        # direct_sum_energy=ce.get_energy(strt_ox_iface) #other energy models
        if self.energy_model == 'bader_coulomb':
            energy = ce.get_bader_coulomb_energy(strt)
        # ewald_energy = ce.get_ewald_sum(strt_ox_iface)
        return energy
    def trial_interface(self):
        """Translate atoms above the barrier height by each candidate shift
        and keep the shifts that lower the model energy (stops once more
        than 10 candidates have been collected)."""
        strts = []
        energies = []
        vecs = self.ligand_shifts()
        old_struct_energy = self.get_struct_energy(self.structure)
        print "calculated old energy ... searching for lower energies"
        print old_struct_energy
        print enumerate(vecs)
        for k, j in enumerate(vecs):
            trans = self.structure.copy()
            for i, sitei in enumerate(trans.frac_coords):
                if sitei[2] > self.interface_barrier_height:  # gets ligand atoms to translate sites
                    trans.translate_sites([i], j)
            if not trans.is_valid(tol=1.0):  # tolearnce for closeness of atoms set to 1 A
                print "possibly too close atoms"
            else:
                egy = self.get_struct_energy(structure=trans)
                print egy
                print "energy= ", egy
                if egy < old_struct_energy:
                    print "Lower energy", j
                    strts.append(trans)
                    energies.append(egy)
                    if len(strts) > 10:
                        return strts, energies
        return strts, energies
if __name__ == '__main__':
    # Driver: read Bader output from the current directory, decorate the
    # relaxed structure with Bader-derived oxidation states, and report it.
    # relaxed_structure = Structure.from_file("POSCAR")
    bader_path = "./bader CHGCAR -ref Charge_sum"
    BA = Bader_Analysis(
        bader_path=bader_path)  # optional user specifies bader path
    relaxed_oxidated_structure = BA.get_oxidation_state_decorated_structure()
    print ("got oxidated structure.. starting optimization")
    print (relaxed_oxidated_structure)
|
joshgabriel/MPInterfaces
|
dev_scripts/coulomb_summer_test.py
|
Python
|
mit
| 9,086
|
[
"pymatgen"
] |
cf18995f2deefe4483f929cec9a242de7a3d0562886489bee7d17bc184243941
|
import sys, string
import export
import math
import random
import copy
import os
import os.path
import unique
import traceback
# Module-level setup: pick the python->R bridge (RPY2 vs PypeR) and locate
# the R binary, recording availability in R_present / the global `r` object.
R_present=True
try:
    ### If file is present use this location
    loc = unique.filepath('Config/R_location.txt')
    s = open(loc,'r')
    useStaticLocation=s.read()
    #print useStaticLocation
    #print 'Using the Config designated location
    for p in os.environ['PATH'].split(':'): ### For Unix cluster environments
        if os.path.exists(p + '/R'):
            #path = p + '/R'
            useStaticLocation = False
except Exception:
    #print 'NOT using the Config designated location'
    useStaticLocation = False
try:
    # NOTE(review): `forceError` is an undefined name referenced on purpose;
    # the resulting NameError forces the PypeR fallback below.
    forceError ### This doesn't currently work with the compiled version of AltAnalyze
    import rpy2.robjects as robjects
    r = robjects.r
    print "\n---------Using RPY2---------\n"
except Exception:
    from pyper import *
    #print "\n---------Using PypeR---------\n"
    ### Running the wrong one once is fine, but multiple times causes it to stall in a single session
    try:
        try:
            if 'Xdarwin' in sys.platform: ### Xdarwin is indicated since this if statement is invalid without a stand-alone Mac R package (ideal)
                #print 'Using AltAnalyze local version of R'
                #print 'A'
                path = unique.filepath("AltDatabase/tools/R/Mac/R")
                r = R(RCMD=path,use_numpy=True)
            elif os.name == 'nt':
                path = unique.filepath("AltDatabase/tools/R/PC/bin/x64/R.exe")
                r = R(RCMD=path,use_numpy=True)
            else:
                #print 'B'
                if useStaticLocation == False or useStaticLocation=='no':
                    print 'NOT using static location'
                    r = R(use_numpy=True)
                else:
                    print 'Using static location'
                    # Try the common install locations in order.
                    path = '/usr/local/bin/R'
                    if os.path.exists(path): pass
                    else:
                        path = '/usr/bin/R'
                    if os.path.exists(path):
                        print 'Using the R path:',path
                        r = R(RCMD=path,use_numpy=True)
                    else:
                        r = None
                        R_present=False
                        print 'R does not appear to be installed... Please install first.'
        except Exception:
            #print 'C'
            r = R(use_numpy=True)
    except Exception:
        print traceback.format_exc()
        r = None
        R_present=False
        pass
LegacyMode = True
### Create a Directory for R packages in the AltAnalyze program directory (in non-existant)
r_package_path = string.replace(os.getcwd()+'/Config/R','\\','/') ### R doesn't link \\
r_package_path = unique.filepath(r_package_path) ### Remove the AltAnalyze.app location
try: os.mkdir(r_package_path)
except Exception: None
if R_present:
    ### Set an R-package installation path
    command = '.libPaths("'+r_package_path+'")'; r(command) ### doesn't work with %s for some reason
    #print_out = r('.libPaths()');print print_out; sys.exit()
def remoteMonocle(input_file,expPercent,pval,numGroups):
    """Run the Monocle R workflow on an expression file.
    expPercent, pval and numGroups are forwarded to the Monocle R script.
    """
    #input_file="Altanalyze"
    setWorkingDirectory(findParentDir(input_file)[:-1])
    # NOTE(review): mkdir of the parent dir is normally a no-op (already
    # exists); the except swallows that failure — confirm intent.
    try: os.mkdir(findParentDir(input_file)[:-1])
    except Exception: None
    z = RScripts(input_file)
    setWorkingDirectory(input_file)
    z.Monocle(input_file,expPercent,pval,numGroups)
def remoteHopach(input_file,cluster_method,metric_gene,metric_array,force_array='',force_gene=''):
    """ Run Hopach via a call from an external clustering and visualization module.
    Cleans duplicate row IDs first, runs the Hopach R script, then imports
    the resulting row and/or column orderings depending on cluster_method
    ('gene', 'array' or 'both'). Returns (input_file, row_order, column_order).
    """
    #input_file = input_file[1:] #not sure why, but the '\' needs to be there while reading initally but not while accessing the file late
    row_order = []
    column_order = []
    if 'ICGS-SubCluster' in input_file:
        force_array=2
    input_file = checkForDuplicateIDs(input_file) ### Duplicate IDs will cause R to exit when creating the data matrix
    z = RScripts(input_file)
    setWorkingDirectory(input_file)
    z.Hopach(cluster_method,metric_gene,force_gene,metric_array,force_array)
    if cluster_method == 'both' or cluster_method == 'gene':
        filename = findParentDir(input_file)+'/hopach/rows.'+findFileName(input_file)
        row_order = importHopachOutput(filename)
    if cluster_method == 'both' or cluster_method == 'array':
        filename = findParentDir(input_file)+'/hopach/columns.'+findFileName(input_file)
        column_order = importHopachOutput(filename)
    #print row_order; sys.exit()
    return input_file, row_order, column_order
def remoteAffyNormalization(input_file,normalization_method,probe_level,batch_effects):
    """Run Affymetrix array normalization through the R scripts wrapper.
    `input_file` is the path of the expression output from normalization.
    """
    ### Input file is the path of the expression output from normalization
    setWorkingDirectory(findParentDir(input_file)[:-1])
    try: os.mkdir(findParentDir(input_file)[:-1])
    except Exception: None #Already exists
    z = RScripts(input_file)
    z.AffyNormalization(normalization_method,probe_level,batch_effects)
def checkForDuplicateIDs(input_file, useOrderedDict=True):
    """Clean an expression matrix so the downstream R code can ingest it.
    Collapses "gene gene" style duplicate identifiers, drops a
    'row_clusters-flat' annotation column when present (offset=1),
    replaces non-numeric values with mean-centered numerics, and writes a
    "-clean.txt" copy when any of those changes were needed.
    Returns the path of the file to use (the original when untouched).
    """
    if 'SamplePrediction' in input_file or '-Guide' in input_file:
        ### OrderedDict is prefered but will alter prior ICGS results
        useOrderedDict = False
    first_row = True
    import collections
    if useOrderedDict:
        try: key_db = collections.OrderedDict()
        except Exception:
            try:
                import ordereddict
                key_db = ordereddict.OrderedDict()
            except Exception:
                key_db={}
    else:
        key_db={}
    key_list=[]
    fn=filepath(input_file)
    offset=0
    nonNumericsPresent=False
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if first_row == True:
            if ('row_clusters-flat' in t and 'row_clusters-flat' not in t[0]):
                headers = string.join(['uid']+t[2:],'\t')+'\n'
                offset = 1
            elif '-filtered.txt' in fn and ".R2." in t[1] and LegacyMode:
                headers = string.join(['uid']+t[2:],'\t')+'\n'
                offset = 1
            else:
                headers = line
            first_row = False
        else:
            key = t[0]
            try:
                # Bug fix: the assignment operator was missing here
                # ("k1,k2string.split(...)"), so the intended collapse of
                # duplicated "gene gene" keys never ran — the NameError was
                # silently swallowed. Leftover debug prints removed.
                k1, k2 = string.split(key,' ')
                if k1==k2: key = k1
            except Exception: pass
            if key!='column_clusters-flat':
                key_list.append(key)
                # Detect non-numeric payloads (map is eager in Python 2,
                # so a bad value raises here).
                try: s = map(float,t[offset+1:])
                except Exception:
                    nonNumericsPresent=True
            key_db[key]=t
    if nonNumericsPresent:
        # Mean-center each row, substituting a sentinel for bad values.
        import numpy
        for key in key_db:
            t = key_db[key]
            s=[key]
            if offset ==1: s.append('')
            temp=[]
            for value in t[offset+1:]:
                try: temp.append(float(value))
                except Exception: pass
            avg=numpy.mean(temp)
            for value in t[offset+1:]:
                try: s.append(str(float(value)-avg))
                except Exception: s.append('0.000101')
            key_db[key]=s
    if len(key_db) != len(key_list) or offset>0 or nonNumericsPresent:
        print('Writing a cleaned-up version of the input file:')
        ### Duplicate IDs present
        input_file = input_file[:-4]+'-clean.txt'
        export_text = export.ExportFile(input_file) ### create a new input file
        export_text.write(headers) ### Header is the same for each file
        for key in key_db:
            t = key_db[key]
            if offset > 0:
                t = [t[0]]+t[1+offset:]
            export_text.write(string.join(t,'\t')+'\n') ### Write z-score values and row names
        export_text.close()
        print('File written...')
    return input_file
def importHopachOutput(filename):
    #print filename
    """ Import the ID order information """
    ### Parses a HOPACH makeoutput() table and returns a dict mimicking
    ### SciPy's dendrogram structure: db['leaves'] = original row indexes in
    ### clustered order, db['level'] = per-row cluster labels (as strings
    ### renumbered 1..k). Oversized root clusters are split into sub-levels.
    db={} ### Used to store the cluster data
    hopach_clusters=[]
    cluster_level=[]
    cluster_level2=[]
    cluster_level3=[]
    hopach_db={}
    cluster_db={}
    level2_level1={}
    firstLine = True
    fn=filepath(filename)
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        if firstLine: firstLine = False
        else:
            t = string.split(data,'\t')
            final_level_order = int(t[-1])
            index, uid, cluster_number, cluster_label, cluster_level_order, final_label, final_level_order = string.split(data,'\t')
            ### l2/l3 are 2- and 3-digit prefixes of the hierarchical label
            try: l2 = str(int(round(float(cluster_label),0)))[:2]
            except Exception: l2 = int(cluster_label[0])
            try: l3 = str(int(round(float(cluster_label),0)))[:3]
            except Exception: l3 = int(cluster_label[0])
            hopach_clusters.append((int(final_level_order),int(index)-1)) ### Need to order according to the original index, sorted by the clustered order
            cluster_level.append(int(cluster_label[0])) ### This is the root cluster number
            cluster_level2.append(l2) ### Additional cluster levels
            cluster_level3.append(l3)
            hopach_db[uid] = cluster_label
            level2_level1[l2] = int(cluster_label[0])
            level2_level1[l3] = int(cluster_label[0])
            try: cluster_db[int(float(cluster_label[0]))].append(uid)
            except Exception: cluster_db[int(cluster_label[0])] = [uid]
            try: cluster_db[l2].append(uid)
            except Exception: cluster_db[l2] = [uid]
            try: cluster_db[l3].append(uid)
            except Exception: cluster_db[l3] = [uid]
    split_cluster=[]
    if 'column' in fn:
        cluster_limit = 50 ### typically less columns than rows
    else:
        cluster_limit = 75
    ### Flag root clusters that are both large and >20% of all members
    for cluster in cluster_db:
        #print cluster,len(cluster_db[cluster]),(float(len(cluster_db[cluster]))/len(hopach_db))
        if len(cluster_db[cluster])>cluster_limit and (float(len(cluster_db[cluster]))/len(hopach_db))>0.2:
            #print cluster
            if cluster<10: ### NOTE(review): only integer root-cluster keys pass this in Py2; string l2/l3 keys compare greater than ints
                split_cluster.append(cluster)
    import unique
    levels1 = unique.unique(cluster_level)
    already_split={}
    updated_indexes={}
    if len(split_cluster)>0:
        print 'Splitting large hopach clusters:',split_cluster
        i=0
        for l2 in cluster_level2:
            l1 = level2_level1[l2]
            if l1 in split_cluster:
                cluster_level[i] = l2
                try:
                    l2_db = already_split[l1]
                    l2_db[l2]=[]
                except Exception: already_split[l1] = {l2:[]}
            i+=1
        ### Check and see if the l1 was split or not (might need 3 levels)
        i=0
        for l3 in cluster_level3:
            l1 = level2_level1[l3]
            if l1 in already_split:
                #l1_members = len(cluster_db[l1])
                l2_members = len(already_split[l1])
                #print l1, l3, l1_members, l2_members
                if l2_members == 1: ### Thus, not split
                    cluster_level[i] = l3
                    #print l1, l3, 'split'
            i+=1
    else:
        if len(cluster_level) > 50: ### Decide to use different hopach levels
            if len(levels1)<3:
                cluster_level = cluster_level2
        if len(cluster_level) > 200:
            if len(levels1)<4:
                cluster_level = cluster_level2
    hopach_clusters.sort()
    hopach_clusters = map(lambda x: x[1], hopach_clusters) ### Store the original file indexes in order based the cluster final order
    ### Change the cluster_levels from non-integers to integers for ICGS comparison group simplicity and better coloring of the color bar
    cluster_level2 = []
    ### Rename the sorted cluster IDs as integers
    cluster_level_sort = []
    for i in cluster_level:
        if str(i) not in cluster_level_sort:
            cluster_level_sort.append(str(i))
        cluster_level2.append(str(i))
    cluster_level_sort.sort()
    cluster_level = cluster_level2
    cluster_level2=[]
    i=1; cluster_conversion={}
    for c in cluster_level_sort:
        cluster_conversion[str(c)] = str(i)
        i+=1
    for c in cluster_level:
        cluster_level2.append(cluster_conversion[c])
    #print string.join(map(str,cluster_level2),'\t');sys.exit()
    db['leaves'] = hopach_clusters ### This mimics Scipy's cluster output data structure
    db['level'] = cluster_level2
    return db
class RScripts:
def __init__(self,file):
self._file = file
def format_value_for_R(self,value):
value = '"'+value+'"'
return value
def File(self):
filename = self._file
filename_list = string.split(filename,'/')
filename = filename_list[-1]
filename = self.format_value_for_R(filename)
#root_dir = string.join(filename_list[:-1],'/')
return filename
def Monocle(self,samplelogfile,expPercent,p_val,numGroups):
#samplelogfile='C:/Users/venz6v/Documents/Altanalyze R/data.txt'
#grp_list="C:/Users/venz6v/Documents/Altanalyze R/grous.txt"
#gene_list="C:/Users/venz6v/Documents/Altanalyze R/gene.txt"
filename=self.File()
samplelogfile=findParentDir(filename)+'Monocle/expressionFile.txt"'
grp_list=findParentDir(filename)+'Monocle/sampleGroups.txt"'
gene_list=findParentDir(filename)+'Monocle/geneAnnotations.txt"'
pseudo_tree=findParentDir(filename)+'Monocle/monoclePseudotime.pdf"'
pseudo_txt=findParentDir(filename)+'Monocle/monoclePseudotime.txt"'
#try: os.mkdir(findParentDir(samplelogfile)) ### create "hopach" dir if not present
#except Exception: None
#try: os.mkdir(findParentDir(grp_list)) ### create "hopach" dir if not present
#except Exception: None
#try: os.mkdir(findParentDir(gene_list)) ### create "hopach" dir if not present
#except Exception: None
#self._file = samplelogfile
#samplelogfile = self.File()
#self._file = grp_list
#grp_list = self.File()
#self._file = gene_list
#gene_list = self.File()
print 'Loading monocle package in R'
print_out = r('library("monocle")')
if "Error" in print_out:
print 'Installing the R package "monocle" in Config/R'
print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("monocle")')
print print_out
print_out = r('library("monocle")')
if "Error" in print_out: print 'unable to download the package "monocle"';
print_out = r('library("monocle")')
print "Reading Monocle data..."
data_import = 'fpkm_matrix<-read.delim(%s,row.names=1,check.names=FALSE)' % samplelogfile
#print [data_import]
print_out = r(data_import);
print print_out
data_import = 'sample_sheet<-read.delim(%s,row.names=1,check.names=FALSE)' % grp_list
#print [data_import]
print_out = r(data_import);
print print_out
data_import = 'gene_ann<-read.delim(%s,row.names=1,check.names=FALSE)' % gene_list
#print [data_import]
print_out = r(data_import);
print print_out
print_out= r('pd <- new("AnnotatedDataFrame",data=sample_sheet)');
print_out=r('fd <- new("AnnotatedDataFrame",data=gene_ann)');
print_out=r('URMM <- newCellDataSet(as.matrix(fpkm_matrix),phenoData = pd,featureData =fd)');
print print_out
#colname(a) == colname(b)
print_out=r('URMM<- detectGenes(URMM, min_expr = 0)')
gene_exp='expressed_genes <- row.names(subset(fData(URMM), num_cells_expressed >=%s ))'% expPercent
#print [gene_exp]
try:print_out = r(gene_exp)
except Exception:
print "expression genes"
print_out=r('length(expressed_genes)')
print print_out
# specify the grouping column for finding differential genes
import multiprocessing
cores = multiprocessing.cpu_count()
print 'using', cores, 'cores'
k = 'diff_test_res <- differentialGeneTest(URMM[expressed_genes, ], fullModelFormulaStr = "expression~Group",cores=%s)' % cores
print [k]
print_out=r(k)
print print_out
gene_ord='ordering_genes <- row.names(subset(diff_test_res, pval < %s))' %p_val
print_out=r(gene_ord); print print_out
print_out=r('write.table(ordering_genes,file="ordering_genes.txt")') ### Writing out the informative genes used
print print_out
print_out=r('length(ordering_genes)'); print 'number or ordering genes',print_out
print_out=r('ordering_genes <- intersect(ordering_genes, expressed_genes)'); print print_out
print_out=r('URMM <- setOrderingFilter(URMM, ordering_genes)'); print print_out
print_out=r('URMM <- reduceDimension(URMM, use_irlba = F)'); print print_out
for i in range(numGroups,1,-1):
span='URMM <- orderCells(URMM, num_paths = %s, reverse = F)'% i;
print_out=r(span);
print print_out
if "Error" in print_out:
continue
else:
print_out=r(span);print i
print print_out
break
print_out=r('png("Monocle/monoclePseudotime.png")');
print print_out
print_out=r('plot_spanning_tree(URMM)'); print print_out
print_out=r('dev.off()')
print_out=r('pdf("Monocle/monoclePseudotime.pdf")');
print print_out
print_out=r('plot_spanning_tree(URMM)'); print print_out
print_out=r('dev.off()')
"""
print_out=r('pdf("Monocle/monoclePseudotimeOriginalGroups.pdf")');
print print_out
print_out=r('plot_spanning_tree(URMM), color_by = "originalGroups"'); print print_out
print_out=r('dev.off()')
"""
print_out=r('write.table(pData(URMM),file="Monocle/monoclePseudotime.txt")')
print " completed"
def AffyNormalization(self,normalization_method,probe_level,batch_effects):
print 'Loading affy package in R'
print_out = r('library("affy")')
if "Error" in print_out:
#print_out = r('install.packages("ggplot2", repos="http://cran.us.r-project.org")')
print 'Installing the R package "affy" in Config/R'
print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("affy")')
if "Error" in print_out: print 'unable to download the package "affy"'; forceError
print_out = r('library("affy")')
if 'gcrma' in normalization_method:
print 'Loading gcrma package in R'
print_out = r('library("gcrma")')
if "Error" in print_out:
print 'Installing the R package "gcrma" in Config/R'
print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("gcrma")')
if "Error" in print_out: print 'unable to download the package "gcrma"'; forceError
print_out = r('library("gcrma")')
if batch_effects == 'remove':
### Import or download support for SVA/Combat
print 'Loading sva package in R'
print_out = r('library("sva")')
if "Error" in print_out:
print 'Installing the R package "sva" in Config/R'
print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("sva")')
if "Error" in print_out: print 'unable to download the package "sva"'; forceError
print_out = r('library("sva")')
print "Reading Affy files..."
print_out = r('rawdata<-ReadAffy()')
print print_out
r('setwd("ExpressionInput")')
if probe_level: ### normalize at the level of probes rahter than probeset (e.g., alt.exon analysis of 3' array)
print_out = r('PM<-probes(rawdata,which="pm")'); print print_out
print_out = r('AffyInfo<-dimnames(PM)[[1]]'); print print_out
print_out = r('cutpos<-regexpr("\\d+$",AffyInfo,perl=T)'); print print_out
print_out = r('AffyID<-substr(AffyInfo,1,cutpos-1)'); print print_out
print_out = r('probe<-as.numeric(substr(AffyInfo,cutpos,nchar(AffyInfo)))'); print print_out
print_out = r('data.bgc<-bg.correct(rawdata,method="rma")'); print print_out
print_out = r('data.bgc.q<-normalize.AffyBatch.quantiles(data.bgc,type="pmonly")'); print print_out
print_out = r('pm.bgc.q<-probes(data.bgc.q,which="pm")'); print print_out
print_out = r('normalized<-cbind(AffyID,probe,pm.bgc.q)'); print print_out
command = 'write.table(normalized,file='+self.File()+',sep="\t",row.names=FALSE, quote=FALSE)'
print_out = r(command)
print print_out
print 'probe-level normalization complete'
else:
print "Begining %s normalization (will install array annotations if needed)... be patient" % normalization_method
print_out = r('normalized<-%s(rawdata)') % normalization_method
print print_out
command = 'write.exprs(normalized,'+self.File()+')'; print_out = r(command)
print print_out
print self.File(), 'written...'
if batch_effects == 'remove':
### Import data
command = 'mod = model.matrix(~as.factor(cancer) + age, data=pheno)'
print_out = r(command)
command = 'cdata = ComBat(dat=normalized, batch=as.factor(pheno$batch), mod=mod, numCov=match("age", colnames(mod)))'
print_out = r(command)
command = 'write.table(cdata,file='+self.File()+',sep="\t",row.names=FALSE, quote=FALSE)'
print_out = r(command)
output_file = string.replace(self.File(),'exp.','stats.')
print_out = r('calls<-mas5calls(rawdata)')
#print_out = r('pvals<-se.exprs(calls)') ### outdated?
print_out = r('pvals<-assayData(calls)[["se.exprs"]]')
command = 'write.table(pvals,'+output_file+',sep = "\t", col.names = NA)'; print_out = r(command)
print output_file, 'written...'
def Limma(self,test_type):
r('library("limma")')
filename = self.File()
try: output_file = string.replace(filename,'input','output-'+test_type)
except ValueError: output_file = filename[0:-4]+'-output.txt'
print "Begining to process",filename
data_import = 'data<-read.table(%s,sep="\t",header=T,row.names=1,as.is=T)' % filename
print_out = r(data_import)
design_matrix_file = string.replace(filename,'input','design')
design_import = 'design<-read.table(%s,sep="\t",header=T,row.names=1,as.is=T)' % design_matrix_file
design_matrix = r(design_import)
print_out = r('fit<-lmFit(data,design)')
fit_data = r['fit']
print_out = r('fit<-eBayes(fit)')
fit_data = r['fit']
contrast_matrix_file = string.replace(filename,'input','contrast')
contrast_import = 'contrast<-read.table(%s,sep="\t",header=T,row.names=1,as.is=T)' % contrast_matrix_file
print_out = r(contrast_import)
contrast_matrix = r['contrast']
r('contrast<-as.matrix(contrast)')
r('fit.contrast<-contrasts.fit(fit,contrast)')
r('fit.contrast<-eBayes(fit.contrast)')
r('nonadj<-fit.contrast$F.p.value')
if test_type == 'fdr':
print_out = r('results<-p.adjust(fit.contrast$F.p.value,method="fdr")')
else:
print_out = r('results<-nonadj')
result = r['results']
print 'test_type=',test_type
print_out = r('sum(results<0.05)')
summary = r['sum']
print "Number of probeset with a p<0.05",summary,"using",test_type
r('output<-cbind(data,results)')
output = 'write.table(output,%s,sep="\t")' % output_file
print_out = r(output)
print output_file, 'written...'
def Multtest(self,test_type):
r('library("multtest")')
filename = self.File()
try: output_file = string.replace(filename,'input','output')
except ValueError: output_file = filename[0:-4]+'-output.txt'
print "Begining to process",filename
parse_line = 'job<-read.table(%s,sep="\t", row.names=1, as.is=T)' % filename
print_out = r(parse_line)
print_out = r('matrix_size<-dim(job)')
print_out = r('label<-job[1,2:matrix_size[2]]')
print_out = r('jobdata<-job[2:matrix_size[1],2:matrix_size[2]]')
if test_type == "f":
print_out = r('ttest<-mt.maxT(jobdata,label, test="f", B=50000)')
if test_type == "t":
print_out = r('ttest<-mt.maxT(jobdata,label)')
print_out = r('ttest2<-ttest[order(ttest[,1]),]')
write_file = 'write.table(ttest2,%s,sep="\t")' % output_file
print_out = r(write_file)
print "Results written to:",output_file
def check_hopach_file_type(self):
if 'hopach.input' in self.File():
return 'continue'
else: return 'break'
def check_multtest_file_type(self):
if 'output' not in self.File():
return 'continue'
else: return 'break'
def check_limma_file_type(self):
if 'input' in self.File():
return 'continue'
else: return 'break'
def Hopach(self,cluster_method,metric_gene,force_gene,metric_array,force_array):
if R_present==False:
rNotPresent
print_out = r('library("Biobase")')
if "Error" in print_out:
print 'Installing the R package "Biobase" in Config/R'
print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("Biobase")')
if "Error" in print_out: print 'unable to download the package "Biobase"'; forceError
print_out = r('library("Biobase")')
print_out = r('library("hopach")')
if "Error" in print_out:
print 'Installing the R package "hopach" in Config/R'
print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("hopach")')
if "Error" in print_out: print 'unable to download the package "hopach"'; forceError
print_out = r('library("hopach")')
filename = self.File()
#r('memory.limit(2000)')
print "Begining to process",filename,"using HOPACH"
metric_g = self.format_value_for_R(metric_gene)
metric_a = self.format_value_for_R(metric_array)
parse_line = 'data<-read.table(%s,sep="\t",as.is=T,row.names=1,header=T)' % filename
checklinelengths(self._file)
print_out = r(parse_line)
dat = r['data']
print_out = r(parse_line)
#print "Number of columns in input file:",len(dat)
print_out = r('data<-as.matrix(data)')
dat = r['data']
#print "Number of columns in matrix:",len(dat)
force1=''; force2=''; hopg='NULL'; hopa='NULL'; distmatg='NULL'; distmata = 'NULL' ### defaults for tree export
if force_gene != '' and force_gene != 0: force1=',kmax='+str(force_gene)+', khigh='+str(force_gene)+', K='+str(force_array)
if force_array != '' and force_array != 0: force2=',kmax='+str(force_array)+', khigh='+str(force_array)+', K='+str(force_array)
if cluster_method == 'both' or cluster_method == 'gene':
distance_matrix_line = 'distmatg<-distancematrix(data,d=%s)' % metric_g
#print distance_matrix_line
if len(dat) > 1:
print_out1 = r(distance_matrix_line)
print_out2 = r('hopg<-hopach(data,dmat=distmatg,ord="own"'+force1+')')
#print 'hopg<-hopach(data,dmat=distmatg,ord="own"'+force1+')'
try: hopach_run = r['hopg']
except Exception:
print print_out1
print print_out2
hopg = 'hopg'
distmatg = 'distmatg'
gene_output = self.HopachGeneOutputFilename(metric_gene,str(force_gene))
output = 'out<-makeoutput(data,hopg,file=%s)' % gene_output
#print output
print_out = r(output)
#print print_out
output_file = r['out']
status = 'stop'
if 'clustering' in hopach_run:
if 'order' in hopach_run['clustering']:
try:
if len(hopach_run['clustering']['order']) > 10: status = 'continue'
except TypeError:
error = 'file: '+filename+": Hopach returned the array of cluster orders as blank while clustering GENES... can not process cluster... continuing with other files"
print error; errors.append(error)
if status == 'continue':
r(output_file); print 'hopach output written'
else:
error = 'file: '+filename+" Hopach returned data-matrix length zero...ARRAY clusters can not be generated"
print error; errors.append(error)
if cluster_method == 'both' or cluster_method == 'array':
distance_matrix_line = 'distmata<-distancematrix(t(data),d=%s)' % metric_a
if len(dat) > 1:
dist = r(distance_matrix_line)
#print distance_matrix_line
print_out = r('hopa<-hopach(t(data),dmat=distmata,ord="own"'+force1+')') #,coll="all"
#print ['hopa<-hopach(t(data),dmat=distmata,ord="own",'+force2+')']
#print 'hopa<-hopach(t(data),dmat=distmata,ord="own"'+force2+')'
hopach_run = r['hopa']
hopa = 'hopa'
distmata = 'distmata'
array_output = self.HopachArrayOutputFilename(metric_array,str(force_array))
output = 'out<-makeoutput(t(data),hopa,file=%s)' % array_output
print_out = r(output)
output_file = r['out']
status = 'stop'
if 'clustering' in hopach_run:
if 'order' in hopach_run['clustering']:
try:
if len(hopach_run['clustering']['order']) > 10: status = 'continue'
except TypeError:
error = 'file: '+filename+": Hopach returned the array of cluster orders as blank while clustering ARRAYS... can not process cluster"
print error; errors.append(error)
if status == 'continue':
r(output_file); print 'hopach output written'
else:
error = 'file: '+filename+"data-matrix length zero...ARRAY clusters can not be generated...continuing analysis"
print error; errors.append(error)
if len(metric_g)==0: metric_g = 'NULL'
if len(metric_a)==0: metric_a = 'NULL'
try:
output_filename = string.replace(gene_output,'rows.','')
cdt_output_line = 'hopach2tree(data, file = %s, hopach.genes = %s, hopach.arrays = %s, dist.genes = %s, dist.arrays = %s, d.genes = %s, d.arrays = %s, gene.wts = NULL, array.wts = NULL, gene.names = NULL)' % (output_filename,hopg,hopa,distmatg,distmata,metric_g,metric_a) ###7 values
except Exception: None
make_tree_line = 'makeTree(labels, ord, medoids, dist, side = "GENE")' ### Used internally by HOPACH
#print cdt_output_line
try: print_out = r(cdt_output_line)
except Exception: None
#print print_out
def HopachGeneOutputFilename(self,value,force):
filename = self.File() ### Relative to the set working directory
if 'hopach.input' in filename: ### When running this module on it's own (requires nown filetypes)
new_filename = string.replace(filename,'hopach.input','hopach.output')
if len(value)>1: new_filename = string.replace(new_filename,'.txt','-'+value+'.txt')
if len(force)>0: new_filename = string.replace(new_filename,'.txt','-'+'force_'+str(force)+'c.txt')
else: ### When called from an external heatmap visualization module
filename = self._file ### full path
new_filename = findParentDir(filename)+'/hopach/rows.'+findFileName(filename)
try: os.mkdir(findParentDir(new_filename)) ### create "hopach" dir if not present
except Exception: None
new_filename = '"'+new_filename+'"'
return new_filename
def HopachArrayOutputFilename(self,value,force):
filename = self.File()
if 'hopach.input' in filename: ### When running this module on it's own (requires nown filetypes)
new_filename = string.replace(filename,'hopach.input','arrays.output')
if len(value)>1: new_filename = string.replace(new_filename,'.txt','-'+value+'.txt')
if len(force)>0: new_filename = string.replace(new_filename,'.txt','-'+'force_'+str(force)+'c.txt')
else:
filename = self._file ### full path
filename = self._file ### full path
new_filename = findParentDir(filename)+'/hopach/columns.'+findFileName(filename)
try: os.mkdir(findParentDir(new_filename)) ### create "hopach" dir if not present
except Exception: None
new_filename = '"'+new_filename+'"'
return new_filename
def display(self):
print self.data
class FormatData:
def setdata(self,value):
self.data = value
def transform(self):
self.data = checktype(self.data)
def display(self):
print self.data
def returndata(self):
return self.data
def checktype(object):
    ### Normalize the input into a list-of-lists: dictionaries are flattened
    ### (key prepended to each value row), tuples are cast to lists, and
    ### strings are treated as file paths to import; lists pass through.
    import types
    kind = type(object)
    if kind is types.DictType:
        return converttolist(object)
    if kind is types.TupleType:
        return list(object)
    if kind is types.StringType:
        return importtable(object)
    return object
def cleanUpLine(line):
    ### Strip newline/carriage-return artifacts (including the literal
    ### two-character sequence '\c') and double-quote characters.
    for token in ('\n', '\c', '\r', '"'):
        line = line.replace(token, '')
    return line
def checklinelengths(filename):
    ### Sanity check: every row of the tab-delimited file must have the same
    ### number of columns as the first row; otherwise report and halt.
    fn=filepath(filename); first_row='yes'; line_number=0
    for line in open(fn,'rU').xreadlines():
        try: data = cleanUpLine(line)
        except Exception: print 'error parsing the line:',[line], line_number
        t = string.split(data,'\t')
        if first_row == 'yes':
            elements = len(t)  # expected column count for all later rows
            first_row = 'no'
        else:
            if len(t) != elements:
                print "Line number", line_number, "contains",len(t),"elements, when",elements,"expected...kill program"
                print filename; kill  # 'kill' is undefined: deliberate NameError to abort
        line_number+=1
def converttolist(dictionary):
    ### Flatten a {key: row-list} dictionary into a list of rows with the key
    ### prepended. NOTE: mutates each stored list in place (the key is
    ### inserted at the front of the dictionary's own value list).
    rows = []
    for key in dictionary:
        row = dictionary[key]
        row.insert(0, key)
        rows.append(row)
    return rows
############ IMPORT FILES BEGIN ############
def importtable(filename):
    ### Read a tab-delimited text file into a list of row lists.
    ### BUGFIX: previously used "data,null = string.split(line,'\n')", which
    ### raised ValueError whenever a line did not contain exactly one newline
    ### (e.g. a final line with no trailing newline).
    fn=filepath(filename); tab_db = []
    for line in open(fn,'rU').readlines():
        data = line.rstrip('\n')
        t = string.split(data,'\t')
        tab_db.append(t)
    return tab_db
def filepath(filename):
    ### Resolve filename: if it already points to an existing file, use it
    ### as-is; otherwise interpret it relative to this module's directory.
    ### BUGFIX: previously called verifyFile(), which itself calls filepath(),
    ### producing unbounded mutual recursion that only terminated by
    ### exhausting the interpreter stack on every call.
    dir=os.path.dirname(__file__) #directory file is input as a variable
    if os.path.isfile(filename):
        fn = filename
    else:
        fn=os.path.join(dir,filename)
    return fn
def verifyFile(filename):
    ### Return True only if filename opens for reading and contains at least
    ### one line; empty, missing or unreadable files return False.
    ### BUGFIX: previously resolved the path via filepath(), which calls
    ### verifyFile() in turn - a mutual recursion that exhausted the stack.
    ### The path is now opened directly.
    status = False
    try:
        for line in open(filename):
            status = True
            break
    except Exception: status = False
    return status
def findFileName(filename):
    ### Return the last path component, tolerating Windows-style separators
    return filename.replace('\\','/').split('/')[-1]
def findParentDir(filename):
    ### Return the path up to and INCLUDING the final '/'. Quirks preserved:
    ### a trailing '/' yields '', and a path with no '/' at all yields its
    ### first character (behavior other code in this module relies on).
    path = filename.replace('//','/').replace('\\','/')
    cut = path[::-1].find('/')*-1
    return path[:cut]
def setWorkingDirectory(filename):
    ### Set R's working directory when calling this module remotely
    working_dir = findParentDir(filename)
    setwd = 'setwd("%s")' % working_dir
    try: r(setwd)
    except Exception:
        ### Dump debugging context, then halt via deliberate NameError
        print [filename]
        print [working_dir]
        print traceback.format_exc()
        kill  # 'kill' is undefined: deliberate NameError to abort
def read_directory(sub_dir):
    ### List .txt/.csv files in sub_dir (relative to this module's directory)
    ### and, as a side effect, point R's working directory there.
    dir=os.path.dirname(__file__)
    #print "Working Directory:", r('getwd()')
    working_dir = dir+'/'+sub_dir[1:]
    setwd = 'setwd("%s")' % working_dir
    r(setwd)
    #print "Working Directory:", r('getwd()')
    dir_list = os.listdir(dir +'/'+ sub_dir[1:]); dir_list2 = []
    for entry in dir_list: #add in code to prevent folder names from being included
        if entry[-4:] == ".txt" or entry[-4:] == ".csv": dir_list2.append(entry)
    return dir_list2
def CreateFilesMonocle(filename,rawExpressionFile,species='Hs'):
    ### Split a (possibly ICGS-clustered) expression file into the three
    ### inputs Monocle requires - Monocle/expressionFile.txt (min-shifted
    ### values), Monocle/geneAnnotations.txt and Monocle/sampleGroups.txt.
    ### rawExpressionFile is currently unused beyond the signature.
    first_row = True
    key_db={}
    key_list=[]
    fn=filepath(filename)
    offset=0
    nonNumericsPresent=False
    try:
        import gene_associations
        gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
    except Exception:
        print "gene_symbols present"
        gene_to_symbol={}
    setWorkingDirectory(findParentDir(filename)[:-1])
    try: os.mkdir(findParentDir(filename)+'/Monocle')
    except Exception: None
    #filename=self.File()
    x = 0
    data_name=findParentDir(filename)+'/Monocle/expressionFile.txt'
    gene_name=findParentDir(filename)+'/Monocle/geneAnnotations.txt'
    sample_name=findParentDir(filename)+'/Monocle/sampleGroups.txt'
    gene_names = [];
    gene_list=[];
    dat=[];
    export_cdt = open(sample_name,'w')
    export_gene=open(gene_name,'w')
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if first_row == True:
            ### Clustered files carry an extra row-cluster column (offset=1)
            if 'row_clusters-flat' in t and 'row_clusters-flat' not in t[0]:
                headers = string.join(t[2:],'\t')+'\n'
                offset = 1
            else:
                headers = string.join(t[1:],'\t')+'\n'
            first_row = False
        else:
            key = t[0]
            if key!='column_clusters-flat':
                key_list.append(key)
                try: s = map(float,t[offset+1:])
                except Exception:
                    nonNumericsPresent=True
                key_db[key]=t
            else:
                clusters = map(str,t[offset+1:])  # per-sample cluster labels
    ### Shift every row so its minimum is zero; non-numerics -> sentinel
    for key in key_list:
        t = key_db[key]
        s=[key]
        if offset ==1: s.append('')
        temp=[]
        for value in t[offset+1:]:
            try: temp.append(float(value))
            except Exception: pass
        min1=min(temp)
        for value in t[offset+1:]:
            try: s.append(str(float(value)-min1))
            except Exception: s.append('0.000101')
        key_db[key]=s
    export_object = open(data_name,'w')
    export_object.write(''+'\t'+headers) ### Header is the same for each file
    for key in key_list:
        t = key_db[key]
        if offset > 0:
            t = [t[0]]+t[1+offset:]
        export_object.write(string.join(t,'\t')+'\n') ### Write z-score values and row names
    export_object.close()
    print 'File written...'
    #return input_file
    ### Derive per-sample group labels from 'group::sample' style headers,
    ### falling back to the column_clusters-flat labels
    array_names = []; array_linker_db = {}; d = 0; i = 0
    for entry in headers.split('\t'):
        entry=cleanUpLine(entry)
        if '::' in entry:
            a = (entry.split("::"))
        elif ':' in entry:
            a = (entry.split(":"))
        else:
            a = (clusters[i],entry)
        #entry=string.join(a,'.')
        ent=entry+'\t'+a[0];
        #if(ent[0].isdigit()):
        #    ent='X'+ent[0:]
        #if '-' in ent:
        #    ent=string.replace(ent,'-','.')
        #if '+' in ent:
        #    ent=string.replace(ent,'+','.')
        #print j
        array_names.append(ent);
        i+=1
    i=0
    eheader = string.join(['']+['Group'],'\t')+'\n' ### format column-flat-clusters for export
    export_cdt.write(eheader)
    for row in array_names:
        export_cdt.write(row+'\n')
        i+=1
    export_cdt.close()
    gheader = string.join(['']+ ['gene_short_name'],'\t')+'\n' ### format column-flat-clusters for export
    export_gene.write(gheader)
    for key in key_list:
        proceed=False
        ### The commented out code just introduces errors and is not needed - re-evaluate in the future if needed
        """
        if key in gene_to_symbol:
            symbol = gene_to_symbol[key][0]
            if symbol in gene_list:
                nid = symbol
                proceed = True
        if proceed:
            k=gene_list.index(nid)
            export_object.write(line)
            export_gene.write(key+'\n')
        else:
            export_gene.write(key+'\t'+key+'\n')"""
        export_gene.write(key+'\t'+key+'\n')
    export_object.close()
    export_gene.close()
def reformatHeatmapFile(input_file):
    ### Convert a 'Clustering-*' heatmap text file into a Monocle-ready
    ### 'Input-*' file: drop the row-cluster column and fold the
    ### column_clusters-flat labels into 'cluster::sample' headers.
    ### Returns (new file path, number of unique column clusters).
    import unique
    export_file=string.replace(input_file,'Clustering-','Input-')
    eo = export.ExportFile(export_file)
    first_row = True
    fn=filepath(input_file)
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if first_row == True:
            if 'column_clusters-flat' not in t:
                array_names = []
                for i in t[2:]:
                    array_names.append(string.replace(i,':','-'))
                #print array_names;sys.exit()
                #array_names.append(i)
            elif 'column_clusters-flat' in t:
                array_clusters = t[2:]
                unique_clusters = unique.unique(array_clusters)
                ind=0; headers=[]
                for c in array_clusters:
                    headers.append(c+'::'+array_names[ind])
                    ind+=1
                headers = string.join(['uid']+headers,'\t')+'\n'
                eo.write(headers)
                first_row = False  # only done once the cluster row is seen
        else:
            values = string.join([t[0]]+t[2:],'\t')+'\n'
            eo.write(values)
    return export_file, len(unique_clusters)
def run_JTKcycle(expFile,annotFile,Time_range1, Time_range2,No_of_Timepoints,No_of_replicates,timepoint_difference):
print 'Loading JTK-Cycle package in R'
path='"'+r_package_path+'/JTK_CYCLE.R"'
#print [path]
line = 'source(%s)' % path
print_out = r(line)
"""
if "Error" in print_out:
print 'Installing the R package "JTK_CYCLE.R" in Config/R'
print_out = r('install.packages("devtools")')
print print_out
print_out = r('library(devtools)')
print print_out
print_out = r('install_github("mfcovington/jtk-cycle")')
#print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("jtk-cycle")')
print print_out
print_out = r('library("JTK_CYCLE.R")')
sys,exit()
print_out = r('source("/Users/ram5ge/Desktop/Krithika/JTK_Cycle/JTK_CYCLE.R")');print print_out
if "Error" in print_out: print "JTK_CYCLE.R is missing"
else: print 'Loading JTK Cycle'
"""
print_out = r('project <- "JTK_output"')
print_out = r('options(stringsAsFactors=FALSE)');print print_out
a = '"'+annotFile+'"'
read_annot = 'annot <- read.delim(%s)' % a
print [read_annot]
print_out = r(read_annot);#print print_out
v = '"'+expFile+'"'
read_data = 'input_data <- read.delim(%s)' % v
print [read_data]
print_out = r(read_data);#print print_out
print_out = r('rownames(input_data) <- input_data[,1]');#print print_out
print_out = r('input_data <- input_data[,-1]');#print print_out
#dist_calc = r('jtkdist(24,1)')
dist_calc = 'jtkdist(%s,%s)' % (str(No_of_Timepoints), str(No_of_replicates))
print [dist_calc]
print_out = r(dist_calc);#print print_out
period_calc = 'periods <- %s:%s' %(str(Time_range1), str(Time_range2))
print [period_calc]
print_out = r(period_calc);#print print_out
j = str(timepoint_difference)
jtk_calc = 'jtk.init(periods,%s)' % j
print [jtk_calc]
print_out = r(jtk_calc);#print print_out
v = 'cat("JTK analysis started on",date(),"\n")'
print [v]
print_out = r(v);#print print_out
print_out = r('flush.console()');#print print_out
v = 'st <- system.time({res <- apply(data,1,function(z)'
v+= ' {jtkx(z); c(JTK.ADJP,JTK.PERIOD,JTK.LAG,JTK.AMP)});'
v+= ' res <- as.data.frame(t(res)); bhq <- p.adjust(unlist(res[,1]),"BH");'
v+= ' res <- cbind(bhq,res); colnames(res) <- c("BH.Q","ADJ.P","PER","LAG","AMP");'
v+= ' results <- cbind(annot,res,data); results <- results[order(res$ADJ.P,-res$AMP),]})'
print [v]
print_out = r(v); print print_out
#print_out = r('dim(X)');print print_out
print_out = r('print(st)');print #print_out
print_out = r('save(results,file=paste("JTK",project,"rda",sep="."))');#print print_out
print_out = r('write.table(results,file=paste("JTK",project,"txt",sep="."),row.names=F,col.names=T,quote=F,sep="\t")');#print print_out
def performMonocleAnalysisFromHeatmap(species,heatmap_output_dir,rawExpressionFile):
    """Drive a Monocle pseudotime analysis from an exported heatmap text file.

    species: species code passed through to CreateFilesMonocle (e.g. 'Hs').
    heatmap_output_dir: path to the heatmap export (a 'Clustering-' file is
        reformatted first and supplies the number of groups).
    rawExpressionFile: path to the raw/steady-state expression matrix.
    """
    numGroups=10
    if 'Clustering-' in heatmap_output_dir:
        export_file,numGroups = reformatHeatmapFile(heatmap_output_dir)
    #else:
    # NOTE(review): the unconditional assignment below overwrites the
    # export_file produced by reformatHeatmapFile above; the commented-out
    # ``else:`` suggests it was meant to be conditional — confirm intent.
    export_file = heatmap_output_dir;
    CreateFilesMonocle(export_file,rawExpressionFile,species=species)
    print 'Looking for',numGroups, 'Monocle groups in the input expression file.'
    remoteMonocle(export_file,expPercent=5,pval=0.05,numGroups=numGroups)
if __name__ == '__main__':
    # --- developer scratch area: JTK-Cycle run on a fixed local dataset ---
    expFile = '/Users/saljh8/Downloads/Liver_Smoothed_exp_steady_state.txt'
    annotFile = '/Users/saljh8/Downloads/Liver_annot.txt'
    Time_range1 = '10'
    Time_range2 = '12'
    No_of_Timepoints = '24'
    No_of_replicates = '1'
    timepoint_difference = '2'
    # NOTE(review): sys.exit() here makes everything below unreachable; the
    # remaining sections are alternate entry points kept for manual testing.
    run_JTKcycle(expFile,annotFile,Time_range1, Time_range2,No_of_Timepoints,No_of_replicates,timepoint_difference);sys.exit()
    # --- defaults for the interactive hopach/multtest/limma workflow below ---
    errors = []
    cluster_method='array';metric_gene="";force_gene='';metric_array="euclid";force_array=''
    analysis_method='hopach'; multtest_type = 'f'
    #Sample log File
    #Input-exp.MixedEffectsThanneer-DPF3%20DMRT3%20FOXA1%20SMAD6%20TBX3%20amplify%20monocle-hierarchical_cosine_correlated.txt
    filename='/Users/saljh8/Desktop/cardiacRNASeq/DataPlots/Clustering-additionalExpressionSingleCell-annotated-hierarchical_cosine_cosine2.txt'
    rawExpressionFile = filename
    #filename = "/Volumes/SEQ-DATA/Eric/embryonic_singlecell_kidney/ExpressionOutput/Clustering/SampleLogFolds-Kidney.txt"
    #filename = "/Volumes/SEQ-DATA/SingleCell-Churko/Filtered/Unsupervised-AllExons/NewCardiacMarkers1/FullDataset/ExpressionOutput/Clustering/SampleLogFolds-CM.txt"
    #rawExpressionFile = '/Volumes/SEQ-DATA/SingleCell-Churko/Filtered/Unsupervised-AllExons/NewCardiacMarkers1/FullDataset/ExpressionInput/exp.CM-steady-state.txt'
    #filename = '/Users/saljh8/Desktop/Stanford/ExpressionInput/amplify/DataPlots/Clustering-exp.EB-SingleCell-GPCR-hierarchical_cosine_correlation.txt'
    #rawExpressionFile = '/Users/saljh8/Desktop/Stanford/ExpressionInput/exp.EB-SingleCell.txt'
    # alternate entry point: Monocle analysis (exits immediately after)
    performMonocleAnalysisFromHeatmap('Hs',filename,rawExpressionFile);sys.exit()
    CreateFilesMonocle(filename,rawExpressionFile)
    remoteMonocle(filename,expPercent=0,pval=0.01,numGroups=5);sys.exit()
    # alternate entry point: Affymetrix RMA normalization
    filename = '/Users/nsalomonis/Downloads/GSE9440_RAW/ExpressionInput/exp.differentiation.txt'
    remoteAffyNormalization(filename,'rma',True,'remove'); sys.exit()
    # --- interactive console workflow: pick an analysis method ---
    print "******Analysis Method*******"
    print "Options:"
    print "1) Multtest (permutation ftest/ttest)"
    print "2) HOPACH clustering"
    print "3) limma 2-way ANOVA"
    inp = sys.stdin.readline(); inp = inp.strip()
    analysis_method_val = int(inp)
    if analysis_method_val == 1: analysis_method = "multtest"
    if analysis_method_val == 2: analysis_method = "hopach"
    if analysis_method_val == 3: analysis_method = "limma"
    if analysis_method == "hopach":
        # choose what to cluster (rows, columns or both)
        print "******Analysis Options*******"
        print "Cluster type:"
        print "1) genes only (cluster rows)"
        print "2) arrays only (cluster columns)"
        print "3) both"
        inp = sys.stdin.readline(); inp = inp.strip()
        cluster_type_call = int(inp)
        if cluster_type_call == 1: cluster_method = "gene"
        if cluster_type_call == 2: cluster_method = "array"
        if cluster_type_call == 3: cluster_method = "both"
        if cluster_method == "array" or cluster_method == "both":
            print "******Analysis Options For Array Clustering*******"
            print "Cluster metrics:"
            print "1) euclidian distance (sensitive to magnitude)"
            print "2) cosine angle distance (not sensitive to magnitude)"
            print "3) correlation distance"
            inp = sys.stdin.readline(); inp = inp.strip()
        if cluster_method == "array" or cluster_method == "both":
            metric_array_call = int(inp)
            if metric_array_call == 1: metric_array = "euclid"
            if metric_array_call == 2: metric_array = "cosangle"
            if metric_array_call == 3: metric_array = "cor"
        if cluster_method == "gene" or cluster_method == "both":
            print "******Analysis Options For Gene Clustering*******"
            print "Cluster metrics:"
            print "1) euclidian distance (sensitive to magnitude)"
            print "2) cosine angle distance (not sensitive to magnitude)"
            print "3) correlation distance"
            inp = sys.stdin.readline(); inp = inp.strip()
        if cluster_method == "gene" or cluster_method == "both":
            try: metric_gene_call = int(inp)
            except ValueError: print [inp], 'not a valid option'; sys.exit()
            if metric_gene_call == 1: metric_gene = "euclid"
            if metric_gene_call == 2: metric_gene = "cosangle"
            if metric_gene_call == 3: metric_gene = "cor"
            if metric_gene == "cosangle":
                print "******Analysis Options*******"
                print "Absolute Clustering:"
                print "1) yes"
                print "2) no"
                inp = sys.stdin.readline(); inp = inp.strip()
                if inp == "1": metric_gene = "abscosangle"
        print "Force Cluster Number for Arrays:"
        print "Enter 'n' if you don't want to "
        print "Enter number of clusters of arrays if you do"
        inp = sys.stdin.readline(); inp = inp.strip()
        if inp == 'n' or inp == 'N': force_array = ''
        else:force_array = int(inp)
        working_dir = '/hopach_input'
    if analysis_method == "multtest":
        print "******Analysis Options*******"
        print "Statistical test:"
        print "1) ftest (for multiple groups)"
        print "2) ttest (for two groups)"
        inp = sys.stdin.readline(); inp = inp.strip()
        multtest_type_call = int(inp)
        if multtest_type_call == 1: multtest_type = "f"
        if multtest_type_call == 2: multtest_type = "t"
        working_dir = '/multtest_input'
    if analysis_method == "limma":
        working_dir = '/limma_input'
        print "******Analysis Options*******"
        print "Statistical test:"
        print "1) Non-adjusted"
        print "2) FDR"
        inp = sys.stdin.readline(); inp = inp.strip()
        limma_type_call = int(inp)
        if limma_type_call == 1: limma_type = "nonadj"
        if limma_type_call == 2: limma_type = "fdr"
    # run the selected analysis on every input file in the working directory
    dir_list = read_directory(working_dir)
    for input in dir_list: #loop through each file in the directory to output results
        input_file = working_dir + "/"+ input
        input_file = input_file[1:] #not sure why, but the '\' needs to be there while reading initally but not while accessing the file late
        z = RScripts(input_file)
        if analysis_method == "hopach":
            status = z.check_hopach_file_type()
            if status == 'continue':
                z.Hopach(cluster_method,metric_gene,force_gene,metric_array,force_array)
        if analysis_method == "multtest":
            status = z.check_multtest_file_type()
            if status == 'continue':
                z.Multtest(multtest_type)
        if analysis_method == "limma":
            status = z.check_limma_file_type()
            if status == 'continue':
                # limma needs matching design/contrast matrices next to the input
                design_matrix_file = string.replace(input,'input','design')
                contrast_matrix_file = string.replace(input,'input','contrast')
                if design_matrix_file in dir_list and contrast_matrix_file in dir_list:
                    z.Limma(limma_type)
    if analysis_method == "hopach":
        if len(errors)>0:
            print "**************ALL ERRORS**************"
            for entry in errors:
                print entry
        else: print 'Execution complete... check outputs for verification'
|
nsalomonis/AltAnalyze
|
R_interface.py
|
Python
|
apache-2.0
| 55,242
|
[
"Bioconductor"
] |
105694ea3d7abc39019df49f86b752b67b3d19455ae6aeb21a8ebdcf1ae72903
|
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO parser for HMMER table output format."""
from itertools import chain
import sys
# Add path to Bio
sys.path.append('../../..')
from Bio._py3k import _as_bytes, _bytes_to_string
from Bio.Alphabet import generic_protein
from Bio.SearchIO._index import SearchIndexer
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
__all__ = ['Hmmer3TabParser', 'Hmmer3TabIndexer', 'Hmmer3TabWriter']
__docformat__ = "restructuredtext en"
class Hmmer3TabParser(object):
    """Parser for the HMMER table format.

    Iterating over an instance yields one QueryResult per query, built from
    consecutive rows sharing the same query id (one Hit per row).
    """

    def __init__(self, handle):
        # pre-read the first line so __iter__ can scan past the '#' header
        self.handle = handle
        self.line = self.handle.readline()

    def __iter__(self):
        """Yield QueryResult objects parsed from the handle, in file order."""
        header_mark = '#'
        # read through the header if it exists
        while self.line.startswith(header_mark):
            self.line = self.handle.readline()
        # if we have result rows, parse it
        if self.line:
            for qresult in self._parse_qresult():
                yield qresult

    def _parse_row(self):
        """Returns a dictionary of parsed row values."""
        # columns are whitespace-separated; filter drops runs of spaces
        cols = [x for x in self.line.strip().split(' ') if x]
        # if len(cols) > 19, we have extra description columns
        # combine them all into one string in the 19th column
        if len(cols) > 19:
            cols[18] = ' '.join(cols[18:])
        # if it's < 19, we have no description columns, so use an empty string
        # instead
        elif len(cols) < 19:
            cols.append('')
        assert len(cols) == 19
        # assign parsed column data into qresult, hit, and hsp dicts
        qresult = {}
        qresult['id'] = cols[2]  # query name
        qresult['accession'] = cols[3]  # query accession
        hit = {}
        hit['id'] = cols[0]  # target name
        hit['accession'] = cols[1]  # target accession
        hit['evalue'] = float(cols[4])  # evalue (full sequence)
        hit['bitscore'] = float(cols[5])  # score (full sequence)
        hit['bias'] = float(cols[6])  # bias (full sequence)
        hit['domain_exp_num'] = float(cols[10])  # exp
        hit['region_num'] = int(cols[11])  # reg
        hit['cluster_num'] = int(cols[12])  # clu
        hit['overlap_num'] = int(cols[13])  # ov
        hit['env_num'] = int(cols[14])  # env
        hit['domain_obs_num'] = int(cols[15])  # dom
        hit['domain_reported_num'] = int(cols[16])  # rep
        hit['domain_included_num'] = int(cols[17])  # inc
        hit['description'] = cols[18]  # description of target
        hsp = {}
        hsp['evalue'] = float(cols[7])  # evalue (best 1 domain)
        hsp['bitscore'] = float(cols[8])  # score (best 1 domain)
        hsp['bias'] = float(cols[9])  # bias (best 1 domain)
        # strand is always 0, since HMMER now only handles protein
        frag = {}
        frag['hit_strand'] = frag['query_strand'] = 0
        frag['alphabet'] = generic_protein
        return {'qresult': qresult, 'hit': hit, 'hsp': hsp, 'frag': frag}

    def _parse_qresult(self):
        """Generator function that returns QueryResult objects.

        Implemented as a one-line-lookahead state machine: each iteration
        materializes objects for the *previous* row, then flushes a
        QueryResult whenever the query id changes or EOF is reached.
        """
        # state values, determines what to do for each line
        state_EOF = 0
        state_QRES_NEW = 1
        state_QRES_SAME = 3
        # initial value dummies
        qres_state = None
        file_state = None
        prev_qid = None
        cur, prev = None, None
        # container for Hit objects, used to create QueryResult
        hit_list = []
        while True:
            # store previous line's parsed values for all lines after the first
            if cur is not None:
                prev = cur
                prev_qid = cur_qid
            # only parse the result row if it's not EOF
            # NOTE: we are not parsing the extra '#' lines appended to the end
            # of hmmer31b1 tabular results since storing them in qresult
            # objects means we can not do a single-pass parsing
            if self.line and not self.line.startswith('#'):
                cur = self._parse_row()
                cur_qid = cur['qresult']['id']
            else:
                file_state = state_EOF
                # mock value for cur_qid, since we have nothing to parse
                cur_qid = None
            # a new qresult begins whenever the query id changes
            if prev_qid != cur_qid:
                qres_state = state_QRES_NEW
            else:
                qres_state = state_QRES_SAME
            if prev is not None:
                # since domain tab formats only have 1 Hit per line
                # we always create HSPFragment, HSP, and Hit per line
                prev_hid = prev['hit']['id']
                # create fragment and HSP and set their attributes
                frag = HSPFragment(prev_hid, prev_qid)
                for attr, value in prev['frag'].items():
                    setattr(frag, attr, value)
                hsp = HSP([frag])
                for attr, value in prev['hsp'].items():
                    setattr(hsp, attr, value)
                # create Hit and set its attributes
                hit = Hit([hsp])
                for attr, value in prev['hit'].items():
                    setattr(hit, attr, value)
                hit_list.append(hit)
                # create qresult and yield if we're at a new qresult or at EOF
                if qres_state == state_QRES_NEW or file_state == state_EOF:
                    qresult = QueryResult(hit_list, prev_qid)
                    for attr, value in prev['qresult'].items():
                        setattr(qresult, attr, value)
                    yield qresult
                    # if we're at EOF, break
                    if file_state == state_EOF:
                        break
                    hit_list = []
            self.line = self.handle.readline()
class Hmmer3TabIndexer(SearchIndexer):
    """Indexer class for HMMER table output.

    Maps each query id to the byte offset and length of its block of rows,
    enabling random access without parsing the whole file.
    """

    _parser = Hmmer3TabParser
    # denotes column location for query identifier
    _query_id_idx = 2

    def __iter__(self):
        """Iterates over the file handle; yields key, start offset, and length."""
        handle = self._handle
        handle.seek(0)
        query_id_idx = self._query_id_idx
        qresult_key = None
        # the handle is binary, so all markers are compared as bytes
        header_mark = _as_bytes('#')
        split_mark = _as_bytes(' ')
        # set line with initial mock value, to emulate header
        line = header_mark
        # read through header
        while line.startswith(header_mark):
            start_offset = handle.tell()
            line = handle.readline()
        # and index the qresults
        while True:
            end_offset = handle.tell()
            if not line:
                break
            cols = [x for x in line.strip().split(split_mark) if x]
            if qresult_key is None:
                qresult_key = cols[query_id_idx]
            else:
                curr_key = cols[query_id_idx]
                if curr_key != qresult_key:
                    # the current line already belongs to the next query, so
                    # the previous record ends just before this line
                    adj_end = end_offset - len(line)
                    yield _bytes_to_string(qresult_key), start_offset, \
                        adj_end - start_offset
                    qresult_key = curr_key
                    start_offset = adj_end
            line = handle.readline()
            if not line:
                # flush the final record at EOF
                yield _bytes_to_string(qresult_key), start_offset, \
                    end_offset - start_offset
                break

    def get_raw(self, offset):
        """Returns the raw string of a QueryResult object from the given offset."""
        handle = self._handle
        handle.seek(offset)
        query_id_idx = self._query_id_idx
        qresult_key = None
        qresult_raw = _as_bytes('')
        split_mark = _as_bytes(' ')
        while True:
            line = handle.readline()
            if not line:
                break
            cols = [x for x in line.strip().split(split_mark) if x]
            if qresult_key is None:
                qresult_key = cols[query_id_idx]
            else:
                curr_key = cols[query_id_idx]
                # stop as soon as a different query id appears
                if curr_key != qresult_key:
                    break
            qresult_raw += line
        return qresult_raw
class Hmmer3TabWriter(object):
    """Writer for hmmer3-tab output format."""

    def __init__(self, handle):
        # open, writable text handle the output is streamed to
        self.handle = handle

    def write_file(self, qresults):
        """Writes to the handle.

        Returns a tuple of how many QueryResult, Hit, and HSP objects were written.
        """
        handle = self.handle
        qresult_counter, hit_counter, hsp_counter, frag_counter = 0, 0, 0, 0
        # the first qresult is peeked at so the header widths can be computed
        try:
            first_qresult = next(qresults)
        except StopIteration:
            handle.write(self._build_header())
        else:
            # write header
            handle.write(self._build_header(first_qresult))
            # and then the qresults
            for qresult in chain([first_qresult], qresults):
                if qresult:
                    handle.write(self._build_row(qresult))
                    qresult_counter += 1
                    hit_counter += len(qresult)
                    hsp_counter += sum(len(hit) for hit in qresult)
                    frag_counter += sum(len(hit.fragments) for hit in qresult)
        return qresult_counter, hit_counter, hsp_counter, frag_counter

    def _build_header(self, first_qresult=None):
        """Returns the header string of a HMMER table output."""
        # calculate whitespace required
        # adapted from HMMER's source: src/p7_tophits.c#L1083
        if first_qresult is not None:
            # qnamew = max(20, len(first_qresult.id))
            qnamew = 20  # why doesn't the above work?
            tnamew = max(20, len(first_qresult[0].id))
            qaccw = max(10, len(first_qresult.accession))
            taccw = max(10, len(first_qresult[0].accession))
        else:
            qnamew, tnamew, qaccw, taccw = 20, 20, 10, 10
        header = "#%*s %22s %22s %33s\n" % \
            (tnamew + qnamew + taccw + qaccw + 2, "",
             "--- full sequence ----", "--- best 1 domain ----",
             "--- domain number estimation ----")
        header += "#%-*s %-*s %-*s %-*s %9s %6s %5s %9s %6s %5s %5s %3s " \
            "%3s %3s %3s %3s %3s %3s %s\n" % (tnamew-1, " target name",
            taccw, "accession", qnamew, "query name", qaccw,
            "accession", " E-value", " score", " bias",
            " E-value", " score", " bias", "exp",
            "reg", "clu", " ov", "env", "dom", "rep",
            "inc", "description of target")
        header += "#%*s %*s %*s %*s %9s %6s %5s %9s %6s %5s %5s %3s %3s " \
            "%3s %3s %3s %3s %3s %s\n" % (tnamew-1, "-------------------",
            taccw, "----------", qnamew, "--------------------", qaccw,
            "----------", "---------", "------", "-----", "---------",
            "------", "-----", "---", "---", "---", "---", "---", "---",
            "---", "---", "---------------------")
        return header

    def _build_row(self, qresult):
        """Returns a string or one row or more of the QueryResult object."""
        rows = ''
        # calculate whitespace required
        # adapted from HMMER's source: src/p7_tophits.c#L1083
        qnamew = max(20, len(qresult.id))
        tnamew = max(20, len(qresult[0].id))
        qaccw = max(10, len(qresult.accession))
        taccw = max(10, len(qresult[0].accession))
        # one output line per hit; per-domain values come from the first HSP
        for hit in qresult:
            rows += "%-*s %-*s %-*s %-*s %9.2g %6.1f %5.1f %9.2g %6.1f %5.1f " \
                "%5.1f %3d %3d %3d %3d %3d %3d %3d %s\n" % (tnamew, hit.id, taccw,
                hit.accession, qnamew, qresult.id, qaccw, qresult.accession, hit.evalue,
                hit.bitscore, hit.bias, hit.hsps[0].evalue, hit.hsps[0].bitscore,
                hit.hsps[0].bias, hit.domain_exp_num, hit.region_num, hit.cluster_num,
                hit.overlap_num, hit.env_num, hit.domain_obs_num,
                hit.domain_reported_num, hit.domain_included_num, hit.description)
        return rows
# if not used as a module, run the doctest
if __name__ == "__main__":
    # Bio._utils.run_doctest executes this module's doctests
    from Bio._utils import run_doctest
    run_doctest()
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/SearchIO/HmmerIO/hmmer3_tab.py
|
Python
|
gpl-2.0
| 12,592
|
[
"Biopython"
] |
28c793853ae6341f853f3accb9320e20b8a27aa3d392d8f991e486bf95ef24f4
|
""" This script submits a test production
"""
# pylint: disable=wrong-import-position, protected-access
import os
import json
from DIRAC.Core.Base.Script import Script
Script.parseCommandLine()
# from DIRAC
from DIRAC import gLogger
from DIRAC.ProductionSystem.Client.ProductionClient import ProductionClient
from DIRAC.ProductionSystem.Client.ProductionStep import ProductionStep
from DIRAC.Interfaces.API.Job import Job
from DIRAC.Core.Workflow.Parameter import Parameter
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
def createWorkflowBodyStep1():
    """Build the workflow body for the image-production (MC simulation) step.

    The job clones the mandel4ts repository, renders one band of the
    Mandelbrot image (selected by the transformation-level JOB_ID), then
    uploads the resulting data files with their metadata.

    Returns:
        str: XML serialization of the job workflow.
    """
    job = Job()
    job.setName("mandelbrot raw")
    job.setOutputSandbox(["*log"])
    # this is so that the JOB_ID within the transformation can be evaluated on
    # the fly in the job application, see below (consumed as @{JOB_ID})
    job.workflow.addParameter(Parameter("JOB_ID", "000000", "string", "", "", True, False, "Initialize JOB_ID"))

    # job step1: setup software
    job.setExecutable("git clone https://github.com/bregeon/mandel4ts.git")

    # job step2: run mandelbrot application
    # note how the JOB_ID (within the transformation) is passed as an argument
    # and will be evaluated on the fly
    job.setExecutable("./mandel4ts/mandelbrot.py", arguments="-P 0.0005 -M 1000 -L @{JOB_ID} -N 200")

    # job step3: upload data and set metadata
    # (the original single-argument os.path.join was a no-op and was removed)
    outputPath = "/dirac/prodsys/mandelbrot/images/raw"
    outputPattern = "data_*txt"
    outputSE = "RAL-SE"
    outputMetadata = json.dumps(
        {"application": "mandelbrot", "image_format": "ascii", "image_width": 7680, "image_height": 200}
    )
    job.setExecutable(
        "./mandel4ts/dirac-add-files.py",
        arguments="%s '%s' %s '%s'" % (outputPath, outputPattern, outputSE, outputMetadata),
    )
    return job.workflow.toXML()
def createWorkflowBodyStep2():
    """Build the workflow body for the merge (data-processing) step.

    The job clones the mandel4ts repository, merges the individual image
    bands produced by step 1, then uploads the merged data with metadata
    describing the combined image height.

    Returns:
        str: XML serialization of the job workflow.
    """
    job = Job()
    job.setName("merge mandelbrot")
    job.setOutputSandbox(["*log"])

    # job step1: setup software
    job.setExecutable("git clone https://github.com/bregeon/mandel4ts.git")

    # job step2: run mandelbrot merge
    job.setExecutable("./mandel4ts/merge_data.py")

    # job step3: upload data and set metadata
    # (the original single-argument os.path.join was a no-op and was removed)
    outputPath = "/dirac/prodsys/mandelbrot/images/merged"
    outputPattern = "data_merged*txt"
    outputSE = "RAL-SE"
    # the merged image stacks this many 200-pixel bands vertically
    nb_input_files = 7
    outputMetadata = json.dumps(
        {
            "application": "mandelbrot",
            "image_format": "ascii",
            "image_width": 7680,
            "image_height": 200 * nb_input_files,
        }
    )
    job.setExecutable(
        "./mandel4ts/dirac-add-files.py",
        arguments="%s '%s' %s '%s'" % (outputPath, outputPattern, outputSE, outputMetadata),
    )
    return job.workflow.toXML()
def createProductionStep(name, type, inputQuery=None, outputQuery=None):
    """Assemble a ProductionStep with the given name, type and I/O metadata queries.

    Returns:
        ProductionStep: the configured (but not yet registered) step.
    """
    step = ProductionStep()
    step.Name, step.Type = name, type
    step.Inputquery, step.Outputquery = inputQuery, outputQuery
    return step
# Set meta data fields in the DFC (DIRAC File Catalog) so the per-step
# metadata queries below can be registered and matched.
fc = FileCatalog()
MDFieldDict = {
    "application": "VARCHAR(128)",
    "image_format": "VARCHAR(128)",
    "image_width": "int",
    "image_height": "int",
}
for MDField in MDFieldDict.keys():
    MDFieldType = MDFieldDict[MDField]
    res = fc.addMetadataField(MDField, MDFieldType)
    if not res["OK"]:
        gLogger.error("Failed to add metadata fields", res["Message"])
        exit(-1)

# Instantiate the ProductionClient
prodClient = ProductionClient()

# Create the first production step and add it to the Production
outputquery = {"application": "mandelbrot", "image_format": "ascii", "image_width": 7680, "image_height": 200}
prodStep1 = createProductionStep("ImageProd", "MCSimulation", outputQuery=outputquery)
body = createWorkflowBodyStep1()
prodStep1.Body = body
res = prodClient.addProductionStep(prodStep1)
if not res["OK"]:
    gLogger.error("Failed to add production step", res["Message"])
    exit(-1)

# Create the second production step and add it to the Production.
# Its input query matches step 1's output; its output describes the merged image.
inputquery = {"application": "mandelbrot", "image_format": "ascii", "image_width": 7680, "image_height": 200}
outputquery = {"application": "mandelbrot", "image_format": "ascii", "image_width": 7680, "image_height": 1400}
prodStep2 = createProductionStep("MergeImage", "DataProcessing", inputQuery=inputquery, outputQuery=outputquery)
body = createWorkflowBodyStep2()
prodStep2.Body = body
# chaining: step 2 consumes the output of step 1
prodStep2.ParentStep = prodStep1
res = prodClient.addProductionStep(prodStep2)
if not res["OK"]:
    gLogger.error("Failed to add production step", res["Message"])
    exit(-1)

# Get the production description
prodDesc = prodClient.prodDescription

# Create the production
prodName = "SeqProd"
res = prodClient.addProduction(prodName, json.dumps(prodDesc))
if not res["OK"]:
    gLogger.error("Failed to add production", res["Message"])
    exit(-1)

# Start the production, i.e. instantiate the transformation steps
res = prodClient.startProduction(prodName)
if not res["OK"]:
    gLogger.error("Failed to start production", res["Message"])
    exit(-1)
|
DIRACGrid/DIRAC
|
tests/System/dirac-test-prod-sys.py
|
Python
|
gpl-3.0
| 5,148
|
[
"DIRAC"
] |
b6a6f66ad87587e8dbee668940efb186436869a0b63cecff27071f1535a00995
|
"""
Base class for Gaussian process latent variable models
This is really not ready for release yet but is used by the gpasso model
"""
import sys
sys.path.append('./../..')
from pygp.gp import GP
import pdb
from pygp.optimize.optimize_base import opt_hyper
import scipy as SP
import scipy.linalg as linalg
def PCA(Y, components):
    """Principal component analysis of Y via singular value decomposition.

    Returns [s0, w0] where s0 are the leading `components` factors rescaled
    to unit standard deviation per column, and w0 the correspondingly
    rescaled weights, so that SP.dot(s0, w0.T) reconstructs Y's projection.
    """
    U, singvals, Vt = linalg.svd(Y, full_matrices=0)
    factors = U[:, 0:components]
    weights = SP.dot(SP.diag(singvals), Vt).T[:, 0:components]
    # normalize factors to unit std and push the scale into the weights
    scale = factors.std(axis=0)
    factors /= scale
    weights *= scale
    return [factors, weights]
class GPLVM(GP):
    """
    derived class form GP offering GPLVM specific functionality

    In addition to covariance hyperparameters, the latent inputs (selected
    columns of self.x) may themselves be optimized by passing them under the
    'x' key of the hyperparameter dictionary.
    """
    __slots__ = ["gplvm_dimensions"]

    def __init__(self, gplvm_dimensions=None, **kw_args):
        """gplvm_dimensions: dimensions to learn using gplvm, default -1; i.e. all"""
        self.gplvm_dimensions = gplvm_dimensions
        super(GPLVM, self).__init__(**kw_args)

    def setData(self, gplvm_dimensions=None, **kw_args):
        # delegate storage of x/y to the base class first
        GP.setData(self, **kw_args)
        #handle non-informative gplvm_dimensions vector
        if self.gplvm_dimensions is None and gplvm_dimensions is None:
            # default: every input dimension is a learnable latent dimension
            self.gplvm_dimensions = SP.arange(self.x.shape[1])
        elif gplvm_dimensions is not None:
            self.gplvm_dimensions = gplvm_dimensions

    def _update_inputs(self, hyperparams):
        """update the inputs from gplvm models if supplied as hyperparms"""
        if 'x' in hyperparams.keys():
            self.x[:, self.gplvm_dimensions] = hyperparams['x']

    def LML(self, hyperparams, priors=None, **kw_args):
        """
        Calculate the log Marginal likelihood
        for the given logtheta.

        **Parameters:**

        hyperparams : {'covar':CF_hyperparameters, ... }
            The hyperparameters for the log marginal likelihood.

        priors : [:py:class:`lnpriors`]
            the prior beliefs for the hyperparameter values

        Ifilter : [bool]
            Denotes which hyperparameters shall be optimized.
            Thus ::

                Ifilter = [0,1,0]

            has the meaning that only the second
            hyperparameter shall be optimized.

        kw_args :
            All other arguments, explicitly annotated
            when necessary.
        """
        # write latent inputs (if supplied) into self.x before evaluating
        self._update_inputs(hyperparams)
        #covariance hyper
        LML = self._LML_covar(hyperparams)
        #account for prior
        if priors is not None:
            plml = self._LML_prior(hyperparams, priors=priors, **kw_args)
            LML -= SP.array([p[:, 0].sum() for p in plml.values()]).sum()
        return LML

    def LMLgrad(self, hyperparams, priors=None, **kw_args):
        # pdb.set_trace()
        """
        Returns the log Marginal likelihood for the given logtheta.

        **Parameters:**

        hyperparams : {'covar':CF_hyperparameters, ...}
            The hyperparameters which shall be optimized and derived

        priors : [:py:class:`lnpriors`]
            The hyperparameters which shall be optimized and derived
        """
        # Ideriv :
        #       indicator which derivativse to calculate (default: all)
        self._update_inputs(hyperparams)
        # gradient w.r.t. covariance hyperparameters
        RV = self._LMLgrad_covar(hyperparams)
        if self.likelihood is not None:
            RV.update(self._LMLgrad_lik(hyperparams))
        #gradients w.r.t x:
        RV_ = self._LMLgrad_x(hyperparams)
        #update RV
        RV.update(RV_)
        #prior
        if priors is not None:
            plml = self._LML_prior(hyperparams, priors=priors, **kw_args)
            for key in RV.keys():
                RV[key] -= plml[key][:, 1]
        return RV

    ####PRIVATE####

    def _LMLgrad_x(self, hyperparams):
        """GPLVM derivative w.r.t. to latent variables

        Returns {'x': dlMl} with one gradient entry per (sample, latent
        dimension); empty dict when 'x' is not among the hyperparameters.
        """
        if not 'x' in hyperparams:
            return {}
        dlMl = SP.zeros([self.n,len(self.gplvm_dimensions)])
        # W is cached by the covariance-LML computation
        # (NOTE(review): its exact definition lives in the GP base class)
        W = self._covar_cache['W']
        #the standard procedure would be
        #dlMl[n,i] = 0.5*SP.odt(W,dKx_n,i).trace()
        #we can calcualte all the derivatives efficiently; see also interface of Kd_dx of covar
        for i in xrange(len(self.gplvm_dimensions)):
            d = self.gplvm_dimensions[i]
            #dKx is general, not knowing that we are computing the diagonal:
            dKx = self.covar.Kgrad_x(hyperparams['covar'], self.x, self.x, d)
            dKx_diag = self.covar.Kgrad_xdiag(hyperparams['covar'], self.x, d)
            #set diagonal
            dKx.flat[::(dKx.shape[1] + 1)] = dKx_diag
            #precalc elementwise product of W and K
            WK = W * dKx
            if 0:
                #explicit calculation, slow!
                #this is only in here to see what is done
                for n in xrange(self.n):
                    dKxn = SP.zeros([self.n, self.n])
                    dKxn[n, :] = dKx[n, :]
                    dKxn[:, n] = dKx[n, :]
                    dlMl[n, i] = 0.5 * SP.dot(W, dKxn).trace()
                pass
            if 1:
                #fast calculation
                #we need twice the sum WK because of the matrix structure above, WK.diagonal() accounts for the double counting
                dlMl[:, i] = 0.5 * (2 * WK.sum(axis=1) - WK.diagonal())
                pass
        RV = {'x':dlMl}
        return RV
if __name__ == '__main__':
    # demo/self-test: recover simulated latent factors with a linear GPLVM
    from pygp.covar import linear, noise, fixed, combinators
    import logging as LG
    LG.basicConfig(level=LG.DEBUG)
    SP.random.seed(1)
    #1. simulate data: Y = S W^T + noise, with K latent factors
    N = 100
    K = 3
    D = 10
    S = SP.random.randn(N, K)
    W = SP.random.randn(D, K)
    Y = SP.dot(W, S.T).T
    Y += 0.5 * SP.random.randn(N, D)
    # PCA initialization of the latent space
    [Spca, Wpca] = PCA(Y, K)
    #reconstruction
    Y_ = SP.dot(Spca, Wpca.T)
    #construct GPLVM model: fixed-mean + linear + noise covariance
    linear_cf = linear.LinearCFISO(n_dimensions=K)
    noise_cf = noise.NoiseCFISO()
    mu_cf = fixed.FixedCF(SP.ones([N,N]))
    covariance = combinators.SumCF((mu_cf, linear_cf, noise_cf))
    # covariance = combinators.SumCF((linear_cf, noise_cf))
    #no inputs here (later SNPs)
    X = Spca.copy()
    #X = SP.random.randn(N,K)
    gplvm = GPLVM(covar_func=covariance, x=X, y=Y)
    # plain GP on the first output dimension (constructed for comparison only)
    gpr = GP(covar_func=covariance, x=X, y=Y[:, 0])
    #construct hyperparams (log-scale covariance parameters)
    covar = SP.log([0.1, 1.0, 0.1])
    #X are hyperparameters, i.e. we optimize over them also
    #1. this is jointly with the latent X
    X_ = X.copy()
    hyperparams = {'covar': covar, 'x': X_}
    #for testing just covar params alone:
    #hyperparams = {'covar': covar}
    #evaluate log marginal likelihood
    lml = gplvm.LML(hyperparams=hyperparams)
    [opt_model_params, opt_lml] = opt_hyper(gplvm, hyperparams, gradcheck=False)
    Xo = opt_model_params['x']
    # compare PCA-initialized vs. optimized latents against the true factors
    for k in xrange(K):
        print SP.corrcoef(Spca[:, k], S[:, k])
    print "=================="
    for k in xrange(K):
        print SP.corrcoef(Xo[:, k], S[:, k])
|
PMBio/pygp
|
pygp/gp/gplvm.py
|
Python
|
gpl-2.0
| 7,170
|
[
"Gaussian"
] |
e8ed0277aed3c464ab2a713b21696c304f65f8ce3f0c9c5c62be488c4b902e82
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import shutil
import tempfile
import subprocess
from distutils.spawn import find_executable
import mdtraj as md
from mdtraj.testing import get_fn, eq, DocStringFormatTester, skipif
import numpy as np
import scipy.sparse
###############################################################################
# Globals
###############################################################################
HBondDocStringTester = DocStringFormatTester(md.geometry.hbond)
HAVE_DSSP = find_executable('mkdssp')
tmpdir = None
def setup():
    """Module-level test setup: create a scratch directory shared by the tests."""
    global tmpdir
    tmpdir = tempfile.mkdtemp()
def teardown():
    """Module-level test teardown: remove the scratch directory created by setup()."""
    shutil.rmtree(tmpdir)
def test_hbonds():
    # smoke test: kabsch_sander must run on a real structure without raising
    t = md.load(get_fn('2EQQ.pdb'))
    ours = md.geometry.hbond.kabsch_sander(t)
@skipif(not HAVE_DSSP, "This tests required mkdssp to be installed, from http://swift.cmbi.ru.nl/gv/dssp/")
def test_hbonds_against_dssp():
    """Cross-check kabsch_sander energies against the reference mkdssp binary."""
    t = md.load(get_fn('2EQQ.pdb'))[0]
    pdb = os.path.join(tmpdir, 'f.pdb')
    dssp = os.path.join(tmpdir, 'f.pdb.dssp')
    t.save(pdb)
    cmd = ['mkdssp', '-i', pdb, '-o', dssp]
    subprocess.check_output(' '.join(cmd), shell=True)

    energy = scipy.sparse.lil_matrix((t.n_residues, t.n_residues))

    # read the dssp N-H-->O column from the output file
    with open(dssp) as f:
        # skip the lines until the description of each residue's hbonds
        while not f.readline().startswith('  #  RESIDUE AA STRUCTURE'):
            continue

        for i, line in enumerate(f):
            line = line.rstrip()
            # fixed-width fields: (partner offset, energy) pairs for the two
            # N-H-->O columns of the DSSP record
            offset0, e0 = map(float, line[39:50].split(','))
            offset1, e1 = map(float, line[61:72].split(','))
            if e0 <= -0.5:
                energy[int(i+offset0), i] = e0
            if e1 <= -0.5:
                energy[int(i+offset1), i] = e1

    dssp = energy.todense()
    ours = md.geometry.hbond.kabsch_sander(t)[0].todense()

    # There is tricky issues with the rounding right at the -0.5 cutoff,
    # so lets just check for equality with DSSP at -0.6 or less
    eq((dssp < -0.6), (ours < -0.6))
    eq(dssp[dssp < -0.6], ours[ours < -0.6], decimal=1)
def test_baker_hubbard_0():
    """Regression test: baker_hubbard output on 2EQQ must match a frozen snapshot."""
    t = md.load(get_fn('2EQQ.pdb'))
    # print('to view the hbonds defined in 2EQQ by baker_hubbard()')
    # print('put these commands into pymol on top of the pdb:\n')
    # for e in md.geometry.hbond.baker_hubbard(t):
    #     print('distance RANK %d, RANK %d' % (e[1], e[2]))

    # these are the results produced by the algorithm on this protein as
    # of 11/26/13. This unit test basically just ensures that the method
    # runs and produces the same results it did then. It's no guarentee that
    # these are the "TRUE" hydrogen bonds in this system.
    ref = np.array([[0, 10, 8], [0, 11, 7], [69, 73, 54], [76, 82, 65],
                    [119, 131, 89], [140, 148, 265], [166, 177, 122],
                    [181, 188, 231], [209, 217, 215], [221, 225, 184],
                    [228, 239, 186], [235, 247, 216], [262, 271, 143],
                    [298, 305, 115], [186, 191, 215], [413, 419, 392]])
    eq(ref, md.geometry.hbond.baker_hubbard(t))
def test_baker_hubbard_1():
    # no hydrogens in this file -> no hydrogen bonds
    t = md.load(get_fn('1bpi.pdb'))
    eq(np.zeros((0, 3), dtype=int), md.baker_hubbard(t))
def test_baker_hubbard_2():
    """baker_hubbard must not report the same (donor, H, acceptor) triplet twice."""
    t = md.load(get_fn('1vii_sustiva_water.pdb'))
    triplets = md.baker_hubbard(t)
    N = 1000
    # encode each triplet as a single integer so duplicates collide
    rows = triplets[:, 0] * N*N + triplets[:, 1] * N + triplets[:, 2]
    # ensure that there aren't any repeat rows
    eq(len(np.unique(rows)), len(rows))
|
marscher/mdtraj
|
MDTraj/geometry/tests/test_hbonds.py
|
Python
|
lgpl-2.1
| 4,586
|
[
"MDTraj",
"PyMOL"
] |
15513a2fe948f5b0b68ec161cdcab92f000ae4eafa8a978712a1b0b83f1c65ad
|
# -*- coding: utf-8 -*-
'''model.py: underlying model of photo data.'''
__author__ = 'drseergio@gmail.com (Sergey Pisarenko)'
import logging
import os
import sys
from PyQt4 import QtCore, QtGui
try:
from gi.repository import GExiv2
except ImportError:
print ('You must have GExiv2 installed, please visit:\n'
'http://redmine.yorba.org/projects/gexiv2/wiki')
sys.exit(1)
_LABEL_TAG = 'Xmp.xmp.Label'
_TAG_TAG = 'Iptc.Application2.Keywords'
from qeytaks import _DIFFERENT_VAL, _TAG_DIFF_CHAR
class PhotoModel(QtCore.QAbstractListModel):
def __init__(self):
super(PhotoModel, self).__init__()
self.paths = []
self.photos = {}
self.tags = {}
self.labels = {}
def rowCount(self, parent=QtCore.QModelIndex()):
return len(self.paths)
def data(self, index, role):
if not index.isValid or index.row() > len(self.paths):
return QtCore.QVariant()
path = self.paths[index.row()]
if role == QtCore.Qt.DisplayRole:
return QtCore.QVariant(self.photos[path].name())
if role == QtCore.Qt.DecorationRole:
return self.photos[path].icon
return QtCore.QVariant()
def AddPhoto(self, path):
path = os.path.abspath(str(path))
if path in self.paths or not os.path.isfile(path):
return
tags = []
try:
gexiv2_meta = GExiv2.Metadata(path)
tags = gexiv2_meta.get_tag_multiple(_TAG_TAG)
self.labels[path] = gexiv2_meta.get(_LABEL_TAG)
except Exception, e:
logging.error('Failed adding %s', path)
logging.exception(e)
return
self.beginInsertRows(QtCore.QModelIndex(), 0, 0)
self.paths.append(path)
photo = Photo(path, tags)
self.photos[path] = photo
for tag in tags:
if not tag in self.tags.keys():
self.tags[tag] = []
self.tags[tag].append(path)
self.endInsertRows()
def RemovePhotos(self, indexes):
self.beginRemoveRows(QtCore.QModelIndex(), 0, 0)
for index in indexes:
path = self.paths[index.row()]
del self.photos[path]
del self.labels[path]
for tag in self.tags.keys():
if path in self.tags[tag]:
self.tags[tag].remove(path)
del self.paths[index.row()]
self.endRemoveRows()
def ClearPhotos(self):
self.beginRemoveRows(QtCore.QModelIndex(), 0, 0)
self.paths = []
self.photos = {}
self.tags = {}
self.labels = {}
self.endRemoveRows()
def SaveChanges(self, tag_field, label_field, rows, progress):
paths = [self.paths[row] for row in rows]
gexivs = [GExiv2.Metadata(path) for path in paths]
if not label_field == _DIFFERENT_VAL:
for gexiv in gexivs:
gexiv[_LABEL_TAG] = label_field
for path in paths:
self.labels[path] = label_field
tags_all = tag_field.replace(' ', '').replace('\n', '').split(',')
if len(tags_all) == 1 and not tags_all[0]:
tags_opt = []
tags_common = []
else:
tags_opt = [tag[1:] for tag in tags_all if tag[0] == _TAG_DIFF_CHAR]
tags_common = [tag for tag in tags_all if tag[0] != _TAG_DIFF_CHAR]
for i in range(len(paths)):
progress.setValue(i)
path = paths[i]
gexiv = gexivs[i]
tags_curr = self.photos[path].tags
tags_new = tags_curr[:]
for tag in tags_curr:
if tag not in tags_opt and tag not in tags_common:
tags_new.remove(tag)
self.tags[tag].remove(path)
for tag in tags_common:
if tag not in tags_curr:
tags_new.append(tag)
if not tag in self.tags.keys():
self.tags[tag] = []
self.tags[tag].append(path)
self.photos[path].tags = tags_new
gexiv.set_tag_multiple(_TAG_TAG, tags_new)
for i in range(len(gexivs)):
gexiv = gexivs[i]
try:
gexiv.save_file()
except Exception, e:
logging.error('Failed to save %s', paths[i])
logging.exception(e)
class Photo(object):
    """A single photo: its path, a 72x72 thumbnail icon and its tag list."""

    def __init__(self, path, tags):
        self.path = path
        # Scale the image down once at load time; views only ever need
        # the small thumbnail.
        pixmap = QtGui.QIcon(path).pixmap(72, 72)
        self.icon = QtGui.QIcon(pixmap)
        self.tags = tags

    def name(self):
        """Base file name of the photo, used as its display label."""
        return os.path.basename(self.path)
|
drseergio/qeytaks
|
qeytaks/model.py
|
Python
|
gpl-3.0
| 4,137
|
[
"VisIt"
] |
2b31b1e55c62d9230d8d9c972b824547bfe81a792a25ef54c6c5e5283090181b
|
import math
import operator
import os
#
from .memory import MemoryTree
from .optics import *
from . import abcd
import numpy as np
#
from numpy import pi, conj
from collections import OrderedDict as OD
#
def mode_overlap_q(q1, q2, lam=0.001552):
    """
    Mode overlap (power transmission) between two coaxial Gaussian beams.
    see: Applied Optics Volume 23 Page 4187: Alignment of Gaussian beams

    Parameters
    ----------
    q1, q2 : complex
        q parameters of the two beams in the same position.

    Returns
    -------
    float
    """
    waist1 = abcd.q2w0(q1, lam)
    waist2 = abcd.q2w0(q2, lam)
    # Longitudinal separation of the two waists along the axis.
    dz = np.real(q1) - np.real(q2)
    ratio_term = (waist1 / waist2 + waist2 / waist1) ** 2
    offset_term = (dz * lam / np.pi) ** 2 / (waist1 ** 2 * waist2 ** 2)
    return 4.0 / (ratio_term + offset_term)
#
#
class Gaussian_Beam(object):
    '''
    A Gaussian beam.  All lengths are in mm.
    --------
    Defined by:
    w0: waist radius,
    z0: waist position,
    lam: wavelength
    n: refractive index out of optics
    beam_name: name of the beam
    -------
    Derived attributes: q0 (complex beam parameter at the waist),
    zR (Rayleigh range), div (far-field divergence).
    Class methods provide alternate constructors.
    '''

    def __init__(self, w0=1, z0=0, lam=0.001552, n=1.0, beam_name="beam"):
        self.w0 = w0
        self.z0 = z0
        self.lam = lam
        self.n = n  # refractive index of the medium outside the optics
        self.q0 = abcd.w02q(w0=self.w0, lam=self.lam, n=self.n)
        self.zR = w0 ** 2 * pi * n / lam
        self.div = abcd.q2div(q=self.q0, lam=self.lam, n=self.n)
        self.beam_name = beam_name

    @classmethod
    def from_q(cls, q, z, lam=0.001552, n=1.0, beam_name="beam"):
        '''Build a beam from its complex q parameter at position z.'''
        w0_ = abcd.q2w0(q)
        z0_ = z - np.real(q)
        return cls(w0_, z0_, lam, n, beam_name)

    @classmethod
    def from_w_div(cls, w, z, div, lam=0.001552, n=1.0, beam_name="beam"):
        '''
        Build a beam from its radius and divergence.

        div: divergence in degrees; positive sign denotes diverging z
        w, z: beam radius "w" at a position "z" in millimeters
        '''
        w0_ = lam / np.tan(div * np.pi / 180) / n / pi
        zR_ = w0_ ** 2 * pi * n / lam
        z0_ = z - np.sqrt((w / w0_) ** 2 - 1.0) * zR_
        return cls(w0_, z0_, lam, n, beam_name)

    def get_w(self, z):
        '''Beam radius at position z.'''
        p_ = abs(self.z0 - z)
        return self.w0 * np.sqrt(1.0 + (p_ / self.zR) ** 2)

    def get_R(self, z):
        '''
        Magnitude of the wavefront radius of curvature at position z.

        At the waist (z == z0) the wavefront is flat, so R is infinite;
        the previous implementation raised ZeroDivisionError there.
        '''
        p_ = abs(self.z0 - z)
        if p_ == 0:
            return float('inf')
        return p_ * (1.0 + (self.zR / p_) ** 2)
#
class Optical_Path(object):
    '''
    A sequence of optics and Gaussian beams along one axis.
    All lengths are in millimeters.

    optics_dict: OrderedDict of Optics keyed by part name, sorted by start position
    beam_dict: OrderedDict of Gaussian_Beam keyed by beam name, sorted by waist position
    path_length: beam plotting covers positions from 0 to path_length
    in_MemTree: the MemoryTree object the path was created from (or None)
    OP_name: the name of the optical path
    '''

    def __init__(self, optics_dict=None, beam_dict=None,
                 path_length=500.0, MemTree=None, OP_name="Path"):
        # BUGFIX: the previous version used mutable OrderedDict() default
        # arguments, so every Optical_Path() constructed with defaults
        # shared the *same* dictionaries.  None sentinels fix that.
        self.optics_dict = OD() if optics_dict is None else optics_dict
        self.beam_dict = OD() if beam_dict is None else beam_dict
        self.path_length = path_length
        if MemTree is None or isinstance(MemTree, MemoryTree):
            self.in_MemTree = MemTree  # read from .yml
        else:
            raise TypeError('MemTree should be a MemoryTree object from .yml file')

    @classmethod
    def load_yml(cls, filename='default'):
        '''
        Create an optical path from a .yml file shipped in
        optical_path_config/ (file name given without the .yml extension).
        '''
        _configdir = os.path.join(os.path.dirname(__file__), "optical_path_config")
        _file = os.path.join(_configdir, filename + '.yml')
        return cls.from_MemTree(MemoryTree(_file))

    @classmethod
    def load_yml_from_full_path(cls, filepath='/User/default.yml'):
        '''Create an optical path from a .yml file given by absolute path.'''
        return cls.from_MemTree(MemoryTree(filepath))

    @classmethod
    def from_MemTree(cls, MemTree):
        '''Build an Optical_Path from a parsed MemoryTree configuration.'''
        if not isinstance(MemTree, MemoryTree):
            raise TypeError("a MemTree for optical path is needed")
        _beam = MemTree.beam
        _optics = MemTree.optics

        # Collect lenses: thin optics whose start and stop coincide.
        _optics_dict = {}
        for x in _optics._dict.keys():
            if x.split('_')[0] == 'lens':
                _position = float(_optics[x]['position'])
                _focal = float(_optics[x]['focal_length'])
                _lens = Optics(start_position=_position, stop_position=_position,
                               abcd_matrix=abcd.Mlens(_focal), part_name=x)
                _optics_dict[_lens.part_name] = _lens
        _OD_optics_dict = OD(sorted(_optics_dict.items(),
                                    key=lambda item: item[1].start_position))

        # Collect beams, initialised either from (w, z, divergence) or
        # from (w0, z0).
        _beam_dict = {}
        for x in _beam._dict.keys():
            if _beam[x]['initialize_method'] == 'w_z_div':
                _gau = Gaussian_Beam.from_w_div(w=_beam[x]['w_z'][0],
                                                z=_beam[x]['w_z'][1],
                                                div=_beam[x]['div'],
                                                lam=_beam[x]['wavelength'],
                                                n=_beam[x]['refrective_n'],
                                                beam_name=x)
                _beam_dict[_gau.beam_name] = _gau
            elif _beam[x]['initialize_method'] == 'w0_z0':
                _gau = Gaussian_Beam(w0=_beam[x]['w0_z0'][0],
                                     z0=_beam[x]['w0_z0'][1],
                                     lam=_beam[x]['wavelength'],
                                     n=_beam[x]['refrective_n'],
                                     beam_name=x)
                _beam_dict[_gau.beam_name] = _gau
        _OD_beam_dict = OD(sorted(_beam_dict.items(), key=lambda item: item[1].z0))

        _path_length = MemTree.general.path_length
        return cls(_OD_optics_dict, _OD_beam_dict, _path_length, MemTree)

    def show_path(self):
        '''Print a human-readable summary of all optics and beams.'''
        print("Optics --- ")
        for x in self.optics_dict.values():
            print('\t' + x.part_name + ": @" + str(x.start_position))
        print("Beams --- ")
        for x in self.beam_dict.values():
            print('\t' + x.beam_name + ": z0 " + str(x.z0) +
                  " w0 " + str(x.w0) + '@' + str(x.lam))

    def plotdata_OP(self, points=500):
        '''
        Return [zs, [w_lists]] for plotting the optical path: sample
        positions from 0 to path_length + 10 and, for each beam, the
        beam radius w after propagating through all optics.
        '''
        zs_to_plot = np.linspace(0, self.path_length + 10, points)
        _Bplot = []
        for b in self.beam_dict.values():
            w_to_plot = [abcd.q2w(abcd.qpropagate(b.z0, b.q0, self.Olist_qp, z), b.lam)
                         for z in zs_to_plot]
            _Bplot.append(w_to_plot)
        return [zs_to_plot, _Bplot]

    @property
    def Olist_qp(self):
        '''List of [position, abcd_matrix] pairs usable by abcd.qpropagate.'''
        _Olist = []
        for o in self.optics_dict.values():
            if o.start_position == o.stop_position:
                _Olist.append([o.start_position, o.abcd_matrix])
        return _Olist

    @property
    def modematching(self):
        '''
        Mode overlap between consecutive beams, each propagated to the
        end of the path.  Returns a list of len(beam_dict) - 1 values.
        '''
        _q_list_at_OP_end = []
        _matching_list = []
        # lam is taken from each beam; beams on one path should share it.
        for _index, b in enumerate(self.beam_dict.values()):
            _q_m = abcd.qpropagate(b.z0, b.q0, self.Olist_qp, self.path_length)
            _q_list_at_OP_end.append(_q_m)
            if _index > 0:
                _m = mode_overlap_q(_q_list_at_OP_end[_index],
                                    _q_list_at_OP_end[_index - 1],
                                    b.lam)
                _matching_list.append(_m)
        return _matching_list

    def move_beam(self, beamname='', dz=0.0):
        '''Shift the waist of the named beam by dz (mm).'''
        # Catch only KeyError: the old bare "except:" also converted
        # unrelated failures into a misleading "name not found" error.
        try:
            self.beam_dict[beamname].z0 += dz
        except KeyError:
            raise KeyError('KeyError: Beam Name is not found')

    def move_optics(self, opticsname='', dz=0.0):
        '''Shift the named optics element by dz (mm).'''
        try:
            self.optics_dict[opticsname].start_position += dz
            self.optics_dict[opticsname].stop_position += dz
        except KeyError:
            raise KeyError('KeyError: Optics Name is not found')
|
XueshiGuo/Gau-modematching
|
modematching/gaussian_beam.py
|
Python
|
mit
| 8,677
|
[
"Gaussian"
] |
1bbbaf7c03969466dd47c031c47f320abcaa31cd06f663a22f8271094e4d25ed
|
from django.shortcuts import render, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from datetime import datetime
# Import the Category model
from rango.models import Category, Page, UserProfile
from rango.forms import CategoryForm, PageForm, UserForm, UserProfileForm
from rango.bing_search import run_query
def index(request):
    """Front page: top five categories by likes, top five pages by views,
    plus a session-based visit counter that increments at most once per day.
    """
    context_dict = {
        'categories': Category.objects.order_by('-likes')[:5],
        'top_pages': Page.objects.order_by('-views')[:5],
    }

    visits = request.session.get('visits')
    if not visits:
        visits = 1
    update_last_visit = False

    last_visit = request.session.get('last_visit')
    if last_visit:
        # Stored via str(datetime); strip the microseconds before parsing.
        last_visit_time = datetime.strptime(last_visit[:-7], '%Y-%m-%d %H:%M:%S')
        # Count a new visit only once a full day has elapsed.
        if (datetime.now() - last_visit_time).days > 0:
            visits += 1
            update_last_visit = True
    else:
        # First visit in this session: initialise the cookie.
        update_last_visit = True

    if update_last_visit:
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = visits
    context_dict['visits'] = visits

    return render(request, 'rango/index.html', context_dict)
def about(request):
    """Render the static 'about' page.

    Uses render() like the rest of this module instead of the legacy
    render_to_response(template, dict, RequestContext) pattern; the
    rendered response is the same.
    """
    # Note the key boldmessage is the same as {{ boldmessage }} in the template!
    context_dict = {'boldmessage': "Site is under construction!"}
    return render(request, 'rango/about.html', context_dict)
def category(request, category_name_slug):
    """Show one category and its pages; render an empty context when the
    slug does not match any category."""
    context_dict = {}
    try:
        cat = Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        cat = None
    if cat is not None:
        context_dict['category_name'] = cat.name
        context_dict['pages'] = Page.objects.filter(category=cat)
        context_dict['category'] = cat
        context_dict['category_name_slug'] = category_name_slug
    return render(request, 'rango/category.html', context_dict)
def add_category(request):
    """Display and process the 'add category' form."""
    form = CategoryForm(request.POST) if request.method == 'POST' else CategoryForm()
    if request.method == 'POST':
        if form.is_valid():
            form.save(commit=True)
            # Saved: show the (updated) front page.
            return index(request)
        print(form.errors)
    # GET, or invalid POST: re-render the (possibly bound) form.
    return render(request, 'rango/add_category.html', {'form': form})
def add_page(request, category_name_slug):
    """Display and process the 'add page' form for one category."""
    try:
        cat = Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        cat = None

    form = PageForm()
    if request.method == 'POST':
        form = PageForm(request.POST)
        if form.is_valid():
            if cat:
                page = form.save(commit=False)
                page.category = cat
                page.views = 0
                page.save()
                return category(request, category_name_slug)
        else:
            print(form.errors)

    context_dict = {'form': form, 'category': cat,
                    'category_name_slug': category_name_slug}
    return render(request, 'rango/add_page.html', context_dict)
@login_required
def restricted(request):
    """Page that only authenticated users may view."""
    context_dict = {}
    return render(request, 'rango/restricted.html', context_dict)
def search(request):
    """Run a Bing search for the POSTed query and render the results."""
    result_list = []
    if request.method == 'POST':
        # .get() avoids a KeyError when the form posts without a 'query'
        # field; the previous code indexed request.POST directly.
        query = request.POST.get('query', '').strip()
        if query:
            # Run our Bing function to get the results list!
            result_list = run_query(query)
    return render(request, 'rango/search.html', {'result_list': result_list})
|
bjing/tango_with_jango_project
|
rango/views.py
|
Python
|
gpl-3.0
| 4,884
|
[
"VisIt"
] |
b599f0c447084733dd1a7d108af9588a8babfdb5d8be646417e8ad70125be4e5
|
import sys,os
sys.path.insert(0,os.path.join(sys.path[0],'..'))
from xenoGI import parameters,blast,genomes,xenoGI
# Identifies xenoGI proteins with significant similarity to a provided
# protein multifasta, printing their xenoGI gene names to
# standard out, one per line. Expects to be run in a xenoGI working
# directory.
if __name__ == "__main__":

    # Command line: <paramFN> <strainInfoFN> <proteinMultiFastaPath>
    paramFN = sys.argv[1]
    strainInfoFN = sys.argv[2] # include as argument so can pass in scaffold only set in xlMode case
    proteinMultiFastaPath = sys.argv[3]

    # Load the xenoGI parameter dict, gene metadata, and strain list.
    paramD = parameters.createParametersD(parameters.baseParamStr,paramFN)

    genesO = genomes.genes(paramD['geneInfoFN'])
    genesO.initializeGeneNumToNameD(paramD['geneInfoFN'])
    strainNamesT = xenoGI.readStrainInfoFN(strainInfoFN)

    # Similarity thresholds applied when parsing the blast output.
    evalueThresh = paramD['evalueThresh']
    alignCoverThresh = paramD['alignCoverThresh']
    percIdentThresh = paramD['percIdentThresh']
    blastFilePath = paramD['blastFilePath']
    blastFileJoinStr = paramD['blastFileJoinStr']

    # Blast them vs. all strains
    fastaDir = paramD['fastaFilePath'].split('*')[0]
    allStrainsFileNamesL = []
    for strain in strainNamesT:
        allStrainsFileNamesL.append(os.path.join(fastaDir,strain+"_prot.fa"))

    blast.runBlast([proteinMultiFastaPath],allStrainsFileNamesL,paramD)

    # parse blast to identify ifams with similarity to model seqs
    blastDir = os.path.split(blastFilePath)[0]
    proteinMultiFastaFN = os.path.split(proteinMultiFastaPath)[-1]
    # remove _prot.fa
    proteinMultiFastaFN = proteinMultiFastaFN.split("_prot.fa")[0]
    # if additional extension
    proteinMultiFastaStem = os.path.splitext(proteinMultiFastaFN)[0]

    # Collect (geneNum, geneName) pairs for every xenoGI gene with a
    # blast hit that passes all three thresholds, in any strain.
    outS = set()
    for strainName in strainNamesT:
        # remove dir (if any) from proteinMultiFastaPath
        fileStr = proteinMultiFastaStem+blastFileJoinStr+strainName+'.out'
        fn = os.path.join(blastDir,fileStr)
        for g1,g2,evalue,alCov,pident,score in blast.parseBlastFile(fn,evalueThresh,alignCoverThresh,percIdentThresh):
            # g2 will always be a xenoGI gene
            outS.add((g2,genesO.numToName(g2)))

    # sort by gene number and print
    for geneNum,geneStr in sorted(outS):
        print(geneStr)
|
ecbush/xenoGI
|
misc/getProteinsWithBlastHitsVsMultifasta.py
|
Python
|
gpl-3.0
| 2,242
|
[
"BLAST"
] |
0e5f6dcd95d3515ca3fa983ce9ea0ca70805806af9e2c9be0942eeba5b210f64
|
import sys
import subprocess
from .exceptions import PyperclipException
EXCEPT_MSG = """
Pyperclip could not find a copy/paste mechanism for your system.
For more information, please visit https://pyperclip.readthedocs.org """
PY2 = sys.version_info[0] == 2
text_type = unicode if PY2 else str # noqa
def init_osx_clipboard():
    """Return (copy, paste) callables backed by macOS pbcopy/pbpaste."""
    def copy_osx(text):
        proc = subprocess.Popen(['pbcopy', 'w'],
                                stdin=subprocess.PIPE, close_fds=True)
        proc.communicate(input=text.encode('utf-8'))

    def paste_osx():
        proc = subprocess.Popen(['pbpaste', 'r'],
                                stdout=subprocess.PIPE, close_fds=True)
        out, _ = proc.communicate()
        return out.decode('utf-8')

    return copy_osx, paste_osx
def init_gtk_clipboard():
    """Return (copy, paste) callables backed by the GTK clipboard."""
    import gtk

    def copy_gtk(text):
        # Keep a module-global reference so the clipboard object stays
        # alive long enough for store() to take effect.
        global cb
        cb = gtk.Clipboard()
        cb.set_text(text)
        cb.store()

    def paste_gtk():
        contents = gtk.Clipboard().wait_for_text()
        # for python 2, wait_for_text returns None when the clipboard is
        # blank; normalise that to an empty string.
        return '' if contents is None else contents

    return copy_gtk, paste_gtk
def init_qt_clipboard():
    """Return (copy, paste) callables backed by the Qt application clipboard."""
    # $DISPLAY should exist
    from PyQt4.QtGui import QApplication

    # Reuse the running QApplication if there is one; otherwise make our own.
    app = QApplication.instance() or QApplication([])

    def copy_qt(text):
        app.clipboard().setText(text)

    def paste_qt():
        return text_type(app.clipboard().text())

    return copy_qt, paste_qt
def init_xclip_clipboard():
    """Return (copy, paste) callables that shell out to xclip."""
    def copy_xclip(text):
        proc = subprocess.Popen(['xclip', '-selection', 'c'],
                                stdin=subprocess.PIPE, close_fds=True)
        proc.communicate(input=text.encode('utf-8'))

    def paste_xclip():
        proc = subprocess.Popen(['xclip', '-selection', 'c', '-o'],
                                stdout=subprocess.PIPE, close_fds=True)
        out, _ = proc.communicate()
        return out.decode('utf-8')

    return copy_xclip, paste_xclip
def init_xsel_clipboard():
    """Return (copy, paste) callables that shell out to xsel."""
    def copy_xsel(text):
        proc = subprocess.Popen(['xsel', '-b', '-i'],
                                stdin=subprocess.PIPE, close_fds=True)
        proc.communicate(input=text.encode('utf-8'))

    def paste_xsel():
        proc = subprocess.Popen(['xsel', '-b', '-o'],
                                stdout=subprocess.PIPE, close_fds=True)
        out, _ = proc.communicate()
        return out.decode('utf-8')

    return copy_xsel, paste_xsel
def init_klipper_clipboard():
    """Return (copy, paste) callables that talk to KDE's klipper over qdbus."""
    def copy_klipper(text):
        proc = subprocess.Popen(
            ['qdbus', 'org.kde.klipper', '/klipper', 'setClipboardContents',
             text.encode('utf-8')],
            stdin=subprocess.PIPE, close_fds=True)
        proc.communicate(input=None)

    def paste_klipper():
        proc = subprocess.Popen(
            ['qdbus', 'org.kde.klipper', '/klipper', 'getClipboardContents'],
            stdout=subprocess.PIPE, close_fds=True)
        out, _ = proc.communicate()
        # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
        # TODO: https://github.com/asweigart/pyperclip/issues/43
        contents = out.decode('utf-8')
        # klipper appends a trailing newline even to empty contents
        assert len(contents) > 0
        # make sure that newline is there
        assert contents.endswith('\n')
        if contents.endswith('\n'):
            contents = contents[:-1]
        return contents

    return copy_klipper, paste_klipper
def init_no_clipboard():
    """Fallback used when no copy/paste mechanism could be found.

    Returns a (copy, paste) pair of falsy stand-ins; calling either one
    raises PyperclipException with instructions for the user.
    """
    class ClipboardUnavailable(object):
        def __call__(self, *args, **kwargs):
            raise PyperclipException(EXCEPT_MSG)

        # Python 2 consults __nonzero__ for truth testing, Python 3
        # consults __bool__ -- define whichever this interpreter uses.
        if PY2:
            def __nonzero__(self):
                return False
        else:
            def __bool__(self):
                return False
    return ClipboardUnavailable(), ClipboardUnavailable()
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/pandas/io/clipboard/clipboards.py
|
Python
|
mit
| 4,047
|
[
"VisIt"
] |
7736496e790f3080724488e657469887442b1721c9be208b89c419d253944527
|
"""
API for initiating and tracking requests for credit from a provider.
"""
import datetime
import logging
import uuid
import pytz
from django.db import transaction
from lms.djangoapps.django_comment_client.utils import JsonResponse
from openedx.core.djangoapps.credit.exceptions import (
UserIsNotEligible,
CreditProviderNotConfigured,
RequestAlreadyCompleted,
CreditRequestNotFound,
InvalidCreditStatus,
)
from openedx.core.djangoapps.credit.models import (
CreditProvider,
CreditRequirementStatus,
CreditRequest,
CreditEligibility,
)
from openedx.core.djangoapps.credit.signature import signature, get_shared_secret_key
from student.models import User
from util.date_utils import to_timestamp
# TODO: Cleanup this mess! ECOM-2908
log = logging.getLogger(__name__)
def get_credit_providers(providers_list=None):
    """Look up available credit providers.

    Arguments:
        providers_list (list of str or None): If given, restrict the
            result to providers whose ids appear in this list; otherwise
            return every available provider.

    Returns:
        list of dicts, one per provider, with keys such as "id", "name",
        "url", "status_url", "description", "enable_integration" and
        "fulfillment_instructions".
    """
    return CreditProvider.get_credit_providers(providers_list=providers_list)
def get_credit_provider_info(request, provider_id):  # pylint: disable=unused-argument
    """Return a JsonResponse describing one credit provider.

    Args:
        provider_id (str): The identifier for the credit provider.

    Returns:
        JsonResponse whose body contains the provider's id, display name,
        URLs, description, integration flag, fulfillment instructions and
        thumbnail URL -- or an empty object when the provider is unknown.
    """
    provider = CreditProvider.get_credit_provider(provider_id=provider_id)
    if not provider:
        return JsonResponse({})
    return JsonResponse({
        "provider_id": provider.provider_id,
        "display_name": provider.display_name,
        "provider_url": provider.provider_url,
        "provider_status_url": provider.provider_status_url,
        "provider_description": provider.provider_description,
        "enable_integration": provider.enable_integration,
        "fulfillment_instructions": provider.fulfillment_instructions,
        "thumbnail_url": provider.thumbnail_url,
    })
@transaction.atomic
def create_credit_request(course_key, provider_id, username):
    """
    Initiate a request for credit from a credit provider.

    This will return the parameters that the user's browser will need to POST
    to the credit provider. It does NOT calculate the signature.

    Only users who are eligible for credit (have satisfied all credit requirements) are allowed to make requests.

    A provider can be configured either with *integration enabled* or not.
    If automatic integration is disabled, this method will simply return
    a URL to the credit provider and method set to "GET", so the student can
    visit the URL and request credit directly. No database record will be created
    to track these requests.

    If automatic integration *is* enabled, then this will also return the parameters
    that the user's browser will need to POST to the credit provider.
    These parameters will be digitally signed using a secret key shared with the credit provider.

    A database record will be created to track the request with a 32-character UUID.
    The returned dictionary can be used by the user's browser to send a POST request to the credit provider.

    If a pending request already exists, this function should return a request description with the same UUID.
    (Other parameters, such as the user's full name may be different than the original request).

    If a completed request (either accepted or rejected) already exists, this function will
    raise an exception. Users are not allowed to make additional requests once a request
    has been completed.

    Arguments:
        course_key (CourseKey): The identifier for the course.
        provider_id (str): The identifier of the credit provider.
        username (str): The user initiating the request.

    Returns: dict

    Raises:
        UserIsNotEligible: The user has not satisfied eligibility requirements for credit.
        CreditProviderNotConfigured: The credit provider has not been configured for this course.
        RequestAlreadyCompleted: The user has already submitted a request and received a response
            from the credit provider.

    Example Usage:
        >>> create_credit_request(course.id, "hogwarts", "ron")
        {
            "url": "https://credit.example.com/request",
            "method": "POST",
            "parameters": {
                "request_uuid": "557168d0f7664fe59097106c67c3f847",
                "timestamp": 1434631630,
                "course_org": "HogwartsX",
                "course_num": "Potions101",
                "course_run": "1T2015",
                "final_grade": "0.95",
                "user_username": "ron",
                "user_email": "ron@example.com",
                "user_full_name": "Ron Weasley",
                "user_mailing_address": "",
                "user_country": "US",
                "signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
            }
        }

    """
    # Eligibility is the gatekeeper: a missing row means the user never
    # satisfied the course's credit requirements.
    try:
        user_eligibility = CreditEligibility.objects.select_related('course').get(
            username=username,
            course__course_key=course_key
        )
        credit_course = user_eligibility.course
        credit_provider = CreditProvider.objects.get(provider_id=provider_id)
    except CreditEligibility.DoesNotExist:
        log.warning(
            u'User "%s" tried to initiate a request for credit in course "%s", '
            u'but the user is not eligible for credit',
            username, course_key
        )
        raise UserIsNotEligible
    except CreditProvider.DoesNotExist:
        log.error(u'Credit provider with ID "%s" has not been configured.', provider_id)
        raise CreditProviderNotConfigured

    # Check if we've enabled automatic integration with the credit
    # provider. If not, we'll show the user a link to a URL
    # where the user can request credit directly from the provider.
    # Note that we do NOT track these requests in our database,
    # since the state would always be "pending" (we never hear back).
    if not credit_provider.enable_integration:
        return {
            "url": credit_provider.provider_url,
            "method": "GET",
            "parameters": {}
        }
    else:
        # If automatic credit integration is enabled, then try
        # to retrieve the shared signature *before* creating the request.
        # That way, if there's a misconfiguration, we won't have requests
        # in our system that we know weren't sent to the provider.
        shared_secret_key = get_shared_secret_key(credit_provider.provider_id)
        if shared_secret_key is None:
            msg = u'Credit provider with ID "{provider_id}" does not have a secret key configured.'.format(
                provider_id=credit_provider.provider_id
            )
            log.error(msg)
            raise CreditProviderNotConfigured(msg)

        # Initiate a new request if one has not already been created
        credit_request, created = CreditRequest.objects.get_or_create(
            course=credit_course,
            provider=credit_provider,
            username=username,
        )

        # Check whether we've already gotten a response for a request,
        # If so, we're not allowed to issue any further requests.
        # Skip checking the status if we know that we just created this record.
        if not created and credit_request.status != "pending":
            log.warning(
                (
                    u'Cannot initiate credit request because the request with UUID "%s" '
                    u'exists with status "%s"'
                ), credit_request.uuid, credit_request.status
            )
            raise RequestAlreadyCompleted

        if created:
            credit_request.uuid = uuid.uuid4().hex

        # Retrieve user account and profile info
        user = User.objects.select_related('profile').get(username=username)

        # Retrieve the final grade from the eligibility table
        try:
            final_grade = CreditRequirementStatus.objects.get(
                username=username,
                requirement__namespace="grade",
                requirement__name="grade",
                requirement__course__course_key=course_key,
                status="satisfied"
            ).reason["final_grade"]

            # NOTE (CCB): Limiting the grade to seven characters is a hack for ASU.
            if len(unicode(final_grade)) > 7:
                final_grade = u'{:.5f}'.format(final_grade)
            else:
                final_grade = unicode(final_grade)
        except (CreditRequirementStatus.DoesNotExist, TypeError, KeyError):
            msg = 'Could not retrieve final grade from the credit eligibility table for ' \
                  'user [{user_id}] in course [{course_key}].'.format(user_id=user.id, course_key=course_key)
            log.exception(msg)
            raise UserIsNotEligible(msg)

        # Payload the user's browser will POST to the provider; it is
        # signed below with the shared secret so the provider can verify it.
        parameters = {
            "request_uuid": credit_request.uuid,
            "timestamp": to_timestamp(datetime.datetime.now(pytz.UTC)),
            "course_org": course_key.org,
            "course_num": course_key.course,
            "course_run": course_key.run,
            "final_grade": final_grade,
            "user_username": user.username,
            "user_email": user.email,
            "user_full_name": user.profile.name,
            "user_mailing_address": "",
            "user_country": (
                user.profile.country.code
                if user.profile.country.code is not None
                else ""
            ),
        }

        credit_request.parameters = parameters
        credit_request.save()

        if created:
            log.info(u'Created new request for credit with UUID "%s"', credit_request.uuid)
        else:
            log.info(
                u'Updated request for credit with UUID "%s" so the user can re-issue the request',
                credit_request.uuid
            )

        # Sign the parameters using a secret key we share with the credit provider.
        parameters["signature"] = signature(parameters, shared_secret_key)

        return {
            "url": credit_provider.provider_url,
            "method": "POST",
            "parameters": parameters
        }
def update_credit_request_status(request_uuid, provider_id, status):
    """
    Approve or reject a student's pending credit request.

    The caller is responsible for authentication and authorization (for
    example, checking the signature of the message received from the
    credit provider); this function performs no such checks.  It is
    idempotent: setting a request to the status it already has is a no-op
    beyond the save.

    Arguments:
        request_uuid (str): The unique identifier for the credit request.
        provider_id (str): Identifier for the credit provider.
        status (str): Either "approved" or "rejected"

    Returns: None

    Raises:
        CreditRequestNotFound: No request exists that is associated with the given provider.
        InvalidCreditStatus: The status is not either "approved" or "rejected".
    """
    allowed_statuses = [CreditRequest.REQUEST_STATUS_APPROVED,
                        CreditRequest.REQUEST_STATUS_REJECTED]
    if status not in allowed_statuses:
        raise InvalidCreditStatus

    try:
        credit_request = CreditRequest.objects.get(uuid=request_uuid, provider__provider_id=provider_id)
    except CreditRequest.DoesNotExist:
        msg = (
            u'Credit provider with ID "{provider_id}" attempted to '
            u'update request with UUID "{request_uuid}", but no request '
            u'with this UUID is associated with the provider.'
        ).format(provider_id=provider_id, request_uuid=request_uuid)
        log.warning(msg)
        raise CreditRequestNotFound(msg)

    previous_status = credit_request.status
    credit_request.status = status
    credit_request.save()
    log.info(
        u'Updated request with UUID "%s" from status "%s" to "%s" for provider with ID "%s".',
        request_uuid, previous_status, status, provider_id
    )
def get_credit_requests_for_user(username):
    """
    Return every credit request initiated by *username*.

    Arguments:
        username (unicode): The username of the user who initiated the requests.

    Returns:
        list of dicts, each with "uuid", "timestamp", "course_key",
        "provider" ({"id", "display_name"}) and "status" ("pending",
        "approved" or "rejected").
    """
    return CreditRequest.credit_requests_for_user(username)
def get_credit_request_status(username, course_key):
    """Get the credit request status.

    Look up the latest credit request the user has made for the given
    course (for any credit provider) and summarize it. The valid
    statuses are 'pending', 'approved' or 'rejected'.

    Args:
        username(str): The username of user
        course_key(CourseKey): The course locator key

    Returns:
        dict: Summary of the user's credit request, or an empty dict
        if no request has been made.
    """
    request = CreditRequest.get_user_request_status(username, course_key)
    if not request:
        # No request on record for this user/course combination.
        return {}
    return {
        "uuid": request.uuid,
        "timestamp": request.modified,
        "course_key": request.course.course_key,
        "provider": {
            "id": request.provider.provider_id,
            "display_name": request.provider.display_name,
        },
        "status": request.status,
    }
|
simbs/edx-platform
|
openedx/core/djangoapps/credit/api/provider.py
|
Python
|
agpl-3.0
| 15,608
|
[
"VisIt"
] |
a24b1fd841d4f50347df08a6ec445e3db5534900a9a8d6f6f24579cb59345cbc
|
#!/usr/bin/env python
import sys
import numpy
import _hmm
class HMM:
    """A simple HMM implementation with dependency only on numpy able to handle discrete and gaussian-mixture HMMs.

    All of the numerically heavy inner loops are delegated to the
    compiled `_hmm` extension module; this class owns the parameter
    arrays and orchestrates the calls.
    """

    def __init__(self, num_states=2, num_distributions=1, transitions=None, pi=None, distributions=None, seed=None):
        """Initialize the model with values some or all of the arguments and randomly initializing the remainder.

        Arguments:
            num_states (int): number of hidden states.
            num_distributions (int): number of gaussian mixture components per state.
            transitions: optional [num_states, num_states] transition matrix.
            pi: optional [num_states] initial-state probabilities.
            distributions: optional [num_states, num_distributions, 3] array;
                the last axis holds each component's (weight, mean, variance).
            seed: optional integer seed for the internal RandomState.
        """
        self.num_states = num_states
        self.num_dists = num_distributions
        # There is no error checking to ensure that the number of states/distributions match the shape of passed values
        if seed is None:
            self.rng = numpy.random.RandomState()
        else:
            self.rng = numpy.random.RandomState(seed=int(seed))
        if transitions is not None:
            self.transitions = numpy.array(transitions, dtype=numpy.float64)
        else:
            self.transitions = self.rng.rand(num_states, num_states).astype(numpy.float64)
        # Each row is renormalized so outgoing probabilities sum to one
        # (a no-op for already-normalized user input).
        self.transitions /= numpy.sum(self.transitions, axis=1).reshape(-1, 1)
        if pi is not None:
            self.pi = numpy.array(pi, dtype=numpy.float64)
        else:
            self.pi = self.rng.rand(num_states).astype(numpy.float64)
        self.pi /= numpy.sum(self.pi, axis=0)
        # distributions should be passed as a nested list or numpy array with the following characteristics:
        # shape - [num_states, num_mixtures, 3]
        # The last dimension contains the guassian components weight, mean, and variance
        if distributions is None:
            self.distributions = self.rng.rand(self.num_states, self.num_dists, 3).astype(numpy.float64)
        else:
            self.distributions = numpy.array(distributions).astype(numpy.float64)
        # Mixture weights within each state are normalized to sum to one.
        self.distributions[:, :, 0] /= numpy.sum(self.distributions[:, :, 0], axis=1).reshape(-1, 1)

    def find_probabilities(self, observations):
        """Calculate the probability of observations for each state and, if applicable, for each mixture component.

        Fills self.probs with shape [num_states, num_dists + 1, T].
        The extra final slot along axis 1 holds the per-state emission
        probability (it is the slot read as probs[:, -1, :] by
        find_path); the per-component values occupy the first
        num_dists slots.  Actual computation happens in _hmm.
        """
        self.probs = numpy.zeros((self.num_states, self.num_dists + 1, observations.shape[0]), dtype=numpy.float64)
        _hmm.find_probabilities(observations, self.distributions, self.probs)

    def find_alphas(self):
        """Calculate the forward probability of each state at each time given all previous observations."""
        # self.alphas [num_states, num_observations] is the probability of being in that state after observations up
        # to that point. For example self.alphas[1, 10] is the probability of being in state1 after the first 10
        # observations
        # self.scalars [num_observations] rescales each component so there is no variable overflow.
        self.alphas = numpy.zeros((self.num_states, self.probs.shape[2]), dtype=numpy.float64)
        self.scalars = numpy.zeros(self.probs.shape[2], dtype=numpy.float64)
        _hmm.find_alphas(self.probs, self.pi, self.transitions, self.alphas, self.scalars)

    def find_betas(self):
        """Calculate the probability of reaching a state given all of the future observations."""
        # self.betas [num_states, num_observations] is the probability of being in that state after observations past
        # that point. For example self.betas[1, 10] is the probability of being in state1 followed by observations 11
        # and higher.
        # Reuses self.scalars computed by find_alphas, so find_alphas
        # must be called first.
        self.betas = numpy.zeros((self.num_states, self.probs.shape[2]), dtype=numpy.float64)
        _hmm.find_betas(self.probs, self.transitions, self.scalars, self.betas)

    def find_etas(self):
        """Calculate the probability of being in state i and time t-1 and transitioning to state j at time t."""
        # self.etas [num_states, num_states, num_observations - 1] is the probability of observing the transition at
        # time t-1 to time t from state i to state j.
        self.etas = numpy.zeros((self.num_states, self.num_states, self.probs.shape[2] - 1), dtype=numpy.float64)
        _hmm.find_etas(self.probs, self.transitions, self.alphas, self.betas, self.etas)

    def find_gammas(self):
        """Calculate the probability of on observation coming from state i at time t and mixture component m (if continuous)."""
        # self.gamma [num_states, num_mixtures, num_observations] is the probability of an observation arising
        # from a particular component in a particular state.
        self.gammas = numpy.zeros((self.num_states, self.num_dists, self.probs.shape[2]), dtype=numpy.float64)
        _hmm.find_gammas(self.probs, self.alphas, self.betas, self.gammas)

    def train(self, observations, threshold=1e-6, max_iterations=1000):
        """Optimize the model parameters based on a set of training sequences.

        Runs EM (Baum-Welch-style) iterations: for every sequence,
        compute the forward/backward quantities and accumulate new
        parameter estimates in _hmm.update_parameter_estimates; the
        last sequence of each pass is flagged with finalize=1 so the
        accumulated estimates are committed.

        Arguments:
            observations: sequence of 1-D numpy arrays, one per training sequence.
            threshold (float): stop once the reported parameter change drops below this.
            max_iterations (int): hard cap on the number of EM passes.
        """
        # Training is stopped when either the maximum number of iterations is reached or the largest change in a
        # parameter is smaller than the threshold.
        # NOTE(review): `change` keeps only the value returned for the
        # final (finalize=1) sequence of each pass -- presumably that
        # call reports the overall parameter change; confirm in _hmm.
        change = numpy.inf
        iteration = 0
        new_pi = numpy.copy(self.pi)
        new_transitions = numpy.copy(self.transitions)
        new_distributions = numpy.copy(self.distributions)
        while change > threshold and iteration < max_iterations:
            # Accumulators are zeroed at the start of every pass.
            new_pi.fill(0.0)
            new_transitions.fill(0.0)
            new_distributions.fill(0.0)
            for h in range(len(observations)):
                self.find_probabilities(observations[h])
                self.find_alphas()
                self.find_betas()
                self.find_etas()
                self.find_gammas()
                if h == len(observations) - 1:
                    finalize = 1
                else:
                    finalize = 0
                change = _hmm.update_parameter_estimates(observations[h], self.pi, self.transitions,
                                                         self.distributions, new_pi, new_transitions,
                                                         new_distributions, self.etas, self.gammas, finalize)
            # Python 2 print statement: progress line rewritten in place on stderr.
            print >> sys.stderr, ("\rIteration: %03i Change: %.5f") % (iteration, change),
            iteration += 1
        print >> sys.stderr, ("\n"),
        return

    def find_path(self, observations):
        """Find the most likely hidden-state sequence for `observations`.

        Works in log space (zero probabilities mapped to -inf) to avoid
        underflow, then delegates the dynamic-programming search to
        _hmm.find_path (Viterbi-style scoring/backtracking buffers).

        Returns:
            (states, ll): int32 array of per-observation state labels
            and the score returned by _hmm.find_path (presumably the
            path log-likelihood -- confirm in _hmm).
        """
        self.find_probabilities(observations)
        scores = numpy.zeros((self.num_states, observations.shape[0]), dtype=numpy.float64)
        paths = numpy.zeros((self.num_states, observations.shape[0]), dtype=numpy.int32)
        states = numpy.zeros(observations.shape[0], dtype=numpy.int32)
        # log(pi), with log(0) represented as -inf.
        log_pi = numpy.zeros(self.pi.shape, dtype=numpy.float64) - numpy.inf
        where = numpy.where(self.pi > 0.0)
        log_pi[where] = numpy.log(self.pi[where])
        log_transitions = numpy.zeros(self.transitions.shape, dtype=numpy.float64) - numpy.inf
        where = numpy.where(self.transitions > 0.0)
        log_transitions[where] = numpy.log(self.transitions[where])
        # Per-state emission probabilities live in the final slot of
        # self.probs' second axis (see find_probabilities).
        log_probs = numpy.zeros((self.probs.shape[0], self.probs.shape[2]), dtype=numpy.float64) - numpy.inf
        where = numpy.where(self.probs[:, -1, :] > 0.0)
        log_probs[where] = numpy.log(self.probs[where[0], -1, where[1]])
        ll = _hmm.find_path(observations, log_pi, log_transitions, log_probs, scores, paths, states)
        return states, ll

    def generate_sequence(self, seq_len=100):
        """Generate a set of hidden states and emmissions from the model.

        Arguments:
            seq_len (int): number of time steps to simulate.

        Returns:
            (states, observations): the sampled hidden-state labels and
            the gaussian emissions drawn for them.
        """
        states = numpy.zeros(seq_len, dtype=numpy.int32)
        distributions = numpy.zeros(seq_len, dtype=numpy.int32)
        observations = numpy.zeros(seq_len, dtype=numpy.float64)
        # Scratch cumulative-sum buffers used by the C sampler.
        pi_sums = numpy.zeros(self.pi.shape, dtype=numpy.float64)
        transition_sums = numpy.zeros(self.transitions.shape, dtype=numpy.float64)
        weight_sums = numpy.zeros((self.num_states, self.num_dists), dtype=numpy.float64)
        # Two uniform draws per step -- presumably one for the state
        # choice and one for the mixture-component choice (resolved
        # inside _hmm.generate_sequence).
        rand_nums = self.rng.rand(seq_len, 2)
        _hmm.generate_sequence(rand_nums, self.pi, self.transitions, self.distributions, pi_sums,
                               transition_sums, weight_sums, states, distributions)
        # Emissions: normal draws using each chosen component's mean and
        # stddev (distributions stores variance, hence the ** 0.5).
        observations[:] = self.rng.normal(self.distributions[states, distributions, 1],
                                          self.distributions[states, distributions, 2] ** 0.5)
        return states, observations
def test():
hmm1 = HMM(
seed=2001,
num_distributions=2,
num_states=2,
pi=[0.5, 0.5],
transitions=[[0.95, 0.05], [0.1, 0.9]],
distributions=[[[0.5, 1.5, 0.25], [0.5, -1.0, 0.25]], [[0.25, 2.5, 0.25], [0.75, 4.5, 0.5]]],
)
observations = []
for i in range(100):
temp = hmm1.generate_sequence(100)
observations.append(temp[1])
hmm2 = HMM(
seed=2002,
num_distributions=2,
num_states=2,
)
hmm2.train(observations, threshold=1e-6, max_iterations=400)
print 'True Pi: %s Learned Pi: %s' % (str(list(hmm1.pi)), str(list(hmm2.pi)))
for i in range(hmm1.num_states):
print 'State %i -' % i
print 'True transitions: %s Learned transitions: %s' % (str(list(hmm1.transitions[i, :])),
str(list(hmm2.transitions[i, :])))
print 'True distribution weights: %s Learned distribution weights %s' % (
str(list(hmm1.distributions[i, :, 0])),
str(list(hmm2.distributions[i, :, 0])))
print 'True distribution means: %s Learned distribution means %s' % (
str(list(hmm1.distributions[i, :, 1])),
str(list(hmm2.distributions[i, :, 1])))
print 'True distribution variances: %s Learned distribution variances %s' % (
str(list(hmm1.distributions[i, :, 2])),
str(list(hmm2.distributions[i, :, 2])))
|
bxlab/hifive
|
hifive/libraries/hmm.py
|
Python
|
mit
| 9,699
|
[
"Gaussian"
] |
9e17fc749bb5735478b624478cb84c9d5fde9485d4f2d3cf4d85d9b2e89477b5
|
# $Id: docutils_xml.py 7315 2012-01-18 10:16:20Z milde $
# Author: David Goodger, Paul Tremblay, Guenter Milde
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""
Simple document tree Writer, writes Docutils XML according to
http://docutils.sourceforge.net/docs/ref/docutils.dtd.
"""
__docformat__ = 'reStructuredText'
import sys
import xml.sax.saxutils
from StringIO import StringIO
import docutils
from docutils import frontend, writers, nodes
class RawXmlError(docutils.ApplicationError): pass
class Writer(writers.Writer):
    """Docutils writer that emits the document tree as Docutils XML."""

    supported = ('xml',)
    """Formats this writer supports."""

    # Command-line/config options parsed by docutils' frontend machinery:
    # (help text, option strings, optparse-style keyword dict) triples.
    settings_spec = (
        '"Docutils XML" Writer Options',
        None,
        (('Generate XML with newlines before and after tags.',
          ['--newlines'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Generate XML with indents and newlines.',
          ['--indents'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Omit the XML declaration. Use with caution.',
          ['--no-xml-declaration'],
          {'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Omit the DOCTYPE declaration.',
          ['--no-doctype'],
          {'dest': 'doctype_declaration', 'default': 1,
           'action': 'store_false', 'validator': frontend.validate_boolean}),))

    # Unencodable characters become XML character references.
    settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}

    config_section = 'docutils_xml writer'
    config_section_dependencies = ('writers',)

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        writers.Writer.__init__(self)
        # Stored as an attribute so subclasses can substitute their own
        # translator class.
        self.translator_class = XMLTranslator

    def translate(self):
        # Walk the whole document tree; the visitor accumulates XML
        # fragments which are joined into the final output string.
        self.visitor = visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        self.output = ''.join(visitor.output)
class XMLTranslator(nodes.GenericNodeVisitor):
    """Node visitor that serializes a Docutils document tree to XML.

    Output fragments accumulate in ``self.output``; optional newlines
    and indentation are controlled by the ``--newlines`` / ``--indents``
    settings.
    """

    xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
    # TODO: add stylesheet options similar to HTML and LaTeX writers?
    #xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
    doctype = (
        '<!DOCTYPE document PUBLIC'
        ' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
        ' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
    generator = '<!-- Generated by Docutils %s -->\n'

    # Class-level: a single parser instance is shared by all translator
    # instances; it is used only to validate raw XML snippets.
    xmlparser = xml.sax.make_parser()
    """SAX parser instance to check/extract raw XML."""
    # NOTE(review): enabling external general entities permits entity
    # expansion from external sources (XXE-style) during raw-XML
    # validation -- confirm raw content always comes from trusted authors.
    xmlparser.setFeature(
        "http://xml.org/sax/features/external-general-entities", True)

    def __init__(self, document):
        nodes.NodeVisitor.__init__(self, document)

        # Reporter
        self.warn = self.document.reporter.warning
        self.error = self.document.reporter.error

        # Settings
        self.settings = settings = document.settings
        self.indent = self.newline = ''
        if settings.newlines:
            self.newline = '\n'
        if settings.indents:
            # Indented output implies newlines as well.
            self.newline = '\n'
            self.indent = '    '
        self.level = 0  # indentation level
        self.in_simple = 0  # level of nesting inside mixed-content elements

        # Output
        self.output = []
        if settings.xml_declaration:
            self.output.append(
                self.xml_declaration % settings.output_encoding)
        if settings.doctype_declaration:
            self.output.append(self.doctype)
        self.output.append(self.generator % docutils.__version__)

        # initialize XML parser; the handler records the parser's
        # locator so raw-XML parse errors can be positioned (visit_raw).
        self.the_handle=TestXml()
        self.xmlparser.setContentHandler(self.the_handle)

    # generic visit and depart methods
    # --------------------------------

    def default_visit(self, node):
        """Default node visit method."""
        if not self.in_simple:
            self.output.append(self.indent*self.level)
        self.output.append(node.starttag(xml.sax.saxutils.quoteattr))
        self.level += 1
        # TextElements hold mixed content: suppress internal newlines/
        # indentation so no whitespace is injected into character data.
        if isinstance(node, nodes.TextElement):
            self.in_simple += 1
        if not self.in_simple:
            self.output.append(self.newline)

    def default_departure(self, node):
        """Default node depart method."""
        self.level -= 1
        if not self.in_simple:
            self.output.append(self.indent*self.level)
        self.output.append(node.endtag())
        if isinstance(node, nodes.TextElement):
            self.in_simple -= 1
        if not self.in_simple:
            self.output.append(self.newline)

    # specific visit and depart methods
    # ---------------------------------

    def visit_Text(self, node):
        # Escape &, <, > for XML character data.
        text = xml.sax.saxutils.escape(node.astext())
        self.output.append(text)

    def depart_Text(self, node):
        pass

    def visit_raw(self, node):
        """Emit raw-XML content verbatim, validating it with the SAX parser.

        Non-XML raw nodes are serialized generically; XML raw nodes are
        wrapped in a <raw> element, inserted as-is, then parsed to check
        well-formedness (a warning is issued on failure).
        """
        if 'xml' not in node.get('format', '').split():
            # skip other raw content?
            # raise nodes.SkipNode
            self.default_visit(node)
            return
        # wrap in <raw> element
        self.default_visit(node)       # or not?
        xml_string = node.astext()
        self.output.append(xml_string)
        self.default_departure(node)   # or not?
        # Check validity of raw XML:
        if isinstance(xml_string, unicode) and sys.version_info < (3,):
            # Python 2: the SAX parser needs bytes, not unicode.
            xml_string = xml_string.encode('utf8')
        try:
            self.xmlparser.parse(StringIO(xml_string))
        except xml.sax._exceptions.SAXParseException, error:
            # Map the parser's position (from the recorded locator) back
            # onto the source document for the warning.
            col_num = self.the_handle.locator.getColumnNumber()
            line_num = self.the_handle.locator.getLineNumber()
            srcline = node.line
            if not isinstance(node.parent, nodes.TextElement):
                srcline += 2 # directive content start line
            msg = 'Invalid raw XML in column %d, line offset %d:\n%s' % (
                col_num, line_num, node.astext())
            self.warn(msg, source=node.source, line=srcline+line_num-1)
        raise nodes.SkipNode # content already processed
class TestXml(xml.sax.ContentHandler):
    """Minimal SAX handler that only records the parser's locator.

    XMLTranslator.visit_raw queries the stored locator after a parse
    failure to report the line/column of invalid raw XML.
    """

    def setDocumentLocator(self, locator):
        # Called by the SAX parser before parsing begins; keep the
        # reference so positions can be read after an exception.
        self.locator = locator
|
ddd332/presto
|
presto-docs/target/sphinx/docutils/writers/docutils_xml.py
|
Python
|
apache-2.0
| 6,271
|
[
"VisIt"
] |
1d11159b161f45a341ad8dfa2003d88c4ceb15b90b5b78728db574043cce4b3b
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.